repository_name: string, length 7 to 55
func_path_in_repository: string, length 4 to 223
func_name: string, length 1 to 134
whole_func_string: string, length 75 to 104k
language: string, 1 distinct value
func_code_string: string, length 75 to 104k
func_code_tokens: list, length 19 to 28.4k
func_documentation_string: string, length 1 to 46.9k
func_documentation_tokens: list, length 1 to 1.97k
split_name: string, 1 distinct value
func_code_url: string, length 87 to 315
tknapen/FIRDeconvolution
src/FIRDeconvolution.py
FIRDeconvolution.predict_from_design_matrix
def predict_from_design_matrix(self, design_matrix):
    """predict_from_design_matrix predicts signals given a design matrix.

    :param design_matrix: design matrix from which to predict a signal.
    :type design_matrix: numpy array, (nr_samples x betas.shape)
    :returns: predicted signal(s)
    :rtype: numpy array (nr_signals x nr_samples)
    """
    # check if we have already run the regression - which is necessary
    assert hasattr(self, 'betas'), 'no betas found, please run regression before prediction'
    assert design_matrix.shape[0] == self.betas.shape[0], \
        'designmatrix needs to have the same number of regressors as the betas already calculated'

    # betas = np.copy(self.betas.T, order="F", dtype = np.float32)
    # f_design_matrix = np.copy(design_matrix, order = "F", dtype = np.float32)

    prediction = np.dot(self.betas.astype(np.float32).T, design_matrix.astype(np.float32))

    return prediction
python
[ "def", "predict_from_design_matrix", "(", "self", ",", "design_matrix", ")", ":", "# check if we have already run the regression - which is necessary", "assert", "hasattr", "(", "self", ",", "'betas'", ")", ",", "'no betas found, please run regression before prediction'", "assert", "design_matrix", ".", "shape", "[", "0", "]", "==", "self", ".", "betas", ".", "shape", "[", "0", "]", ",", "'designmatrix needs to have the same number of regressors as the betas already calculated'", "# betas = np.copy(self.betas.T, order=\"F\", dtype = np.float32)", "# f_design_matrix = np.copy(design_matrix, order = \"F\", dtype = np.float32)", "prediction", "=", "np", ".", "dot", "(", "self", ".", "betas", ".", "astype", "(", "np", ".", "float32", ")", ".", "T", ",", "design_matrix", ".", "astype", "(", "np", ".", "float32", ")", ")", "return", "prediction" ]
predict_from_design_matrix predicts signals given a design matrix. :param design_matrix: design matrix from which to predict a signal. :type design_matrix: numpy array, (nr_samples x betas.shape) :returns: predicted signal(s) :rtype: numpy array (nr_signals x nr_samples)
[ "predict_from_design_matrix", "predicts", "signals", "given", "a", "design", "matrix", "." ]
train
https://github.com/tknapen/FIRDeconvolution/blob/6263496a356c449062fe4c216fef56541f6dc151/src/FIRDeconvolution.py#L280-L298
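The prediction step described in the docstring above is compact enough to sanity-check on its own. The following is a minimal numpy sketch, not part of the dataset record; the array shapes and random values are assumptions chosen only to match the asserts in the function.

import numpy as np

nr_regressors, nr_signals, nr_samples = 4, 1, 100
betas = np.random.randn(nr_regressors, nr_signals)          # stands in for self.betas
design_matrix = np.random.randn(nr_regressors, nr_samples)  # stands in for the design matrix

# same operation as the function body: betas.T dotted with the design matrix
prediction = np.dot(betas.astype(np.float32).T, design_matrix.astype(np.float32))
print(prediction.shape)  # (1, 100), i.e. (nr_signals x nr_samples)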
tknapen/FIRDeconvolution
src/FIRDeconvolution.py
FIRDeconvolution.calculate_rsq
def calculate_rsq(self):
    """calculate_rsq calculates coefficient of determination, or r-squared, defined here as 1.0 - SS_res / SS_tot.
    rsq is only calculated for those timepoints in the data for which the design matrix is non-zero.
    """
    assert hasattr(self, 'betas'), 'no betas found, please run regression before rsq'

    explained_times = self.design_matrix.sum(axis = 0) != 0
    explained_signal = self.predict_from_design_matrix(self.design_matrix)

    self.rsq = 1.0 - np.sum((explained_signal[:,explained_times] - self.resampled_signal[:,explained_times])**2, axis = -1) / np.sum(self.resampled_signal[:,explained_times].squeeze()**2, axis = -1)
    self.ssr = np.sum((explained_signal[:,explained_times] - self.resampled_signal[:,explained_times])**2, axis = -1)

    return np.squeeze(self.rsq)
python
[ "def", "calculate_rsq", "(", "self", ")", ":", "assert", "hasattr", "(", "self", ",", "'betas'", ")", ",", "'no betas found, please run regression before rsq'", "explained_times", "=", "self", ".", "design_matrix", ".", "sum", "(", "axis", "=", "0", ")", "!=", "0", "explained_signal", "=", "self", ".", "predict_from_design_matrix", "(", "self", ".", "design_matrix", ")", "self", ".", "rsq", "=", "1.0", "-", "np", ".", "sum", "(", "(", "explained_signal", "[", ":", ",", "explained_times", "]", "-", "self", ".", "resampled_signal", "[", ":", ",", "explained_times", "]", ")", "**", "2", ",", "axis", "=", "-", "1", ")", "/", "np", ".", "sum", "(", "self", ".", "resampled_signal", "[", ":", ",", "explained_times", "]", ".", "squeeze", "(", ")", "**", "2", ",", "axis", "=", "-", "1", ")", "self", ".", "ssr", "=", "np", ".", "sum", "(", "(", "explained_signal", "[", ":", ",", "explained_times", "]", "-", "self", ".", "resampled_signal", "[", ":", ",", "explained_times", "]", ")", "**", "2", ",", "axis", "=", "-", "1", ")", "return", "np", ".", "squeeze", "(", "self", ".", "rsq", ")" ]
calculate_rsq calculates coefficient of determination, or r-squared, defined here as 1.0 - SS_res / SS_tot. rsq is only calculated for those timepoints in the data for which the design matrix is non-zero.
[ "calculate_rsq", "calculates", "coefficient", "of", "determination", "or", "r", "-", "squared", "defined", "here", "as", "1", ".", "0", "-", "SS_res", "/", "SS_tot", ".", "rsq", "is", "only", "calculated", "for", "those", "timepoints", "in", "the", "data", "for", "which", "the", "design", "matrix", "is", "non", "-", "zero", "." ]
train
https://github.com/tknapen/FIRDeconvolution/blob/6263496a356c449062fe4c216fef56541f6dc151/src/FIRDeconvolution.py#L300-L310
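As a quick illustration of the r-squared definition used above (1.0 - SS_res / SS_tot, with SS_tot taken over the raw signal rather than the mean-centred signal), here is a toy computation. It is not part of the dataset record and the numbers are made up.

import numpy as np

signal = np.array([[1.0, 2.0, 3.0, 4.0]])     # stands in for the resampled signal
predicted = np.array([[1.1, 1.9, 3.2, 3.8]])  # stands in for the predicted signal

rsq = 1.0 - np.sum((predicted - signal) ** 2, axis=-1) / np.sum(signal ** 2, axis=-1)
print(rsq)  # close to 1.0 for a good fit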
tknapen/FIRDeconvolution
src/FIRDeconvolution.py
FIRDeconvolution.bootstrap_on_residuals
def bootstrap_on_residuals(self, nr_repetitions = 1000):
    """bootstrap_on_residuals bootstraps, by shuffling the residuals.
    bootstrap_on_residuals should only be used on single-channel data, as otherwise the memory load might increase too much.
    This uses the lstsq backend regression for a single-pass fit across repetitions.
    Please note that shuffling the residuals may change the autocorrelation of the bootstrap samples relative to that of the original data and that may reduce its validity.
    Reference: https://en.wikipedia.org/wiki/Bootstrapping_(statistics)#Resampling_residuals

    :param nr_repetitions: number of repetitions for the bootstrap.
    :type nr_repetitions: int
    """
    assert self.resampled_signal.shape[0] == 1, \
        'signal input into bootstrap_on_residuals cannot contain signals from multiple channels at once, present shape %s' % str(self.resampled_signal.shape)
    assert hasattr(self, 'betas'), 'no betas found, please run regression before bootstrapping'

    # create bootstrap data by taking the residuals
    bootstrap_data = np.zeros((self.resampled_signal_size, nr_repetitions))
    explained_signal = self.predict_from_design_matrix(self.design_matrix).T

    for x in range(bootstrap_data.shape[-1]):  # loop over bootstrapsamples
        bootstrap_data[:,x] = (self.residuals.T[np.random.permutation(self.resampled_signal_size)] + explained_signal).squeeze()

    self.bootstrap_betas, bs_residuals, rank, s = LA.lstsq(self.design_matrix.T, bootstrap_data)

    self.bootstrap_betas_per_event_type = np.zeros((len(self.covariates), self.deconvolution_interval_size, nr_repetitions))

    for i, covariate in enumerate(list(self.covariates.keys())):
        # find the index in the designmatrix of the current covariate
        this_covariate_index = list(self.covariates.keys()).index(covariate)
        self.bootstrap_betas_per_event_type[i] = self.bootstrap_betas[this_covariate_index*self.deconvolution_interval_size:(this_covariate_index+1)*self.deconvolution_interval_size]
python
[ "def", "bootstrap_on_residuals", "(", "self", ",", "nr_repetitions", "=", "1000", ")", ":", "assert", "self", ".", "resampled_signal", ".", "shape", "[", "0", "]", "==", "1", ",", "'signal input into bootstrap_on_residuals cannot contain signals from multiple channels at once, present shape %s'", "%", "str", "(", "self", ".", "resampled_signal", ".", "shape", ")", "assert", "hasattr", "(", "self", ",", "'betas'", ")", ",", "'no betas found, please run regression before bootstrapping'", "# create bootstrap data by taking the residuals", "bootstrap_data", "=", "np", ".", "zeros", "(", "(", "self", ".", "resampled_signal_size", ",", "nr_repetitions", ")", ")", "explained_signal", "=", "self", ".", "predict_from_design_matrix", "(", "self", ".", "design_matrix", ")", ".", "T", "for", "x", "in", "range", "(", "bootstrap_data", ".", "shape", "[", "-", "1", "]", ")", ":", "# loop over bootstrapsamples", "bootstrap_data", "[", ":", ",", "x", "]", "=", "(", "self", ".", "residuals", ".", "T", "[", "np", ".", "random", ".", "permutation", "(", "self", ".", "resampled_signal_size", ")", "]", "+", "explained_signal", ")", ".", "squeeze", "(", ")", "self", ".", "bootstrap_betas", ",", "bs_residuals", ",", "rank", ",", "s", "=", "LA", ".", "lstsq", "(", "self", ".", "design_matrix", ".", "T", ",", "bootstrap_data", ")", "self", ".", "bootstrap_betas_per_event_type", "=", "np", ".", "zeros", "(", "(", "len", "(", "self", ".", "covariates", ")", ",", "self", ".", "deconvolution_interval_size", ",", "nr_repetitions", ")", ")", "for", "i", ",", "covariate", "in", "enumerate", "(", "list", "(", "self", ".", "covariates", ".", "keys", "(", ")", ")", ")", ":", "# find the index in the designmatrix of the current covariate", "this_covariate_index", "=", "list", "(", "self", ".", "covariates", ".", "keys", "(", ")", ")", ".", "index", "(", "covariate", ")", "self", ".", "bootstrap_betas_per_event_type", "[", "i", "]", "=", "self", ".", "bootstrap_betas", "[", "this_covariate_index", "*", "self", ".", "deconvolution_interval_size", ":", "(", "this_covariate_index", "+", "1", ")", "*", "self", ".", "deconvolution_interval_size", "]" ]
bootstrap_on_residuals bootstraps, by shuffling the residuals. bootstrap_on_residuals should only be used on single-channel data, as otherwise the memory load might increase too much. This uses the lstsq backend regression for a single-pass fit across repetitions. Please note that shuffling the residuals may change the autocorrelation of the bootstrap samples relative to that of the original data and that may reduce its validity. Reference: https://en.wikipedia.org/wiki/Bootstrapping_(statistics)#Resampling_residuals :param nr_repetitions: number of repetitions for the bootstrap. :type nr_repetitions: int
[ "bootstrap_on_residuals", "bootstraps", "by", "shuffling", "the", "residuals", ".", "bootstrap_on_residuals", "should", "only", "be", "used", "on", "single", "-", "channel", "data", "as", "otherwise", "the", "memory", "load", "might", "increase", "too", "much", ".", "This", "uses", "the", "lstsq", "backend", "regression", "for", "a", "single", "-", "pass", "fit", "across", "repetitions", ".", "Please", "note", "that", "shuffling", "the", "residuals", "may", "change", "the", "autocorrelation", "of", "the", "bootstrap", "samples", "relative", "to", "that", "of", "the", "original", "data", "and", "that", "may", "reduce", "its", "validity", ".", "Reference", ":", "https", ":", "//", "en", ".", "wikipedia", ".", "org", "/", "wiki", "/", "Bootstrapping_", "(", "statistics", ")", "#Resampling_residuals" ]
train
https://github.com/tknapen/FIRDeconvolution/blob/6263496a356c449062fe4c216fef56541f6dc151/src/FIRDeconvolution.py#L312-L337
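The residual-resampling scheme referenced in the docstring can be reproduced in a few lines of plain numpy. The sketch below is illustrative only and not part of the dataset record; the array sizes and the use of numpy.linalg.lstsq in place of the class's regression backend are assumptions.

import numpy as np

rng = np.random.default_rng(0)
n_samples, n_regressors, n_reps = 200, 3, 100
design = rng.standard_normal((n_samples, n_regressors))
signal = design @ np.array([1.0, -0.5, 2.0]) + 0.1 * rng.standard_normal(n_samples)

# initial fit, fitted signal and residuals
betas, _, _, _ = np.linalg.lstsq(design, signal, rcond=None)
fitted = design @ betas
residuals = signal - fitted

# shuffle residuals onto the fitted signal, then refit all repetitions in one lstsq call
bootstrap_data = np.stack([fitted + rng.permutation(residuals) for _ in range(n_reps)], axis=1)
bootstrap_betas, _, _, _ = np.linalg.lstsq(design, bootstrap_data, rcond=None)
print(bootstrap_betas.shape)  # (3, 100): one beta vector per repetition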
cga-harvard/Hypermap-Registry
hypermap/context_processors.py
resource_urls
def resource_urls(request):
    """Global values to pass to templates"""
    url_parsed = urlparse(settings.SEARCH_URL)
    defaults = dict(
        APP_NAME=__description__,
        APP_VERSION=__version__,
        SITE_URL=settings.SITE_URL.rstrip('/'),
        SEARCH_TYPE=settings.SEARCH_TYPE,
        SEARCH_URL=settings.SEARCH_URL,
        SEARCH_IP='%s://%s:%s' % (url_parsed.scheme, url_parsed.hostname, url_parsed.port)
    )
    return defaults
python
[ "def", "resource_urls", "(", "request", ")", ":", "url_parsed", "=", "urlparse", "(", "settings", ".", "SEARCH_URL", ")", "defaults", "=", "dict", "(", "APP_NAME", "=", "__description__", ",", "APP_VERSION", "=", "__version__", ",", "SITE_URL", "=", "settings", ".", "SITE_URL", ".", "rstrip", "(", "'/'", ")", ",", "SEARCH_TYPE", "=", "settings", ".", "SEARCH_TYPE", ",", "SEARCH_URL", "=", "settings", ".", "SEARCH_URL", ",", "SEARCH_IP", "=", "'%s://%s:%s'", "%", "(", "url_parsed", ".", "scheme", ",", "url_parsed", ".", "hostname", ",", "url_parsed", ".", "port", ")", ")", "return", "defaults" ]
Global values to pass to templates
[ "Global", "values", "to", "pass", "to", "templates" ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/context_processors.py#L7-L19
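For reference, the SEARCH_IP value assembled above is just the scheme, hostname and port of the configured search URL. A standalone sketch, with a made-up URL rather than the project's actual settings:

from urllib.parse import urlparse

url_parsed = urlparse('http://localhost:8983/solr/hypermap')  # example URL, not from settings
search_ip = '%s://%s:%s' % (url_parsed.scheme, url_parsed.hostname, url_parsed.port)
print(search_ip)  # http://localhost:8983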
cga-harvard/Hypermap-Registry
hypermap/aggregator/tasks.py
index_cached_layers
def index_cached_layers(self):
    """
    Index and unindex all layers in the Django cache (Index all layers who have been checked).
    """
    from hypermap.aggregator.models import Layer

    if SEARCH_TYPE == 'solr':
        from hypermap.aggregator.solr import SolrHypermap
        solrobject = SolrHypermap()
    else:
        from hypermap.aggregator.elasticsearch_client import ESHypermap
        from elasticsearch import helpers
        es_client = ESHypermap()

    layers_cache = cache.get('layers')
    deleted_layers_cache = cache.get('deleted_layers')

    # 1. added layers cache
    if layers_cache:
        layers_list = list(layers_cache)
        LOGGER.debug('There are %s layers in cache: %s' % (len(layers_list), layers_list))

        batch_size = settings.REGISTRY_SEARCH_BATCH_SIZE
        batch_lists = [layers_list[i:i+batch_size] for i in range(0, len(layers_list), batch_size)]

        for batch_list_ids in batch_lists:
            layers = Layer.objects.filter(id__in=batch_list_ids)
            if batch_size > len(layers):
                batch_size = len(layers)
            LOGGER.debug('Syncing %s/%s layers to %s: %s' % (batch_size, len(layers_cache), layers, SEARCH_TYPE))

            try:
                # SOLR
                if SEARCH_TYPE == 'solr':
                    success, layers_errors_ids = solrobject.layers_to_solr(layers)
                    if success:
                        # remove layers from cache here
                        layers_cache = layers_cache.difference(set(batch_list_ids))
                        LOGGER.debug('Removing layers with id %s from cache' % batch_list_ids)
                        cache.set('layers', layers_cache)
                # ES
                elif SEARCH_TYPE == 'elasticsearch':
                    with_bulk, success = True, False
                    layers_to_index = [es_client.layer_to_es(layer, with_bulk) for layer in layers]
                    message = helpers.bulk(es_client.es, layers_to_index)
                    # Check that all layers where indexed...if not, don't clear cache.
                    # TODO: Check why es does not index all layers at first.
                    len_indexed_layers = message[0]
                    if len_indexed_layers == len(layers):
                        LOGGER.debug('%d layers indexed successfully' % (len_indexed_layers))
                        success = True
                    if success:
                        # remove layers from cache here
                        layers_cache = layers_cache.difference(set(batch_list_ids))
                        cache.set('layers', layers_cache)
                else:
                    raise Exception("Incorrect SEARCH_TYPE=%s" % SEARCH_TYPE)
            except Exception as e:
                LOGGER.error('Layers were NOT indexed correctly')
                LOGGER.error(e, exc_info=True)
    else:
        LOGGER.debug('No cached layers to add in search engine.')

    # 2. deleted layers cache
    if deleted_layers_cache:
        layers_list = list(deleted_layers_cache)
        LOGGER.debug('There are %s layers in cache for deleting: %s' % (len(layers_list), layers_list))
        # TODO implement me: batch layer index deletion
        for layer_id in layers_list:
            # SOLR
            if SEARCH_TYPE == 'solr':
                if Layer.objects.filter(pk=layer_id).exists():
                    layer = Layer.objects.get(id=layer_id)
                    unindex_layer(layer.id, use_cache=False)
                deleted_layers_cache = deleted_layers_cache.difference(set([layer_id]))
                cache.set('deleted_layers', deleted_layers_cache)
            else:
                # TODO implement me
                raise NotImplementedError
    else:
        LOGGER.debug('No cached layers to remove in search engine.')
python
[ "def", "index_cached_layers", "(", "self", ")", ":", "from", "hypermap", ".", "aggregator", ".", "models", "import", "Layer", "if", "SEARCH_TYPE", "==", "'solr'", ":", "from", "hypermap", ".", "aggregator", ".", "solr", "import", "SolrHypermap", "solrobject", "=", "SolrHypermap", "(", ")", "else", ":", "from", "hypermap", ".", "aggregator", ".", "elasticsearch_client", "import", "ESHypermap", "from", "elasticsearch", "import", "helpers", "es_client", "=", "ESHypermap", "(", ")", "layers_cache", "=", "cache", ".", "get", "(", "'layers'", ")", "deleted_layers_cache", "=", "cache", ".", "get", "(", "'deleted_layers'", ")", "# 1. added layers cache", "if", "layers_cache", ":", "layers_list", "=", "list", "(", "layers_cache", ")", "LOGGER", ".", "debug", "(", "'There are %s layers in cache: %s'", "%", "(", "len", "(", "layers_list", ")", ",", "layers_list", ")", ")", "batch_size", "=", "settings", ".", "REGISTRY_SEARCH_BATCH_SIZE", "batch_lists", "=", "[", "layers_list", "[", "i", ":", "i", "+", "batch_size", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "layers_list", ")", ",", "batch_size", ")", "]", "for", "batch_list_ids", "in", "batch_lists", ":", "layers", "=", "Layer", ".", "objects", ".", "filter", "(", "id__in", "=", "batch_list_ids", ")", "if", "batch_size", ">", "len", "(", "layers", ")", ":", "batch_size", "=", "len", "(", "layers", ")", "LOGGER", ".", "debug", "(", "'Syncing %s/%s layers to %s: %s'", "%", "(", "batch_size", ",", "len", "(", "layers_cache", ")", ",", "layers", ",", "SEARCH_TYPE", ")", ")", "try", ":", "# SOLR", "if", "SEARCH_TYPE", "==", "'solr'", ":", "success", ",", "layers_errors_ids", "=", "solrobject", ".", "layers_to_solr", "(", "layers", ")", "if", "success", ":", "# remove layers from cache here", "layers_cache", "=", "layers_cache", ".", "difference", "(", "set", "(", "batch_list_ids", ")", ")", "LOGGER", ".", "debug", "(", "'Removing layers with id %s from cache'", "%", "batch_list_ids", ")", "cache", ".", "set", "(", "'layers'", ",", "layers_cache", ")", "# ES", "elif", "SEARCH_TYPE", "==", "'elasticsearch'", ":", "with_bulk", ",", "success", "=", "True", ",", "False", "layers_to_index", "=", "[", "es_client", ".", "layer_to_es", "(", "layer", ",", "with_bulk", ")", "for", "layer", "in", "layers", "]", "message", "=", "helpers", ".", "bulk", "(", "es_client", ".", "es", ",", "layers_to_index", ")", "# Check that all layers where indexed...if not, don't clear cache.", "# TODO: Check why es does not index all layers at first.", "len_indexed_layers", "=", "message", "[", "0", "]", "if", "len_indexed_layers", "==", "len", "(", "layers", ")", ":", "LOGGER", ".", "debug", "(", "'%d layers indexed successfully'", "%", "(", "len_indexed_layers", ")", ")", "success", "=", "True", "if", "success", ":", "# remove layers from cache here", "layers_cache", "=", "layers_cache", ".", "difference", "(", "set", "(", "batch_list_ids", ")", ")", "cache", ".", "set", "(", "'layers'", ",", "layers_cache", ")", "else", ":", "raise", "Exception", "(", "\"Incorrect SEARCH_TYPE=%s\"", "%", "SEARCH_TYPE", ")", "except", "Exception", "as", "e", ":", "LOGGER", ".", "error", "(", "'Layers were NOT indexed correctly'", ")", "LOGGER", ".", "error", "(", "e", ",", "exc_info", "=", "True", ")", "else", ":", "LOGGER", ".", "debug", "(", "'No cached layers to add in search engine.'", ")", "# 2. deleted layers cache", "if", "deleted_layers_cache", ":", "layers_list", "=", "list", "(", "deleted_layers_cache", ")", "LOGGER", ".", "debug", "(", "'There are %s layers in cache for deleting: %s'", "%", "(", "len", "(", "layers_list", ")", ",", "layers_list", ")", ")", "# TODO implement me: batch layer index deletion", "for", "layer_id", "in", "layers_list", ":", "# SOLR", "if", "SEARCH_TYPE", "==", "'solr'", ":", "if", "Layer", ".", "objects", ".", "filter", "(", "pk", "=", "layer_id", ")", ".", "exists", "(", ")", ":", "layer", "=", "Layer", ".", "objects", ".", "get", "(", "id", "=", "layer_id", ")", "unindex_layer", "(", "layer", ".", "id", ",", "use_cache", "=", "False", ")", "deleted_layers_cache", "=", "deleted_layers_cache", ".", "difference", "(", "set", "(", "[", "layer_id", "]", ")", ")", "cache", ".", "set", "(", "'deleted_layers'", ",", "deleted_layers_cache", ")", "else", ":", "# TODO implement me", "raise", "NotImplementedError", "else", ":", "LOGGER", ".", "debug", "(", "'No cached layers to remove in search engine.'", ")" ]
Index and unindex all layers in the Django cache (Index all layers who have been checked).
[ "Index", "and", "unindex", "all", "layers", "in", "the", "Django", "cache", "(", "Index", "all", "layers", "who", "have", "been", "checked", ")", "." ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/tasks.py#L94-L177
cga-harvard/Hypermap-Registry
hypermap/aggregator/tasks.py
remove_service_checks
def remove_service_checks(self, service_id):
    """
    Remove all checks from a service.
    """
    from hypermap.aggregator.models import Service

    service = Service.objects.get(id=service_id)

    service.check_set.all().delete()
    layer_to_process = service.layer_set.all()
    for layer in layer_to_process:
        layer.check_set.all().delete()
python
[ "def", "remove_service_checks", "(", "self", ",", "service_id", ")", ":", "from", "hypermap", ".", "aggregator", ".", "models", "import", "Service", "service", "=", "Service", ".", "objects", ".", "get", "(", "id", "=", "service_id", ")", "service", ".", "check_set", ".", "all", "(", ")", ".", "delete", "(", ")", "layer_to_process", "=", "service", ".", "layer_set", ".", "all", "(", ")", "for", "layer", "in", "layer_to_process", ":", "layer", ".", "check_set", ".", "all", "(", ")", ".", "delete", "(", ")" ]
Remove all checks from a service.
[ "Remove", "all", "checks", "from", "a", "service", "." ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/tasks.py#L195-L205
cga-harvard/Hypermap-Registry
hypermap/aggregator/tasks.py
index_service
def index_service(self, service_id):
    """
    Index a service in search engine.
    """
    from hypermap.aggregator.models import Service

    service = Service.objects.get(id=service_id)

    if not service.is_valid:
        LOGGER.debug('Not indexing service with id %s in search engine as it is not valid' % service.id)
        return

    LOGGER.debug('Indexing service %s' % service.id)

    layer_to_process = service.layer_set.all()
    for layer in layer_to_process:
        if not settings.REGISTRY_SKIP_CELERY:
            index_layer(layer.id, use_cache=True)
        else:
            index_layer(layer.id)
python
[ "def", "index_service", "(", "self", ",", "service_id", ")", ":", "from", "hypermap", ".", "aggregator", ".", "models", "import", "Service", "service", "=", "Service", ".", "objects", ".", "get", "(", "id", "=", "service_id", ")", "if", "not", "service", ".", "is_valid", ":", "LOGGER", ".", "debug", "(", "'Not indexing service with id %s in search engine as it is not valid'", "%", "service", ".", "id", ")", "return", "LOGGER", ".", "debug", "(", "'Indexing service %s'", "%", "service", ".", "id", ")", "layer_to_process", "=", "service", ".", "layer_set", ".", "all", "(", ")", "for", "layer", "in", "layer_to_process", ":", "if", "not", "settings", ".", "REGISTRY_SKIP_CELERY", ":", "index_layer", "(", "layer", ".", "id", ",", "use_cache", "=", "True", ")", "else", ":", "index_layer", "(", "layer", ".", "id", ")" ]
Index a service in search engine.
[ "Index", "a", "service", "in", "search", "engine", "." ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/tasks.py#L209-L228
cga-harvard/Hypermap-Registry
hypermap/aggregator/tasks.py
index_layer
def index_layer(self, layer_id, use_cache=False):
    """
    Index a layer in the search backend.
    If cache is set, append it to the list, if it isn't send the transaction right away.
    cache needs memcached to be available.
    """
    from hypermap.aggregator.models import Layer

    layer = Layer.objects.get(id=layer_id)

    if not layer.is_valid:
        LOGGER.debug('Not indexing or removing layer with id %s in search engine as it is not valid' % layer.id)
        unindex_layer(layer.id, use_cache)
        return

    if layer.was_deleted:
        LOGGER.debug('Not indexing or removing layer with id %s in search engine as was_deleted is true' % layer.id)
        unindex_layer(layer.id, use_cache)
        return

    # 1. if we use cache
    if use_cache:
        LOGGER.debug('Caching layer with id %s for syncing with search engine' % layer.id)
        layers = cache.get('layers')
        if layers is None:
            layers = set([layer.id])
        else:
            layers.add(layer.id)
        cache.set('layers', layers)
        return

    # 2. if we don't use cache
    # TODO: Make this function more DRY
    # by abstracting the common bits.
    if SEARCH_TYPE == 'solr':
        from hypermap.aggregator.solr import SolrHypermap
        LOGGER.debug('Syncing layer %s to solr' % layer.name)
        solrobject = SolrHypermap()
        success, message = solrobject.layer_to_solr(layer)
        # update the error message if using celery
        if not settings.REGISTRY_SKIP_CELERY:
            if not success:
                self.update_state(
                    state=states.FAILURE,
                    meta=message
                )
                raise Ignore()
    elif SEARCH_TYPE == 'elasticsearch':
        from hypermap.aggregator.elasticsearch_client import ESHypermap
        LOGGER.debug('Syncing layer %s to es' % layer.name)
        esobject = ESHypermap()
        success, message = esobject.layer_to_es(layer)
        # update the error message if using celery
        if not settings.REGISTRY_SKIP_CELERY:
            if not success:
                self.update_state(
                    state=states.FAILURE,
                    meta=message
                )
                raise Ignore()
python
[ "def", "index_layer", "(", "self", ",", "layer_id", ",", "use_cache", "=", "False", ")", ":", "from", "hypermap", ".", "aggregator", ".", "models", "import", "Layer", "layer", "=", "Layer", ".", "objects", ".", "get", "(", "id", "=", "layer_id", ")", "if", "not", "layer", ".", "is_valid", ":", "LOGGER", ".", "debug", "(", "'Not indexing or removing layer with id %s in search engine as it is not valid'", "%", "layer", ".", "id", ")", "unindex_layer", "(", "layer", ".", "id", ",", "use_cache", ")", "return", "if", "layer", ".", "was_deleted", ":", "LOGGER", ".", "debug", "(", "'Not indexing or removing layer with id %s in search engine as was_deleted is true'", "%", "layer", ".", "id", ")", "unindex_layer", "(", "layer", ".", "id", ",", "use_cache", ")", "return", "# 1. if we use cache", "if", "use_cache", ":", "LOGGER", ".", "debug", "(", "'Caching layer with id %s for syncing with search engine'", "%", "layer", ".", "id", ")", "layers", "=", "cache", ".", "get", "(", "'layers'", ")", "if", "layers", "is", "None", ":", "layers", "=", "set", "(", "[", "layer", ".", "id", "]", ")", "else", ":", "layers", ".", "add", "(", "layer", ".", "id", ")", "cache", ".", "set", "(", "'layers'", ",", "layers", ")", "return", "# 2. if we don't use cache", "# TODO: Make this function more DRY", "# by abstracting the common bits.", "if", "SEARCH_TYPE", "==", "'solr'", ":", "from", "hypermap", ".", "aggregator", ".", "solr", "import", "SolrHypermap", "LOGGER", ".", "debug", "(", "'Syncing layer %s to solr'", "%", "layer", ".", "name", ")", "solrobject", "=", "SolrHypermap", "(", ")", "success", ",", "message", "=", "solrobject", ".", "layer_to_solr", "(", "layer", ")", "# update the error message if using celery", "if", "not", "settings", ".", "REGISTRY_SKIP_CELERY", ":", "if", "not", "success", ":", "self", ".", "update_state", "(", "state", "=", "states", ".", "FAILURE", ",", "meta", "=", "message", ")", "raise", "Ignore", "(", ")", "elif", "SEARCH_TYPE", "==", "'elasticsearch'", ":", "from", "hypermap", ".", "aggregator", ".", "elasticsearch_client", "import", "ESHypermap", "LOGGER", ".", "debug", "(", "'Syncing layer %s to es'", "%", "layer", ".", "name", ")", "esobject", "=", "ESHypermap", "(", ")", "success", ",", "message", "=", "esobject", ".", "layer_to_es", "(", "layer", ")", "# update the error message if using celery", "if", "not", "settings", ".", "REGISTRY_SKIP_CELERY", ":", "if", "not", "success", ":", "self", ".", "update_state", "(", "state", "=", "states", ".", "FAILURE", ",", "meta", "=", "message", ")", "raise", "Ignore", "(", ")" ]
Index a layer in the search backend. If cache is set, append it to the list, if it isn't send the transaction right away. cache needs memcached to be available.
[ "Index", "a", "layer", "in", "the", "search", "backend", ".", "If", "cache", "is", "set", "append", "it", "to", "the", "list", "if", "it", "isn", "t", "send", "the", "transaction", "right", "away", ".", "cache", "needs", "memcached", "to", "be", "available", "." ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/tasks.py#L232-L291
cga-harvard/Hypermap-Registry
hypermap/aggregator/tasks.py
unindex_layers_with_issues
def unindex_layers_with_issues(self, use_cache=False):
    """
    Remove the index for layers in search backend, which are linked to an issue.
    """
    from hypermap.aggregator.models import Issue, Layer, Service
    from django.contrib.contenttypes.models import ContentType

    layer_type = ContentType.objects.get_for_model(Layer)
    service_type = ContentType.objects.get_for_model(Service)

    for issue in Issue.objects.filter(content_type__pk=layer_type.id):
        unindex_layer(issue.content_object.id, use_cache)

    for issue in Issue.objects.filter(content_type__pk=service_type.id):
        for layer in issue.content_object.layer_set.all():
            unindex_layer(layer.id, use_cache)
python
[ "def", "unindex_layers_with_issues", "(", "self", ",", "use_cache", "=", "False", ")", ":", "from", "hypermap", ".", "aggregator", ".", "models", "import", "Issue", ",", "Layer", ",", "Service", "from", "django", ".", "contrib", ".", "contenttypes", ".", "models", "import", "ContentType", "layer_type", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "Layer", ")", "service_type", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "Service", ")", "for", "issue", "in", "Issue", ".", "objects", ".", "filter", "(", "content_type__pk", "=", "layer_type", ".", "id", ")", ":", "unindex_layer", "(", "issue", ".", "content_object", ".", "id", ",", "use_cache", ")", "for", "issue", "in", "Issue", ".", "objects", ".", "filter", "(", "content_type__pk", "=", "service_type", ".", "id", ")", ":", "for", "layer", "in", "issue", ".", "content_object", ".", "layer_set", ".", "all", "(", ")", ":", "unindex_layer", "(", "layer", ".", "id", ",", "use_cache", ")" ]
Remove the index for layers in search backend, which are linked to an issue.
[ "Remove", "the", "index", "for", "layers", "in", "search", "backend", "which", "are", "linked", "to", "an", "issue", "." ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/tasks.py#L295-L310
cga-harvard/Hypermap-Registry
hypermap/aggregator/tasks.py
unindex_layer
def unindex_layer(self, layer_id, use_cache=False):
    """
    Remove the index for a layer in the search backend.
    If cache is set, append it to the list of removed layers, if it isn't send the transaction right away.
    """
    from hypermap.aggregator.models import Layer

    layer = Layer.objects.get(id=layer_id)

    if use_cache:
        LOGGER.debug('Caching layer with id %s for being removed from search engine' % layer.id)
        deleted_layers = cache.get('deleted_layers')
        if deleted_layers is None:
            deleted_layers = set([layer.id])
        else:
            deleted_layers.add(layer.id)
        cache.set('deleted_layers', deleted_layers)
        return

    if SEARCH_TYPE == 'solr':
        from hypermap.aggregator.solr import SolrHypermap
        LOGGER.debug('Removing layer %s from solr' % layer.id)
        try:
            solrobject = SolrHypermap()
            solrobject.remove_layer(layer.uuid)
        except Exception:
            LOGGER.error('Layer NOT correctly removed from Solr')
    elif SEARCH_TYPE == 'elasticsearch':
        # TODO implement me
        pass
python
[ "def", "unindex_layer", "(", "self", ",", "layer_id", ",", "use_cache", "=", "False", ")", ":", "from", "hypermap", ".", "aggregator", ".", "models", "import", "Layer", "layer", "=", "Layer", ".", "objects", ".", "get", "(", "id", "=", "layer_id", ")", "if", "use_cache", ":", "LOGGER", ".", "debug", "(", "'Caching layer with id %s for being removed from search engine'", "%", "layer", ".", "id", ")", "deleted_layers", "=", "cache", ".", "get", "(", "'deleted_layers'", ")", "if", "deleted_layers", "is", "None", ":", "deleted_layers", "=", "set", "(", "[", "layer", ".", "id", "]", ")", "else", ":", "deleted_layers", ".", "add", "(", "layer", ".", "id", ")", "cache", ".", "set", "(", "'deleted_layers'", ",", "deleted_layers", ")", "return", "if", "SEARCH_TYPE", "==", "'solr'", ":", "from", "hypermap", ".", "aggregator", ".", "solr", "import", "SolrHypermap", "LOGGER", ".", "debug", "(", "'Removing layer %s from solr'", "%", "layer", ".", "id", ")", "try", ":", "solrobject", "=", "SolrHypermap", "(", ")", "solrobject", ".", "remove_layer", "(", "layer", ".", "uuid", ")", "except", "Exception", ":", "LOGGER", ".", "error", "(", "'Layer NOT correctly removed from Solr'", ")", "elif", "SEARCH_TYPE", "==", "'elasticsearch'", ":", "# TODO implement me", "pass" ]
Remove the index for a layer in the search backend. If cache is set, append it to the list of removed layers, if it isn't send the transaction right away.
[ "Remove", "the", "index", "for", "a", "layer", "in", "the", "search", "backend", ".", "If", "cache", "is", "set", "append", "it", "to", "the", "list", "of", "removed", "layers", "if", "it", "isn", "t", "send", "the", "transaction", "right", "away", "." ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/tasks.py#L314-L343
cga-harvard/Hypermap-Registry
hypermap/aggregator/tasks.py
index_all_layers
def index_all_layers(self):
    """
    Index all layers in search engine.
    """
    from hypermap.aggregator.models import Layer

    if not settings.REGISTRY_SKIP_CELERY:
        layers_cache = set(Layer.objects.filter(is_valid=True).values_list('id', flat=True))
        deleted_layers_cache = set(Layer.objects.filter(is_valid=False).values_list('id', flat=True))
        cache.set('layers', layers_cache)
        cache.set('deleted_layers', deleted_layers_cache)
    else:
        for layer in Layer.objects.all():
            index_layer(layer.id)
python
[ "def", "index_all_layers", "(", "self", ")", ":", "from", "hypermap", ".", "aggregator", ".", "models", "import", "Layer", "if", "not", "settings", ".", "REGISTRY_SKIP_CELERY", ":", "layers_cache", "=", "set", "(", "Layer", ".", "objects", ".", "filter", "(", "is_valid", "=", "True", ")", ".", "values_list", "(", "'id'", ",", "flat", "=", "True", ")", ")", "deleted_layers_cache", "=", "set", "(", "Layer", ".", "objects", ".", "filter", "(", "is_valid", "=", "False", ")", ".", "values_list", "(", "'id'", ",", "flat", "=", "True", ")", ")", "cache", ".", "set", "(", "'layers'", ",", "layers_cache", ")", "cache", ".", "set", "(", "'deleted_layers'", ",", "deleted_layers_cache", ")", "else", ":", "for", "layer", "in", "Layer", ".", "objects", ".", "all", "(", ")", ":", "index_layer", "(", "layer", ".", "id", ")" ]
Index all layers in search engine.
[ "Index", "all", "layers", "in", "search", "engine", "." ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/tasks.py#L347-L360
cga-harvard/Hypermap-Registry
hypermap/aggregator/tasks.py
update_last_wm_layers
def update_last_wm_layers(self, service_id, num_layers=10):
    """
    Update and index the last added and deleted layers (num_layers) in WorldMap service.
    """
    from hypermap.aggregator.models import Service

    LOGGER.debug(
        'Updating the index the last %s added and %s deleted layers in WorldMap service' % (num_layers, num_layers)
    )
    service = Service.objects.get(id=service_id)

    # TODO raise error if service type is not WM type
    if service.type == 'Hypermap:WorldMapLegacy':
        from hypermap.aggregator.models import update_layers_wm_legacy as update_layers_wm
    if service.type == 'Hypermap:WorldMap':
        from hypermap.aggregator.models import update_layers_geonode_wm as update_layers_wm

    update_layers_wm(service, num_layers)

    # Remove in search engine last num_layers that were deleted
    LOGGER.debug('Removing the index for the last %s deleted layers' % num_layers)
    layer_to_unindex = service.layer_set.filter(was_deleted=True).order_by('-last_updated')[0:num_layers]
    for layer in layer_to_unindex:
        if not settings.REGISTRY_SKIP_CELERY:
            unindex_layer(layer.id, use_cache=True)
        else:
            unindex_layer(layer.id)

    # Add/Update in search engine last num_layers that were added
    LOGGER.debug('Adding/Updating the index for the last %s added layers' % num_layers)
    layer_to_index = service.layer_set.filter(was_deleted=False).order_by('-last_updated')[0:num_layers]
    for layer in layer_to_index:
        if not settings.REGISTRY_SKIP_CELERY:
            index_layer(layer.id, use_cache=True)
        else:
            index_layer(layer.id)
python
[ "def", "update_last_wm_layers", "(", "self", ",", "service_id", ",", "num_layers", "=", "10", ")", ":", "from", "hypermap", ".", "aggregator", ".", "models", "import", "Service", "LOGGER", ".", "debug", "(", "'Updating the index the last %s added and %s deleted layers in WorldMap service'", "%", "(", "num_layers", ",", "num_layers", ")", ")", "service", "=", "Service", ".", "objects", ".", "get", "(", "id", "=", "service_id", ")", "# TODO raise error if service type is not WM type", "if", "service", ".", "type", "==", "'Hypermap:WorldMapLegacy'", ":", "from", "hypermap", ".", "aggregator", ".", "models", "import", "update_layers_wm_legacy", "as", "update_layers_wm", "if", "service", ".", "type", "==", "'Hypermap:WorldMap'", ":", "from", "hypermap", ".", "aggregator", ".", "models", "import", "update_layers_geonode_wm", "as", "update_layers_wm", "update_layers_wm", "(", "service", ",", "num_layers", ")", "# Remove in search engine last num_layers that were deleted", "LOGGER", ".", "debug", "(", "'Removing the index for the last %s deleted layers'", "%", "num_layers", ")", "layer_to_unindex", "=", "service", ".", "layer_set", ".", "filter", "(", "was_deleted", "=", "True", ")", ".", "order_by", "(", "'-last_updated'", ")", "[", "0", ":", "num_layers", "]", "for", "layer", "in", "layer_to_unindex", ":", "if", "not", "settings", ".", "REGISTRY_SKIP_CELERY", ":", "unindex_layer", "(", "layer", ".", "id", ",", "use_cache", "=", "True", ")", "else", ":", "unindex_layer", "(", "layer", ".", "id", ")", "# Add/Update in search engine last num_layers that were added", "LOGGER", ".", "debug", "(", "'Adding/Updating the index for the last %s added layers'", "%", "num_layers", ")", "layer_to_index", "=", "service", ".", "layer_set", ".", "filter", "(", "was_deleted", "=", "False", ")", ".", "order_by", "(", "'-last_updated'", ")", "[", "0", ":", "num_layers", "]", "for", "layer", "in", "layer_to_index", ":", "if", "not", "settings", ".", "REGISTRY_SKIP_CELERY", ":", "index_layer", "(", "layer", ".", "id", ",", "use_cache", "=", "True", ")", "else", ":", "index_layer", "(", "layer", ".", "id", ")" ]
Update and index the last added and deleted layers (num_layers) in WorldMap service.
[ "Update", "and", "index", "the", "last", "added", "and", "deleted", "layers", "(", "num_layers", ")", "in", "WorldMap", "service", "." ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/tasks.py#L364-L399
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
bbox2wktpolygon
def bbox2wktpolygon(bbox):
    """
    Return OGC WKT Polygon of a simple bbox list
    """
    try:
        minx = float(bbox[0])
        miny = float(bbox[1])
        maxx = float(bbox[2])
        maxy = float(bbox[3])
    except:
        LOGGER.debug("Invalid bbox, setting it to a zero POLYGON")
        minx = 0
        miny = 0
        maxx = 0
        maxy = 0

    return 'POLYGON((%.2f %.2f, %.2f %.2f, %.2f %.2f, %.2f %.2f, %.2f %.2f))' \
        % (minx, miny, minx, maxy, maxx, maxy, maxx, miny, minx, miny)
python
[ "def", "bbox2wktpolygon", "(", "bbox", ")", ":", "try", ":", "minx", "=", "float", "(", "bbox", "[", "0", "]", ")", "miny", "=", "float", "(", "bbox", "[", "1", "]", ")", "maxx", "=", "float", "(", "bbox", "[", "2", "]", ")", "maxy", "=", "float", "(", "bbox", "[", "3", "]", ")", "except", ":", "LOGGER", ".", "debug", "(", "\"Invalid bbox, setting it to a zero POLYGON\"", ")", "minx", "=", "0", "miny", "=", "0", "maxx", "=", "0", "maxy", "=", "0", "return", "'POLYGON((%.2f %.2f, %.2f %.2f, %.2f %.2f, %.2f %.2f, %.2f %.2f))'", "%", "(", "minx", ",", "miny", ",", "minx", ",", "maxy", ",", "maxx", ",", "maxy", ",", "maxx", ",", "miny", ",", "minx", ",", "miny", ")" ]
Return OGC WKT Polygon of a simple bbox list
[ "Return", "OGC", "WKT", "Polygon", "of", "a", "simple", "bbox", "list" ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L975-L994
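The WKT template used by bbox2wktpolygon above can be exercised on its own; the bbox values below are made up for illustration and are not part of the dataset record.

minx, miny, maxx, maxy = -71.19, 42.23, -70.99, 42.40
wkt = 'POLYGON((%.2f %.2f, %.2f %.2f, %.2f %.2f, %.2f %.2f, %.2f %.2f))' \
    % (minx, miny, minx, maxy, maxx, maxy, maxx, miny, minx, miny)
print(wkt)
# POLYGON((-71.19 42.23, -71.19 42.40, -70.99 42.40, -70.99 42.23, -71.19 42.23))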
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
create_metadata_record
def create_metadata_record(**kwargs):
    """
    Create a csw:Record XML document from harvested metadata
    """
    if 'srs' in kwargs:
        srs = kwargs['srs']
    else:
        srs = '4326'

    modified = '%sZ' % datetime.datetime.utcnow().isoformat().split('.')[0]

    nsmap = Namespaces().get_namespaces(['csw', 'dc', 'dct', 'ows'])

    e = etree.Element(nspath_eval('csw:Record', nsmap), nsmap=nsmap)

    etree.SubElement(e, nspath_eval('dc:identifier', nsmap)).text = kwargs['identifier']
    etree.SubElement(e, nspath_eval('dc:title', nsmap)).text = kwargs['title']

    if 'alternative' in kwargs:
        etree.SubElement(e, nspath_eval('dct:alternative', nsmap)).text = kwargs['alternative']

    etree.SubElement(e, nspath_eval('dct:modified', nsmap)).text = modified
    etree.SubElement(e, nspath_eval('dct:abstract', nsmap)).text = kwargs['abstract']
    etree.SubElement(e, nspath_eval('dc:type', nsmap)).text = kwargs['type']
    etree.SubElement(e, nspath_eval('dc:format', nsmap)).text = kwargs['format']
    etree.SubElement(e, nspath_eval('dc:source', nsmap)).text = kwargs['source']

    if 'relation' in kwargs:
        etree.SubElement(e, nspath_eval('dc:relation', nsmap)).text = kwargs['relation']

    if 'keywords' in kwargs:
        if kwargs['keywords'] is not None:
            for keyword in kwargs['keywords']:
                etree.SubElement(e, nspath_eval('dc:subject', nsmap)).text = keyword

    for link in kwargs['links']:
        etree.SubElement(e, nspath_eval('dct:references', nsmap), scheme=link[0]).text = link[1]

    bbox2 = loads(kwargs['wkt_geometry']).bounds
    bbox = etree.SubElement(e, nspath_eval('ows:BoundingBox', nsmap),
                            crs='http://www.opengis.net/def/crs/EPSG/0/%s' % srs,
                            dimensions='2')
    etree.SubElement(bbox, nspath_eval('ows:LowerCorner', nsmap)).text = '%s %s' % (bbox2[1], bbox2[0])
    etree.SubElement(bbox, nspath_eval('ows:UpperCorner', nsmap)).text = '%s %s' % (bbox2[3], bbox2[2])

    return etree.tostring(e, pretty_print=True)
python
[ "def", "create_metadata_record", "(", "*", "*", "kwargs", ")", ":", "if", "'srs'", "in", "kwargs", ":", "srs", "=", "kwargs", "[", "'srs'", "]", "else", ":", "srs", "=", "'4326'", "modified", "=", "'%sZ'", "%", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "isoformat", "(", ")", ".", "split", "(", "'.'", ")", "[", "0", "]", "nsmap", "=", "Namespaces", "(", ")", ".", "get_namespaces", "(", "[", "'csw'", ",", "'dc'", ",", "'dct'", ",", "'ows'", "]", ")", "e", "=", "etree", ".", "Element", "(", "nspath_eval", "(", "'csw:Record'", ",", "nsmap", ")", ",", "nsmap", "=", "nsmap", ")", "etree", ".", "SubElement", "(", "e", ",", "nspath_eval", "(", "'dc:identifier'", ",", "nsmap", ")", ")", ".", "text", "=", "kwargs", "[", "'identifier'", "]", "etree", ".", "SubElement", "(", "e", ",", "nspath_eval", "(", "'dc:title'", ",", "nsmap", ")", ")", ".", "text", "=", "kwargs", "[", "'title'", "]", "if", "'alternative'", "in", "kwargs", ":", "etree", ".", "SubElement", "(", "e", ",", "nspath_eval", "(", "'dct:alternative'", ",", "nsmap", ")", ")", ".", "text", "=", "kwargs", "[", "'alternative'", "]", "etree", ".", "SubElement", "(", "e", ",", "nspath_eval", "(", "'dct:modified'", ",", "nsmap", ")", ")", ".", "text", "=", "modified", "etree", ".", "SubElement", "(", "e", ",", "nspath_eval", "(", "'dct:abstract'", ",", "nsmap", ")", ")", ".", "text", "=", "kwargs", "[", "'abstract'", "]", "etree", ".", "SubElement", "(", "e", ",", "nspath_eval", "(", "'dc:type'", ",", "nsmap", ")", ")", ".", "text", "=", "kwargs", "[", "'type'", "]", "etree", ".", "SubElement", "(", "e", ",", "nspath_eval", "(", "'dc:format'", ",", "nsmap", ")", ")", ".", "text", "=", "kwargs", "[", "'format'", "]", "etree", ".", "SubElement", "(", "e", ",", "nspath_eval", "(", "'dc:source'", ",", "nsmap", ")", ")", ".", "text", "=", "kwargs", "[", "'source'", "]", "if", "'relation'", "in", "kwargs", ":", "etree", ".", "SubElement", "(", "e", ",", "nspath_eval", "(", "'dc:relation'", ",", "nsmap", ")", ")", ".", "text", "=", "kwargs", "[", "'relation'", "]", "if", "'keywords'", "in", "kwargs", ":", "if", "kwargs", "[", "'keywords'", "]", "is", "not", "None", ":", "for", "keyword", "in", "kwargs", "[", "'keywords'", "]", ":", "etree", ".", "SubElement", "(", "e", ",", "nspath_eval", "(", "'dc:subject'", ",", "nsmap", ")", ")", ".", "text", "=", "keyword", "for", "link", "in", "kwargs", "[", "'links'", "]", ":", "etree", ".", "SubElement", "(", "e", ",", "nspath_eval", "(", "'dct:references'", ",", "nsmap", ")", ",", "scheme", "=", "link", "[", "0", "]", ")", ".", "text", "=", "link", "[", "1", "]", "bbox2", "=", "loads", "(", "kwargs", "[", "'wkt_geometry'", "]", ")", ".", "bounds", "bbox", "=", "etree", ".", "SubElement", "(", "e", ",", "nspath_eval", "(", "'ows:BoundingBox'", ",", "nsmap", ")", ",", "crs", "=", "'http://www.opengis.net/def/crs/EPSG/0/%s'", "%", "srs", ",", "dimensions", "=", "'2'", ")", "etree", ".", "SubElement", "(", "bbox", ",", "nspath_eval", "(", "'ows:LowerCorner'", ",", "nsmap", ")", ")", ".", "text", "=", "'%s %s'", "%", "(", "bbox2", "[", "1", "]", ",", "bbox2", "[", "0", "]", ")", "etree", ".", "SubElement", "(", "bbox", ",", "nspath_eval", "(", "'ows:UpperCorner'", ",", "nsmap", ")", ")", ".", "text", "=", "'%s %s'", "%", "(", "bbox2", "[", "3", "]", ",", "bbox2", "[", "2", "]", ")", "return", "etree", ".", "tostring", "(", "e", ",", "pretty_print", "=", "True", ")" ]
Create a csw:Record XML document from harvested metadata
[ "Create", "a", "csw", ":", "Record", "XML", "document", "from", "harvested", "metadata" ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L997-L1042
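A minimal, hypothetical usage sketch for the record above; every value below is an illustrative placeholder rather than data from the repository, and the import path simply follows the func_path_in_repository field.

from hypermap.aggregator.models import create_metadata_record

# Build a csw:Record for an imaginary WMS layer; 'links' is a list of
# [scheme, url] pairs and 'wkt_geometry' must be a WKT polygon string.
xml_record = create_metadata_record(
    identifier='00000000-0000-0000-0000-000000000000',  # placeholder UUID
    source='http://example.com/wms?',                    # placeholder endpoint
    links=[['OGC:WMS', 'http://example.com/wms?']],
    format='OGC:WMS',
    type='dataset',
    relation='example-service-id',
    title='Example layer',
    alternative='example_layer',
    abstract='A placeholder abstract.',
    keywords=['example'],
    wkt_geometry='POLYGON((-180 -90, -180 90, 180 90, 180 -90, -180 -90))',
    srs='4326',
)
print(xml_record)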
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
gen_anytext
def gen_anytext(*args):
    """
    Convenience function to create bag of words for anytext property
    """

    bag = []
    for term in args:
        if term is not None:
            if isinstance(term, list):
                for term2 in term:
                    if term2 is not None:
                        bag.append(term2)
            else:
                bag.append(term)

    return ' '.join(bag)
python
def gen_anytext(*args):
    """
    Convenience function to create bag of words for anytext property
    """

    bag = []
    for term in args:
        if term is not None:
            if isinstance(term, list):
                for term2 in term:
                    if term2 is not None:
                        bag.append(term2)
            else:
                bag.append(term)

    return ' '.join(bag)
[ "def", "gen_anytext", "(", "*", "args", ")", ":", "bag", "=", "[", "]", "for", "term", "in", "args", ":", "if", "term", "is", "not", "None", ":", "if", "isinstance", "(", "term", ",", "list", ")", ":", "for", "term2", "in", "term", ":", "if", "term2", "is", "not", "None", ":", "bag", ".", "append", "(", "term2", ")", "else", ":", "bag", ".", "append", "(", "term", ")", "return", "' '", ".", "join", "(", "bag", ")" ]
Convenience function to create bag of words for anytext property
[ "Convenience", "function", "to", "create", "bag", "of", "words", "for", "anytext", "property" ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L1045-L1060
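A quick, hypothetical example of the flattening behaviour described above: list arguments are expanded and None values are skipped. The strings are placeholders.

from hypermap.aggregator.models import gen_anytext

anytext = gen_anytext('Example layer', 'A placeholder abstract', ['roads', None, 'transport'])
# anytext == 'Example layer A placeholder abstract roads transport'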
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
update_layers_wmts
def update_layers_wmts(service): """ Update layers for an OGC:WMTS service. Sample endpoint: http://map1.vis.earthdata.nasa.gov/wmts-geo/1.0.0/WMTSCapabilities.xml """ try: wmts = WebMapTileService(service.url) # set srs # WMTS is always in 4326 srs, created = SpatialReferenceSystem.objects.get_or_create(code='EPSG:4326') service.srs.add(srs) service.update_validity() layer_names = list(wmts.contents) layer_n = 0 total = len(layer_names) for layer_name in layer_names: ows_layer = wmts.contents[layer_name] LOGGER.debug('Updating layer %s' % ows_layer.name) layer, created = Layer.objects.get_or_create(name=ows_layer.name, service=service, catalog=service.catalog) if layer.active: links = [['OGC:WMTS', service.url], ['OGC:WMTS', settings.SITE_URL.rstrip('/') + '/' + layer.get_url_endpoint()]] layer.type = 'OGC:WMTS' layer.title = ows_layer.title layer.abstract = ows_layer.abstract # keywords # @tomkralidis wmts does not seem to support this attribute keywords = None if hasattr(ows_layer, 'keywords'): keywords = ows_layer.keywords for keyword in keywords: layer.keywords.add(keyword) layer.url = service.url layer.page_url = layer.get_absolute_url links.append([ 'WWW:LINK', settings.SITE_URL.rstrip('/') + layer.page_url ]) bbox = list(ows_layer.boundingBoxWGS84 or (-179.0, -89.0, 179.0, 89.0)) layer.bbox_x0 = bbox[0] layer.bbox_y0 = bbox[1] layer.bbox_x1 = bbox[2] layer.bbox_y1 = bbox[3] layer.wkt_geometry = bbox2wktpolygon(bbox) layer.xml = create_metadata_record( identifier=str(layer.uuid), source=service.url, links=links, format='OGC:WMS', type=layer.csw_type, relation=service.id_string, title=ows_layer.title, alternative=ows_layer.name, abstract=layer.abstract, keywords=keywords, wkt_geometry=layer.wkt_geometry ) layer.anytext = gen_anytext(layer.title, layer.abstract, keywords) layer.save() # dates add_mined_dates(layer) layer_n = layer_n + 1 # exits if DEBUG_SERVICES LOGGER.debug("Updating layer n. %s/%s" % (layer_n, total)) if DEBUG_SERVICES and layer_n == DEBUG_LAYER_NUMBER: return except Exception as err: message = "update_layers_wmts: {0}".format( err ) check = Check( content_object=service, success=False, response_time=0, message=message ) check.save()
python
def update_layers_wmts(service): """ Update layers for an OGC:WMTS service. Sample endpoint: http://map1.vis.earthdata.nasa.gov/wmts-geo/1.0.0/WMTSCapabilities.xml """ try: wmts = WebMapTileService(service.url) # set srs # WMTS is always in 4326 srs, created = SpatialReferenceSystem.objects.get_or_create(code='EPSG:4326') service.srs.add(srs) service.update_validity() layer_names = list(wmts.contents) layer_n = 0 total = len(layer_names) for layer_name in layer_names: ows_layer = wmts.contents[layer_name] LOGGER.debug('Updating layer %s' % ows_layer.name) layer, created = Layer.objects.get_or_create(name=ows_layer.name, service=service, catalog=service.catalog) if layer.active: links = [['OGC:WMTS', service.url], ['OGC:WMTS', settings.SITE_URL.rstrip('/') + '/' + layer.get_url_endpoint()]] layer.type = 'OGC:WMTS' layer.title = ows_layer.title layer.abstract = ows_layer.abstract # keywords # @tomkralidis wmts does not seem to support this attribute keywords = None if hasattr(ows_layer, 'keywords'): keywords = ows_layer.keywords for keyword in keywords: layer.keywords.add(keyword) layer.url = service.url layer.page_url = layer.get_absolute_url links.append([ 'WWW:LINK', settings.SITE_URL.rstrip('/') + layer.page_url ]) bbox = list(ows_layer.boundingBoxWGS84 or (-179.0, -89.0, 179.0, 89.0)) layer.bbox_x0 = bbox[0] layer.bbox_y0 = bbox[1] layer.bbox_x1 = bbox[2] layer.bbox_y1 = bbox[3] layer.wkt_geometry = bbox2wktpolygon(bbox) layer.xml = create_metadata_record( identifier=str(layer.uuid), source=service.url, links=links, format='OGC:WMS', type=layer.csw_type, relation=service.id_string, title=ows_layer.title, alternative=ows_layer.name, abstract=layer.abstract, keywords=keywords, wkt_geometry=layer.wkt_geometry ) layer.anytext = gen_anytext(layer.title, layer.abstract, keywords) layer.save() # dates add_mined_dates(layer) layer_n = layer_n + 1 # exits if DEBUG_SERVICES LOGGER.debug("Updating layer n. %s/%s" % (layer_n, total)) if DEBUG_SERVICES and layer_n == DEBUG_LAYER_NUMBER: return except Exception as err: message = "update_layers_wmts: {0}".format( err ) check = Check( content_object=service, success=False, response_time=0, message=message ) check.save()
[ "def", "update_layers_wmts", "(", "service", ")", ":", "try", ":", "wmts", "=", "WebMapTileService", "(", "service", ".", "url", ")", "# set srs", "# WMTS is always in 4326", "srs", ",", "created", "=", "SpatialReferenceSystem", ".", "objects", ".", "get_or_create", "(", "code", "=", "'EPSG:4326'", ")", "service", ".", "srs", ".", "add", "(", "srs", ")", "service", ".", "update_validity", "(", ")", "layer_names", "=", "list", "(", "wmts", ".", "contents", ")", "layer_n", "=", "0", "total", "=", "len", "(", "layer_names", ")", "for", "layer_name", "in", "layer_names", ":", "ows_layer", "=", "wmts", ".", "contents", "[", "layer_name", "]", "LOGGER", ".", "debug", "(", "'Updating layer %s'", "%", "ows_layer", ".", "name", ")", "layer", ",", "created", "=", "Layer", ".", "objects", ".", "get_or_create", "(", "name", "=", "ows_layer", ".", "name", ",", "service", "=", "service", ",", "catalog", "=", "service", ".", "catalog", ")", "if", "layer", ".", "active", ":", "links", "=", "[", "[", "'OGC:WMTS'", ",", "service", ".", "url", "]", ",", "[", "'OGC:WMTS'", ",", "settings", ".", "SITE_URL", ".", "rstrip", "(", "'/'", ")", "+", "'/'", "+", "layer", ".", "get_url_endpoint", "(", ")", "]", "]", "layer", ".", "type", "=", "'OGC:WMTS'", "layer", ".", "title", "=", "ows_layer", ".", "title", "layer", ".", "abstract", "=", "ows_layer", ".", "abstract", "# keywords", "# @tomkralidis wmts does not seem to support this attribute", "keywords", "=", "None", "if", "hasattr", "(", "ows_layer", ",", "'keywords'", ")", ":", "keywords", "=", "ows_layer", ".", "keywords", "for", "keyword", "in", "keywords", ":", "layer", ".", "keywords", ".", "add", "(", "keyword", ")", "layer", ".", "url", "=", "service", ".", "url", "layer", ".", "page_url", "=", "layer", ".", "get_absolute_url", "links", ".", "append", "(", "[", "'WWW:LINK'", ",", "settings", ".", "SITE_URL", ".", "rstrip", "(", "'/'", ")", "+", "layer", ".", "page_url", "]", ")", "bbox", "=", "list", "(", "ows_layer", ".", "boundingBoxWGS84", "or", "(", "-", "179.0", ",", "-", "89.0", ",", "179.0", ",", "89.0", ")", ")", "layer", ".", "bbox_x0", "=", "bbox", "[", "0", "]", "layer", ".", "bbox_y0", "=", "bbox", "[", "1", "]", "layer", ".", "bbox_x1", "=", "bbox", "[", "2", "]", "layer", ".", "bbox_y1", "=", "bbox", "[", "3", "]", "layer", ".", "wkt_geometry", "=", "bbox2wktpolygon", "(", "bbox", ")", "layer", ".", "xml", "=", "create_metadata_record", "(", "identifier", "=", "str", "(", "layer", ".", "uuid", ")", ",", "source", "=", "service", ".", "url", ",", "links", "=", "links", ",", "format", "=", "'OGC:WMS'", ",", "type", "=", "layer", ".", "csw_type", ",", "relation", "=", "service", ".", "id_string", ",", "title", "=", "ows_layer", ".", "title", ",", "alternative", "=", "ows_layer", ".", "name", ",", "abstract", "=", "layer", ".", "abstract", ",", "keywords", "=", "keywords", ",", "wkt_geometry", "=", "layer", ".", "wkt_geometry", ")", "layer", ".", "anytext", "=", "gen_anytext", "(", "layer", ".", "title", ",", "layer", ".", "abstract", ",", "keywords", ")", "layer", ".", "save", "(", ")", "# dates", "add_mined_dates", "(", "layer", ")", "layer_n", "=", "layer_n", "+", "1", "# exits if DEBUG_SERVICES", "LOGGER", ".", "debug", "(", "\"Updating layer n. 
%s/%s\"", "%", "(", "layer_n", ",", "total", ")", ")", "if", "DEBUG_SERVICES", "and", "layer_n", "==", "DEBUG_LAYER_NUMBER", ":", "return", "except", "Exception", "as", "err", ":", "message", "=", "\"update_layers_wmts: {0}\"", ".", "format", "(", "err", ")", "check", "=", "Check", "(", "content_object", "=", "service", ",", "success", "=", "False", ",", "response_time", "=", "0", ",", "message", "=", "message", ")", "check", ".", "save", "(", ")" ]
Update layers for an OGC:WMTS service. Sample endpoint: http://map1.vis.earthdata.nasa.gov/wmts-geo/1.0.0/WMTSCapabilities.xml
[ "Update", "layers", "for", "an", "OGC", ":", "WMTS", "service", ".", "Sample", "endpoint", ":", "http", ":", "//", "map1", ".", "vis", ".", "earthdata", ".", "nasa", ".", "gov", "/", "wmts", "-", "geo", "/", "1", ".", "0", ".", "0", "/", "WMTSCapabilities", ".", "xml" ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L1156-L1235
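A hypothetical harvesting sketch for the function above, reusing the sample endpoint quoted in its docstring; it assumes a Service row of type OGC:WMTS already exists in the registry database.

from hypermap.aggregator.models import Service, update_layers_wmts

service = Service.objects.get(
    url='http://map1.vis.earthdata.nasa.gov/wmts-geo/1.0.0/WMTSCapabilities.xml')
update_layers_wmts(service)  # creates/updates one Layer row per WMTS layer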
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
update_layers_geonode_wm
def update_layers_geonode_wm(service, num_layers=None): """ Update layers for a WorldMap instance. Sample endpoint: http://localhost:8000/ """ wm_api_url = urlparse.urljoin(service.url, 'worldmap/api/2.8/layer/?format=json') if num_layers: total = num_layers else: response = requests.get(wm_api_url) data = json.loads(response.content) total = data['meta']['total_count'] # set srs # WorldMap supports only 4326, 900913, 3857 for crs_code in ['EPSG:4326', 'EPSG:900913', 'EPSG:3857']: srs, created = SpatialReferenceSystem.objects.get_or_create(code=crs_code) service.srs.add(srs) service.update_validity() layer_n = 0 limit = 10 for i in range(0, total, limit): try: url = ( '%s&order_by=-date&offset=%s&limit=%s' % (wm_api_url, i, limit) ) LOGGER.debug('Fetching %s' % url) response = requests.get(url) data = json.loads(response.content) for row in data['objects']: typename = row['typename'] # name = typename.split(':')[1] name = typename uuid = row['uuid'] LOGGER.debug('Updating layer %s' % name) title = row['title'] abstract = row['abstract'] bbox = row['bbox'] page_url = urlparse.urljoin(service.url, 'data/%s' % name) category = '' if 'topic_category' in row: category = row['topic_category'] username = '' if 'owner_username' in row: username = row['owner_username'] temporal_extent_start = '' if 'temporal_extent_start' in row: temporal_extent_start = row['temporal_extent_start'] temporal_extent_end = '' if 'temporal_extent_end' in row: temporal_extent_end = row['temporal_extent_end'] # we use the geoserver virtual layer getcapabilities for wm endpoint # TODO we should port make geoserver port configurable some way... # endpoint = urlparse.urljoin(service.url, 'geoserver/geonode/%s/wms?' % name) endpoint = urlparse.urljoin(service.url, 'geoserver/wms?') endpoint = endpoint.replace('8000', '8080') print endpoint if 'is_public' in row: is_public = row['is_public'] layer, created = Layer.objects.get_or_create( service=service, catalog=service.catalog, name=name, uuid=uuid) if created: LOGGER.debug('Added a new layer in registry: %s, %s' % (name, uuid)) if layer.active: links = [['Hypermap:WorldMap', endpoint]] # update fields layer.type = 'Hypermap:WorldMap' layer.title = title layer.abstract = abstract layer.is_public = is_public layer.url = endpoint layer.page_url = page_url # category and owner username layer_wm, created = LayerWM.objects.get_or_create(layer=layer) layer_wm.category = category layer_wm.username = username layer_wm.temporal_extent_start = temporal_extent_start layer_wm.temporal_extent_end = temporal_extent_end layer_wm.save() # bbox [x0, y0, x1, y1] # check if it is a valid bbox (TODO improve this check) # bbox = bbox.replace('-inf', 'None') # bbox = bbox.replace('inf', 'None') # if bbox.count(',') == 3: # bbox_list = bbox[1:-1].split(',') # else: # bbox_list = [None, None, None, None] x0 = format_float(bbox[0]) x1 = format_float(bbox[1]) y0 = format_float(bbox[2]) y1 = format_float(bbox[3]) # In many cases for some reason to be fixed GeoServer has x coordinates flipped in WM. 
x0, x1 = flip_coordinates(x0, x1) y0, y1 = flip_coordinates(y0, y1) layer.bbox_x0 = x0 layer.bbox_y0 = y0 layer.bbox_x1 = x1 layer.bbox_y1 = y1 # keywords keywords = [] for keyword in row['keywords']: keywords.append(keyword['name']) layer.keywords.all().delete() for keyword in keywords: layer.keywords.add(keyword) layer.wkt_geometry = bbox2wktpolygon([x0, y0, x1, y1]) layer.xml = create_metadata_record( identifier=str(layer.uuid), source=endpoint, links=links, format='Hypermap:WorldMap', type=layer.csw_type, relation=service.id_string, title=layer.title, alternative=name, abstract=layer.abstract, keywords=keywords, wkt_geometry=layer.wkt_geometry ) layer.anytext = gen_anytext(layer.title, layer.abstract, keywords) layer.save() # dates add_mined_dates(layer) add_metadata_dates_to_layer([layer_wm.temporal_extent_start, layer_wm.temporal_extent_end], layer) layer_n = layer_n + 1 # exits if DEBUG_SERVICES LOGGER.debug("Updated layer n. %s/%s" % (layer_n, total)) if DEBUG_SERVICES and layer_n == DEBUG_LAYER_NUMBER: return except Exception as err: LOGGER.error('Error! %s' % err) # update deleted layers. For now we check the whole set of deleted layers # we should optimize it if the list will grow # TODO implement the actions application url = urlparse.urljoin(service.url, 'worldmap/api/2.8/actionlayerdelete/?format=json') LOGGER.debug('Fetching %s for detecting deleted layers' % url) try: response = requests.get(url) data = json.loads(response.content) for deleted_layer in data['objects']: if Layer.objects.filter(uuid=deleted_layer['args']).count() > 0: layer = Layer.objects.get(uuid=deleted_layer['args']) layer.was_deleted = True layer.save() LOGGER.debug('Layer %s marked as deleted' % layer.uuid) except Exception as err: LOGGER.error('Error! %s' % err)
python
def update_layers_geonode_wm(service, num_layers=None): """ Update layers for a WorldMap instance. Sample endpoint: http://localhost:8000/ """ wm_api_url = urlparse.urljoin(service.url, 'worldmap/api/2.8/layer/?format=json') if num_layers: total = num_layers else: response = requests.get(wm_api_url) data = json.loads(response.content) total = data['meta']['total_count'] # set srs # WorldMap supports only 4326, 900913, 3857 for crs_code in ['EPSG:4326', 'EPSG:900913', 'EPSG:3857']: srs, created = SpatialReferenceSystem.objects.get_or_create(code=crs_code) service.srs.add(srs) service.update_validity() layer_n = 0 limit = 10 for i in range(0, total, limit): try: url = ( '%s&order_by=-date&offset=%s&limit=%s' % (wm_api_url, i, limit) ) LOGGER.debug('Fetching %s' % url) response = requests.get(url) data = json.loads(response.content) for row in data['objects']: typename = row['typename'] # name = typename.split(':')[1] name = typename uuid = row['uuid'] LOGGER.debug('Updating layer %s' % name) title = row['title'] abstract = row['abstract'] bbox = row['bbox'] page_url = urlparse.urljoin(service.url, 'data/%s' % name) category = '' if 'topic_category' in row: category = row['topic_category'] username = '' if 'owner_username' in row: username = row['owner_username'] temporal_extent_start = '' if 'temporal_extent_start' in row: temporal_extent_start = row['temporal_extent_start'] temporal_extent_end = '' if 'temporal_extent_end' in row: temporal_extent_end = row['temporal_extent_end'] # we use the geoserver virtual layer getcapabilities for wm endpoint # TODO we should port make geoserver port configurable some way... # endpoint = urlparse.urljoin(service.url, 'geoserver/geonode/%s/wms?' % name) endpoint = urlparse.urljoin(service.url, 'geoserver/wms?') endpoint = endpoint.replace('8000', '8080') print endpoint if 'is_public' in row: is_public = row['is_public'] layer, created = Layer.objects.get_or_create( service=service, catalog=service.catalog, name=name, uuid=uuid) if created: LOGGER.debug('Added a new layer in registry: %s, %s' % (name, uuid)) if layer.active: links = [['Hypermap:WorldMap', endpoint]] # update fields layer.type = 'Hypermap:WorldMap' layer.title = title layer.abstract = abstract layer.is_public = is_public layer.url = endpoint layer.page_url = page_url # category and owner username layer_wm, created = LayerWM.objects.get_or_create(layer=layer) layer_wm.category = category layer_wm.username = username layer_wm.temporal_extent_start = temporal_extent_start layer_wm.temporal_extent_end = temporal_extent_end layer_wm.save() # bbox [x0, y0, x1, y1] # check if it is a valid bbox (TODO improve this check) # bbox = bbox.replace('-inf', 'None') # bbox = bbox.replace('inf', 'None') # if bbox.count(',') == 3: # bbox_list = bbox[1:-1].split(',') # else: # bbox_list = [None, None, None, None] x0 = format_float(bbox[0]) x1 = format_float(bbox[1]) y0 = format_float(bbox[2]) y1 = format_float(bbox[3]) # In many cases for some reason to be fixed GeoServer has x coordinates flipped in WM. 
x0, x1 = flip_coordinates(x0, x1) y0, y1 = flip_coordinates(y0, y1) layer.bbox_x0 = x0 layer.bbox_y0 = y0 layer.bbox_x1 = x1 layer.bbox_y1 = y1 # keywords keywords = [] for keyword in row['keywords']: keywords.append(keyword['name']) layer.keywords.all().delete() for keyword in keywords: layer.keywords.add(keyword) layer.wkt_geometry = bbox2wktpolygon([x0, y0, x1, y1]) layer.xml = create_metadata_record( identifier=str(layer.uuid), source=endpoint, links=links, format='Hypermap:WorldMap', type=layer.csw_type, relation=service.id_string, title=layer.title, alternative=name, abstract=layer.abstract, keywords=keywords, wkt_geometry=layer.wkt_geometry ) layer.anytext = gen_anytext(layer.title, layer.abstract, keywords) layer.save() # dates add_mined_dates(layer) add_metadata_dates_to_layer([layer_wm.temporal_extent_start, layer_wm.temporal_extent_end], layer) layer_n = layer_n + 1 # exits if DEBUG_SERVICES LOGGER.debug("Updated layer n. %s/%s" % (layer_n, total)) if DEBUG_SERVICES and layer_n == DEBUG_LAYER_NUMBER: return except Exception as err: LOGGER.error('Error! %s' % err) # update deleted layers. For now we check the whole set of deleted layers # we should optimize it if the list will grow # TODO implement the actions application url = urlparse.urljoin(service.url, 'worldmap/api/2.8/actionlayerdelete/?format=json') LOGGER.debug('Fetching %s for detecting deleted layers' % url) try: response = requests.get(url) data = json.loads(response.content) for deleted_layer in data['objects']: if Layer.objects.filter(uuid=deleted_layer['args']).count() > 0: layer = Layer.objects.get(uuid=deleted_layer['args']) layer.was_deleted = True layer.save() LOGGER.debug('Layer %s marked as deleted' % layer.uuid) except Exception as err: LOGGER.error('Error! %s' % err)
[ "def", "update_layers_geonode_wm", "(", "service", ",", "num_layers", "=", "None", ")", ":", "wm_api_url", "=", "urlparse", ".", "urljoin", "(", "service", ".", "url", ",", "'worldmap/api/2.8/layer/?format=json'", ")", "if", "num_layers", ":", "total", "=", "num_layers", "else", ":", "response", "=", "requests", ".", "get", "(", "wm_api_url", ")", "data", "=", "json", ".", "loads", "(", "response", ".", "content", ")", "total", "=", "data", "[", "'meta'", "]", "[", "'total_count'", "]", "# set srs", "# WorldMap supports only 4326, 900913, 3857", "for", "crs_code", "in", "[", "'EPSG:4326'", ",", "'EPSG:900913'", ",", "'EPSG:3857'", "]", ":", "srs", ",", "created", "=", "SpatialReferenceSystem", ".", "objects", ".", "get_or_create", "(", "code", "=", "crs_code", ")", "service", ".", "srs", ".", "add", "(", "srs", ")", "service", ".", "update_validity", "(", ")", "layer_n", "=", "0", "limit", "=", "10", "for", "i", "in", "range", "(", "0", ",", "total", ",", "limit", ")", ":", "try", ":", "url", "=", "(", "'%s&order_by=-date&offset=%s&limit=%s'", "%", "(", "wm_api_url", ",", "i", ",", "limit", ")", ")", "LOGGER", ".", "debug", "(", "'Fetching %s'", "%", "url", ")", "response", "=", "requests", ".", "get", "(", "url", ")", "data", "=", "json", ".", "loads", "(", "response", ".", "content", ")", "for", "row", "in", "data", "[", "'objects'", "]", ":", "typename", "=", "row", "[", "'typename'", "]", "# name = typename.split(':')[1]", "name", "=", "typename", "uuid", "=", "row", "[", "'uuid'", "]", "LOGGER", ".", "debug", "(", "'Updating layer %s'", "%", "name", ")", "title", "=", "row", "[", "'title'", "]", "abstract", "=", "row", "[", "'abstract'", "]", "bbox", "=", "row", "[", "'bbox'", "]", "page_url", "=", "urlparse", ".", "urljoin", "(", "service", ".", "url", ",", "'data/%s'", "%", "name", ")", "category", "=", "''", "if", "'topic_category'", "in", "row", ":", "category", "=", "row", "[", "'topic_category'", "]", "username", "=", "''", "if", "'owner_username'", "in", "row", ":", "username", "=", "row", "[", "'owner_username'", "]", "temporal_extent_start", "=", "''", "if", "'temporal_extent_start'", "in", "row", ":", "temporal_extent_start", "=", "row", "[", "'temporal_extent_start'", "]", "temporal_extent_end", "=", "''", "if", "'temporal_extent_end'", "in", "row", ":", "temporal_extent_end", "=", "row", "[", "'temporal_extent_end'", "]", "# we use the geoserver virtual layer getcapabilities for wm endpoint", "# TODO we should port make geoserver port configurable some way...", "# endpoint = urlparse.urljoin(service.url, 'geoserver/geonode/%s/wms?' 
% name)", "endpoint", "=", "urlparse", ".", "urljoin", "(", "service", ".", "url", ",", "'geoserver/wms?'", ")", "endpoint", "=", "endpoint", ".", "replace", "(", "'8000'", ",", "'8080'", ")", "print", "endpoint", "if", "'is_public'", "in", "row", ":", "is_public", "=", "row", "[", "'is_public'", "]", "layer", ",", "created", "=", "Layer", ".", "objects", ".", "get_or_create", "(", "service", "=", "service", ",", "catalog", "=", "service", ".", "catalog", ",", "name", "=", "name", ",", "uuid", "=", "uuid", ")", "if", "created", ":", "LOGGER", ".", "debug", "(", "'Added a new layer in registry: %s, %s'", "%", "(", "name", ",", "uuid", ")", ")", "if", "layer", ".", "active", ":", "links", "=", "[", "[", "'Hypermap:WorldMap'", ",", "endpoint", "]", "]", "# update fields", "layer", ".", "type", "=", "'Hypermap:WorldMap'", "layer", ".", "title", "=", "title", "layer", ".", "abstract", "=", "abstract", "layer", ".", "is_public", "=", "is_public", "layer", ".", "url", "=", "endpoint", "layer", ".", "page_url", "=", "page_url", "# category and owner username", "layer_wm", ",", "created", "=", "LayerWM", ".", "objects", ".", "get_or_create", "(", "layer", "=", "layer", ")", "layer_wm", ".", "category", "=", "category", "layer_wm", ".", "username", "=", "username", "layer_wm", ".", "temporal_extent_start", "=", "temporal_extent_start", "layer_wm", ".", "temporal_extent_end", "=", "temporal_extent_end", "layer_wm", ".", "save", "(", ")", "# bbox [x0, y0, x1, y1]", "# check if it is a valid bbox (TODO improve this check)", "# bbox = bbox.replace('-inf', 'None')", "# bbox = bbox.replace('inf', 'None')", "# if bbox.count(',') == 3:", "# bbox_list = bbox[1:-1].split(',')", "# else:", "# bbox_list = [None, None, None, None]", "x0", "=", "format_float", "(", "bbox", "[", "0", "]", ")", "x1", "=", "format_float", "(", "bbox", "[", "1", "]", ")", "y0", "=", "format_float", "(", "bbox", "[", "2", "]", ")", "y1", "=", "format_float", "(", "bbox", "[", "3", "]", ")", "# In many cases for some reason to be fixed GeoServer has x coordinates flipped in WM.", "x0", ",", "x1", "=", "flip_coordinates", "(", "x0", ",", "x1", ")", "y0", ",", "y1", "=", "flip_coordinates", "(", "y0", ",", "y1", ")", "layer", ".", "bbox_x0", "=", "x0", "layer", ".", "bbox_y0", "=", "y0", "layer", ".", "bbox_x1", "=", "x1", "layer", ".", "bbox_y1", "=", "y1", "# keywords", "keywords", "=", "[", "]", "for", "keyword", "in", "row", "[", "'keywords'", "]", ":", "keywords", ".", "append", "(", "keyword", "[", "'name'", "]", ")", "layer", ".", "keywords", ".", "all", "(", ")", ".", "delete", "(", ")", "for", "keyword", "in", "keywords", ":", "layer", ".", "keywords", ".", "add", "(", "keyword", ")", "layer", ".", "wkt_geometry", "=", "bbox2wktpolygon", "(", "[", "x0", ",", "y0", ",", "x1", ",", "y1", "]", ")", "layer", ".", "xml", "=", "create_metadata_record", "(", "identifier", "=", "str", "(", "layer", ".", "uuid", ")", ",", "source", "=", "endpoint", ",", "links", "=", "links", ",", "format", "=", "'Hypermap:WorldMap'", ",", "type", "=", "layer", ".", "csw_type", ",", "relation", "=", "service", ".", "id_string", ",", "title", "=", "layer", ".", "title", ",", "alternative", "=", "name", ",", "abstract", "=", "layer", ".", "abstract", ",", "keywords", "=", "keywords", ",", "wkt_geometry", "=", "layer", ".", "wkt_geometry", ")", "layer", ".", "anytext", "=", "gen_anytext", "(", "layer", ".", "title", ",", "layer", ".", "abstract", ",", "keywords", ")", "layer", ".", "save", "(", ")", "# dates", "add_mined_dates", "(", "layer", ")", 
"add_metadata_dates_to_layer", "(", "[", "layer_wm", ".", "temporal_extent_start", ",", "layer_wm", ".", "temporal_extent_end", "]", ",", "layer", ")", "layer_n", "=", "layer_n", "+", "1", "# exits if DEBUG_SERVICES", "LOGGER", ".", "debug", "(", "\"Updated layer n. %s/%s\"", "%", "(", "layer_n", ",", "total", ")", ")", "if", "DEBUG_SERVICES", "and", "layer_n", "==", "DEBUG_LAYER_NUMBER", ":", "return", "except", "Exception", "as", "err", ":", "LOGGER", ".", "error", "(", "'Error! %s'", "%", "err", ")", "# update deleted layers. For now we check the whole set of deleted layers", "# we should optimize it if the list will grow", "# TODO implement the actions application", "url", "=", "urlparse", ".", "urljoin", "(", "service", ".", "url", ",", "'worldmap/api/2.8/actionlayerdelete/?format=json'", ")", "LOGGER", ".", "debug", "(", "'Fetching %s for detecting deleted layers'", "%", "url", ")", "try", ":", "response", "=", "requests", ".", "get", "(", "url", ")", "data", "=", "json", ".", "loads", "(", "response", ".", "content", ")", "for", "deleted_layer", "in", "data", "[", "'objects'", "]", ":", "if", "Layer", ".", "objects", ".", "filter", "(", "uuid", "=", "deleted_layer", "[", "'args'", "]", ")", ".", "count", "(", ")", ">", "0", ":", "layer", "=", "Layer", ".", "objects", ".", "get", "(", "uuid", "=", "deleted_layer", "[", "'args'", "]", ")", "layer", ".", "was_deleted", "=", "True", "layer", ".", "save", "(", ")", "LOGGER", ".", "debug", "(", "'Layer %s marked as deleted'", "%", "layer", ".", "uuid", ")", "except", "Exception", "as", "err", ":", "LOGGER", ".", "error", "(", "'Error! %s'", "%", "err", ")" ]
Update layers for a WorldMap instance. Sample endpoint: http://localhost:8000/
[ "Update", "layers", "for", "a", "WorldMap", "instance", ".", "Sample", "endpoint", ":", "http", ":", "//", "localhost", ":", "8000", "/" ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L1238-L1390
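A hypothetical sketch for the WorldMap harvester above; num_layers caps how many layers are paged through (the 50 here is arbitrary), and the endpoint is the sample one from the docstring. A registered Service row is assumed to exist.

from hypermap.aggregator.models import Service, update_layers_geonode_wm

service = Service.objects.get(url='http://localhost:8000/')
update_layers_geonode_wm(service, num_layers=50)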
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
update_layers_warper
def update_layers_warper(service): """ Update layers for a Warper service. Sample endpoint: http://warp.worldmap.harvard.edu/maps """ params = {'field': 'title', 'query': '', 'show_warped': '1', 'format': 'json'} headers = {'Content-Type': 'application/json', 'Accept': 'application/json'} request = requests.get(service.url, headers=headers, params=params) try: records = json.loads(request.content) total_pages = int(records['total_pages']) # set srs # Warper supports only 4326, 900913, 3857 for crs_code in ['EPSG:4326', 'EPSG:900913', 'EPSG:3857']: srs, created = SpatialReferenceSystem.objects.get_or_create(code=crs_code) service.srs.add(srs) service.update_validity() for i in range(1, total_pages + 1): params = {'field': 'title', 'query': '', 'show_warped': '1', 'format': 'json', 'page': i} request = requests.get(service.url, headers=headers, params=params) records = json.loads(request.content) LOGGER.debug('Fetched %s' % request.url) layers = records['items'] layer_n = 0 total = len(layers) for layer in layers: name = layer['id'] title = layer['title'] abstract = layer['description'] bbox = layer['bbox'] # dates dates = [] if 'published_date' in layer: dates.append(layer['published_date']) if 'date_depicted' in layer: dates.append(layer['date_depicted']) if 'depicts_year' in layer: dates.append(layer['depicts_year']) if 'issue_year' in layer: dates.append(layer['issue_year']) layer, created = Layer.objects.get_or_create(name=name, service=service, catalog=service.catalog) if layer.active: # update fields # links = [['OGC:WMTS', settings.SITE_URL.rstrip('/') + '/' + layer.get_url_endpoint()]] layer.type = 'Hypermap:WARPER' layer.title = title layer.abstract = abstract layer.is_public = True layer.url = '%s/wms/%s?' % (service.url, name) layer.page_url = '%s/%s' % (service.url, name) # bbox x0 = None y0 = None x1 = None y1 = None if bbox: bbox_list = bbox.split(',') x0 = format_float(bbox_list[0]) y0 = format_float(bbox_list[1]) x1 = format_float(bbox_list[2]) y1 = format_float(bbox_list[3]) layer.bbox_x0 = x0 layer.bbox_y0 = y0 layer.bbox_x1 = x1 layer.bbox_y1 = y1 layer.save() # dates add_mined_dates(layer) add_metadata_dates_to_layer(dates, layer) layer_n = layer_n + 1 # exits if DEBUG_SERVICES LOGGER.debug("Updating layer n. %s/%s" % (layer_n, total)) if DEBUG_SERVICES and layer_n == DEBUG_LAYER_NUMBER: return except Exception as err: message = "update_layers_warper: {0}. request={1} response={2}".format( err, service.url, request.text ) check = Check( content_object=service, success=False, response_time=0, message=message ) check.save()
python
def update_layers_warper(service): """ Update layers for a Warper service. Sample endpoint: http://warp.worldmap.harvard.edu/maps """ params = {'field': 'title', 'query': '', 'show_warped': '1', 'format': 'json'} headers = {'Content-Type': 'application/json', 'Accept': 'application/json'} request = requests.get(service.url, headers=headers, params=params) try: records = json.loads(request.content) total_pages = int(records['total_pages']) # set srs # Warper supports only 4326, 900913, 3857 for crs_code in ['EPSG:4326', 'EPSG:900913', 'EPSG:3857']: srs, created = SpatialReferenceSystem.objects.get_or_create(code=crs_code) service.srs.add(srs) service.update_validity() for i in range(1, total_pages + 1): params = {'field': 'title', 'query': '', 'show_warped': '1', 'format': 'json', 'page': i} request = requests.get(service.url, headers=headers, params=params) records = json.loads(request.content) LOGGER.debug('Fetched %s' % request.url) layers = records['items'] layer_n = 0 total = len(layers) for layer in layers: name = layer['id'] title = layer['title'] abstract = layer['description'] bbox = layer['bbox'] # dates dates = [] if 'published_date' in layer: dates.append(layer['published_date']) if 'date_depicted' in layer: dates.append(layer['date_depicted']) if 'depicts_year' in layer: dates.append(layer['depicts_year']) if 'issue_year' in layer: dates.append(layer['issue_year']) layer, created = Layer.objects.get_or_create(name=name, service=service, catalog=service.catalog) if layer.active: # update fields # links = [['OGC:WMTS', settings.SITE_URL.rstrip('/') + '/' + layer.get_url_endpoint()]] layer.type = 'Hypermap:WARPER' layer.title = title layer.abstract = abstract layer.is_public = True layer.url = '%s/wms/%s?' % (service.url, name) layer.page_url = '%s/%s' % (service.url, name) # bbox x0 = None y0 = None x1 = None y1 = None if bbox: bbox_list = bbox.split(',') x0 = format_float(bbox_list[0]) y0 = format_float(bbox_list[1]) x1 = format_float(bbox_list[2]) y1 = format_float(bbox_list[3]) layer.bbox_x0 = x0 layer.bbox_y0 = y0 layer.bbox_x1 = x1 layer.bbox_y1 = y1 layer.save() # dates add_mined_dates(layer) add_metadata_dates_to_layer(dates, layer) layer_n = layer_n + 1 # exits if DEBUG_SERVICES LOGGER.debug("Updating layer n. %s/%s" % (layer_n, total)) if DEBUG_SERVICES and layer_n == DEBUG_LAYER_NUMBER: return except Exception as err: message = "update_layers_warper: {0}. request={1} response={2}".format( err, service.url, request.text ) check = Check( content_object=service, success=False, response_time=0, message=message ) check.save()
[ "def", "update_layers_warper", "(", "service", ")", ":", "params", "=", "{", "'field'", ":", "'title'", ",", "'query'", ":", "''", ",", "'show_warped'", ":", "'1'", ",", "'format'", ":", "'json'", "}", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", ",", "'Accept'", ":", "'application/json'", "}", "request", "=", "requests", ".", "get", "(", "service", ".", "url", ",", "headers", "=", "headers", ",", "params", "=", "params", ")", "try", ":", "records", "=", "json", ".", "loads", "(", "request", ".", "content", ")", "total_pages", "=", "int", "(", "records", "[", "'total_pages'", "]", ")", "# set srs", "# Warper supports only 4326, 900913, 3857", "for", "crs_code", "in", "[", "'EPSG:4326'", ",", "'EPSG:900913'", ",", "'EPSG:3857'", "]", ":", "srs", ",", "created", "=", "SpatialReferenceSystem", ".", "objects", ".", "get_or_create", "(", "code", "=", "crs_code", ")", "service", ".", "srs", ".", "add", "(", "srs", ")", "service", ".", "update_validity", "(", ")", "for", "i", "in", "range", "(", "1", ",", "total_pages", "+", "1", ")", ":", "params", "=", "{", "'field'", ":", "'title'", ",", "'query'", ":", "''", ",", "'show_warped'", ":", "'1'", ",", "'format'", ":", "'json'", ",", "'page'", ":", "i", "}", "request", "=", "requests", ".", "get", "(", "service", ".", "url", ",", "headers", "=", "headers", ",", "params", "=", "params", ")", "records", "=", "json", ".", "loads", "(", "request", ".", "content", ")", "LOGGER", ".", "debug", "(", "'Fetched %s'", "%", "request", ".", "url", ")", "layers", "=", "records", "[", "'items'", "]", "layer_n", "=", "0", "total", "=", "len", "(", "layers", ")", "for", "layer", "in", "layers", ":", "name", "=", "layer", "[", "'id'", "]", "title", "=", "layer", "[", "'title'", "]", "abstract", "=", "layer", "[", "'description'", "]", "bbox", "=", "layer", "[", "'bbox'", "]", "# dates", "dates", "=", "[", "]", "if", "'published_date'", "in", "layer", ":", "dates", ".", "append", "(", "layer", "[", "'published_date'", "]", ")", "if", "'date_depicted'", "in", "layer", ":", "dates", ".", "append", "(", "layer", "[", "'date_depicted'", "]", ")", "if", "'depicts_year'", "in", "layer", ":", "dates", ".", "append", "(", "layer", "[", "'depicts_year'", "]", ")", "if", "'issue_year'", "in", "layer", ":", "dates", ".", "append", "(", "layer", "[", "'issue_year'", "]", ")", "layer", ",", "created", "=", "Layer", ".", "objects", ".", "get_or_create", "(", "name", "=", "name", ",", "service", "=", "service", ",", "catalog", "=", "service", ".", "catalog", ")", "if", "layer", ".", "active", ":", "# update fields", "# links = [['OGC:WMTS', settings.SITE_URL.rstrip('/') + '/' + layer.get_url_endpoint()]]", "layer", ".", "type", "=", "'Hypermap:WARPER'", "layer", ".", "title", "=", "title", "layer", ".", "abstract", "=", "abstract", "layer", ".", "is_public", "=", "True", "layer", ".", "url", "=", "'%s/wms/%s?'", "%", "(", "service", ".", "url", ",", "name", ")", "layer", ".", "page_url", "=", "'%s/%s'", "%", "(", "service", ".", "url", ",", "name", ")", "# bbox", "x0", "=", "None", "y0", "=", "None", "x1", "=", "None", "y1", "=", "None", "if", "bbox", ":", "bbox_list", "=", "bbox", ".", "split", "(", "','", ")", "x0", "=", "format_float", "(", "bbox_list", "[", "0", "]", ")", "y0", "=", "format_float", "(", "bbox_list", "[", "1", "]", ")", "x1", "=", "format_float", "(", "bbox_list", "[", "2", "]", ")", "y1", "=", "format_float", "(", "bbox_list", "[", "3", "]", ")", "layer", ".", "bbox_x0", "=", "x0", "layer", ".", "bbox_y0", "=", "y0", "layer", ".", 
"bbox_x1", "=", "x1", "layer", ".", "bbox_y1", "=", "y1", "layer", ".", "save", "(", ")", "# dates", "add_mined_dates", "(", "layer", ")", "add_metadata_dates_to_layer", "(", "dates", ",", "layer", ")", "layer_n", "=", "layer_n", "+", "1", "# exits if DEBUG_SERVICES", "LOGGER", ".", "debug", "(", "\"Updating layer n. %s/%s\"", "%", "(", "layer_n", ",", "total", ")", ")", "if", "DEBUG_SERVICES", "and", "layer_n", "==", "DEBUG_LAYER_NUMBER", ":", "return", "except", "Exception", "as", "err", ":", "message", "=", "\"update_layers_warper: {0}. request={1} response={2}\"", ".", "format", "(", "err", ",", "service", ".", "url", ",", "request", ".", "text", ")", "check", "=", "Check", "(", "content_object", "=", "service", ",", "success", "=", "False", ",", "response_time", "=", "0", ",", "message", "=", "message", ")", "check", ".", "save", "(", ")" ]
Update layers for a Warper service. Sample endpoint: http://warp.worldmap.harvard.edu/maps
[ "Update", "layers", "for", "a", "Warper", "service", ".", "Sample", "endpoint", ":", "http", ":", "//", "warp", ".", "worldmap", ".", "harvard", ".", "edu", "/", "maps" ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L1541-L1632
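A hypothetical sketch for the Warper harvester above; the function pages through the JSON API itself, so a single call per registered service is enough. The Service lookup is assumed, not taken from the repository.

from hypermap.aggregator.models import Service, update_layers_warper

service = Service.objects.get(url='http://warp.worldmap.harvard.edu/maps')
update_layers_warper(service)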
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
update_layers_esri_mapserver
def update_layers_esri_mapserver(service, greedy_opt=False): """ Update layers for an ESRI REST MapServer. Sample endpoint: https://gis.ngdc.noaa.gov/arcgis/rest/services/SampleWorldCities/MapServer/?f=json """ try: esri_service = ArcMapService(service.url) # set srs # both mapserver and imageserver exposes just one srs at the service level # not sure if other ones are supported, for now we just store this one # not sure why this is needed, for now commenting out # if wkt_text: # params = {'exact': 'True', 'error': 'True', 'mode': 'wkt', 'terms': wkt_text} # req = requests.get('http://prj2epsg.org/search.json', params=params) # object = json.loads(req.content) # srs = int(object['codes'][0]['code']) srs_code = esri_service.spatialReference.wkid srs, created = SpatialReferenceSystem.objects.get_or_create(code=srs_code) service.srs.add(srs) service.update_validity() # check if it has a WMS interface if 'supportedExtensions' in esri_service._json_struct and greedy_opt: if 'WMSServer' in esri_service._json_struct['supportedExtensions']: # we need to change the url # http://cga1.cga.harvard.edu/arcgis/rest/services/ecuador/ecuadordata/MapServer?f=pjson # http://cga1.cga.harvard.edu/arcgis/services/ecuador/ # ecuadordata/MapServer/WMSServer?request=GetCapabilities&service=WMS wms_url = service.url.replace('/rest/services/', '/services/') if '?f=pjson' in wms_url: wms_url = wms_url.replace('?f=pjson', 'WMSServer?') if '?f=json' in wms_url: wms_url = wms_url.replace('?f=json', 'WMSServer?') LOGGER.debug('This ESRI REST endpoint has an WMS interface to process: %s' % wms_url) # import here as otherwise is circular (TODO refactor) from utils import create_service_from_endpoint create_service_from_endpoint(wms_url, 'OGC:WMS', catalog=service.catalog) # now process the REST interface layer_n = 0 total = len(esri_service.layers) for esri_layer in esri_service.layers: # in some case the json is invalid # esri_layer._json_struct # {u'currentVersion': 10.01, # u'error': # {u'message': u'An unexpected error occurred processing the request.', u'code': 500, u'details': []}} if 'error' not in esri_layer._json_struct: LOGGER.debug('Updating layer %s' % esri_layer.name) layer, created = Layer.objects.get_or_create( name=esri_layer.id, service=service, catalog=service.catalog ) if layer.active: layer.type = 'ESRI:ArcGIS:MapServer' links = [[layer.type, service.url], ['OGC:WMTS', settings.SITE_URL.rstrip('/') + '/' + layer.get_url_endpoint()]] layer.title = esri_layer.name layer.abstract = esri_service.serviceDescription layer.url = service.url layer.page_url = layer.get_absolute_url links.append([ 'WWW:LINK', settings.SITE_URL.rstrip('/') + layer.page_url ]) try: layer.bbox_x0 = esri_layer.extent.xmin layer.bbox_y0 = esri_layer.extent.ymin layer.bbox_x1 = esri_layer.extent.xmax layer.bbox_y1 = esri_layer.extent.ymax except KeyError: pass try: layer.bbox_x0 = esri_layer._json_struct['extent']['xmin'] layer.bbox_y0 = esri_layer._json_struct['extent']['ymin'] layer.bbox_x1 = esri_layer._json_struct['extent']['xmax'] layer.bbox_y1 = esri_layer._json_struct['extent']['ymax'] except Exception: pass layer.wkt_geometry = bbox2wktpolygon([layer.bbox_x0, layer.bbox_y0, layer.bbox_x1, layer.bbox_y1]) layer.xml = create_metadata_record( identifier=str(layer.uuid), source=service.url, links=links, format='ESRI:ArcGIS:MapServer', type=layer.csw_type, relation=service.id_string, title=layer.title, alternative=layer.title, abstract=layer.abstract, wkt_geometry=layer.wkt_geometry ) layer.anytext = gen_anytext(layer.title, 
layer.abstract) layer.save() # dates add_mined_dates(layer) layer_n = layer_n + 1 # exits if DEBUG_SERVICES LOGGER.debug("Updating layer n. %s/%s" % (layer_n, total)) if DEBUG_SERVICES and layer_n == DEBUG_LAYER_NUMBER: return except Exception as err: message = "update_layers_esri_mapserver: {0}".format( err ) check = Check( content_object=service, success=False, response_time=0, message=message ) check.save()
python
def update_layers_esri_mapserver(service, greedy_opt=False): """ Update layers for an ESRI REST MapServer. Sample endpoint: https://gis.ngdc.noaa.gov/arcgis/rest/services/SampleWorldCities/MapServer/?f=json """ try: esri_service = ArcMapService(service.url) # set srs # both mapserver and imageserver exposes just one srs at the service level # not sure if other ones are supported, for now we just store this one # not sure why this is needed, for now commenting out # if wkt_text: # params = {'exact': 'True', 'error': 'True', 'mode': 'wkt', 'terms': wkt_text} # req = requests.get('http://prj2epsg.org/search.json', params=params) # object = json.loads(req.content) # srs = int(object['codes'][0]['code']) srs_code = esri_service.spatialReference.wkid srs, created = SpatialReferenceSystem.objects.get_or_create(code=srs_code) service.srs.add(srs) service.update_validity() # check if it has a WMS interface if 'supportedExtensions' in esri_service._json_struct and greedy_opt: if 'WMSServer' in esri_service._json_struct['supportedExtensions']: # we need to change the url # http://cga1.cga.harvard.edu/arcgis/rest/services/ecuador/ecuadordata/MapServer?f=pjson # http://cga1.cga.harvard.edu/arcgis/services/ecuador/ # ecuadordata/MapServer/WMSServer?request=GetCapabilities&service=WMS wms_url = service.url.replace('/rest/services/', '/services/') if '?f=pjson' in wms_url: wms_url = wms_url.replace('?f=pjson', 'WMSServer?') if '?f=json' in wms_url: wms_url = wms_url.replace('?f=json', 'WMSServer?') LOGGER.debug('This ESRI REST endpoint has an WMS interface to process: %s' % wms_url) # import here as otherwise is circular (TODO refactor) from utils import create_service_from_endpoint create_service_from_endpoint(wms_url, 'OGC:WMS', catalog=service.catalog) # now process the REST interface layer_n = 0 total = len(esri_service.layers) for esri_layer in esri_service.layers: # in some case the json is invalid # esri_layer._json_struct # {u'currentVersion': 10.01, # u'error': # {u'message': u'An unexpected error occurred processing the request.', u'code': 500, u'details': []}} if 'error' not in esri_layer._json_struct: LOGGER.debug('Updating layer %s' % esri_layer.name) layer, created = Layer.objects.get_or_create( name=esri_layer.id, service=service, catalog=service.catalog ) if layer.active: layer.type = 'ESRI:ArcGIS:MapServer' links = [[layer.type, service.url], ['OGC:WMTS', settings.SITE_URL.rstrip('/') + '/' + layer.get_url_endpoint()]] layer.title = esri_layer.name layer.abstract = esri_service.serviceDescription layer.url = service.url layer.page_url = layer.get_absolute_url links.append([ 'WWW:LINK', settings.SITE_URL.rstrip('/') + layer.page_url ]) try: layer.bbox_x0 = esri_layer.extent.xmin layer.bbox_y0 = esri_layer.extent.ymin layer.bbox_x1 = esri_layer.extent.xmax layer.bbox_y1 = esri_layer.extent.ymax except KeyError: pass try: layer.bbox_x0 = esri_layer._json_struct['extent']['xmin'] layer.bbox_y0 = esri_layer._json_struct['extent']['ymin'] layer.bbox_x1 = esri_layer._json_struct['extent']['xmax'] layer.bbox_y1 = esri_layer._json_struct['extent']['ymax'] except Exception: pass layer.wkt_geometry = bbox2wktpolygon([layer.bbox_x0, layer.bbox_y0, layer.bbox_x1, layer.bbox_y1]) layer.xml = create_metadata_record( identifier=str(layer.uuid), source=service.url, links=links, format='ESRI:ArcGIS:MapServer', type=layer.csw_type, relation=service.id_string, title=layer.title, alternative=layer.title, abstract=layer.abstract, wkt_geometry=layer.wkt_geometry ) layer.anytext = gen_anytext(layer.title, 
layer.abstract) layer.save() # dates add_mined_dates(layer) layer_n = layer_n + 1 # exits if DEBUG_SERVICES LOGGER.debug("Updating layer n. %s/%s" % (layer_n, total)) if DEBUG_SERVICES and layer_n == DEBUG_LAYER_NUMBER: return except Exception as err: message = "update_layers_esri_mapserver: {0}".format( err ) check = Check( content_object=service, success=False, response_time=0, message=message ) check.save()
[ "def", "update_layers_esri_mapserver", "(", "service", ",", "greedy_opt", "=", "False", ")", ":", "try", ":", "esri_service", "=", "ArcMapService", "(", "service", ".", "url", ")", "# set srs", "# both mapserver and imageserver exposes just one srs at the service level", "# not sure if other ones are supported, for now we just store this one", "# not sure why this is needed, for now commenting out", "# if wkt_text:", "# params = {'exact': 'True', 'error': 'True', 'mode': 'wkt', 'terms': wkt_text}", "# req = requests.get('http://prj2epsg.org/search.json', params=params)", "# object = json.loads(req.content)", "# srs = int(object['codes'][0]['code'])", "srs_code", "=", "esri_service", ".", "spatialReference", ".", "wkid", "srs", ",", "created", "=", "SpatialReferenceSystem", ".", "objects", ".", "get_or_create", "(", "code", "=", "srs_code", ")", "service", ".", "srs", ".", "add", "(", "srs", ")", "service", ".", "update_validity", "(", ")", "# check if it has a WMS interface", "if", "'supportedExtensions'", "in", "esri_service", ".", "_json_struct", "and", "greedy_opt", ":", "if", "'WMSServer'", "in", "esri_service", ".", "_json_struct", "[", "'supportedExtensions'", "]", ":", "# we need to change the url", "# http://cga1.cga.harvard.edu/arcgis/rest/services/ecuador/ecuadordata/MapServer?f=pjson", "# http://cga1.cga.harvard.edu/arcgis/services/ecuador/", "# ecuadordata/MapServer/WMSServer?request=GetCapabilities&service=WMS", "wms_url", "=", "service", ".", "url", ".", "replace", "(", "'/rest/services/'", ",", "'/services/'", ")", "if", "'?f=pjson'", "in", "wms_url", ":", "wms_url", "=", "wms_url", ".", "replace", "(", "'?f=pjson'", ",", "'WMSServer?'", ")", "if", "'?f=json'", "in", "wms_url", ":", "wms_url", "=", "wms_url", ".", "replace", "(", "'?f=json'", ",", "'WMSServer?'", ")", "LOGGER", ".", "debug", "(", "'This ESRI REST endpoint has an WMS interface to process: %s'", "%", "wms_url", ")", "# import here as otherwise is circular (TODO refactor)", "from", "utils", "import", "create_service_from_endpoint", "create_service_from_endpoint", "(", "wms_url", ",", "'OGC:WMS'", ",", "catalog", "=", "service", ".", "catalog", ")", "# now process the REST interface", "layer_n", "=", "0", "total", "=", "len", "(", "esri_service", ".", "layers", ")", "for", "esri_layer", "in", "esri_service", ".", "layers", ":", "# in some case the json is invalid", "# esri_layer._json_struct", "# {u'currentVersion': 10.01,", "# u'error':", "# {u'message': u'An unexpected error occurred processing the request.', u'code': 500, u'details': []}}", "if", "'error'", "not", "in", "esri_layer", ".", "_json_struct", ":", "LOGGER", ".", "debug", "(", "'Updating layer %s'", "%", "esri_layer", ".", "name", ")", "layer", ",", "created", "=", "Layer", ".", "objects", ".", "get_or_create", "(", "name", "=", "esri_layer", ".", "id", ",", "service", "=", "service", ",", "catalog", "=", "service", ".", "catalog", ")", "if", "layer", ".", "active", ":", "layer", ".", "type", "=", "'ESRI:ArcGIS:MapServer'", "links", "=", "[", "[", "layer", ".", "type", ",", "service", ".", "url", "]", ",", "[", "'OGC:WMTS'", ",", "settings", ".", "SITE_URL", ".", "rstrip", "(", "'/'", ")", "+", "'/'", "+", "layer", ".", "get_url_endpoint", "(", ")", "]", "]", "layer", ".", "title", "=", "esri_layer", ".", "name", "layer", ".", "abstract", "=", "esri_service", ".", "serviceDescription", "layer", ".", "url", "=", "service", ".", "url", "layer", ".", "page_url", "=", "layer", ".", "get_absolute_url", "links", ".", "append", "(", "[", "'WWW:LINK'", 
",", "settings", ".", "SITE_URL", ".", "rstrip", "(", "'/'", ")", "+", "layer", ".", "page_url", "]", ")", "try", ":", "layer", ".", "bbox_x0", "=", "esri_layer", ".", "extent", ".", "xmin", "layer", ".", "bbox_y0", "=", "esri_layer", ".", "extent", ".", "ymin", "layer", ".", "bbox_x1", "=", "esri_layer", ".", "extent", ".", "xmax", "layer", ".", "bbox_y1", "=", "esri_layer", ".", "extent", ".", "ymax", "except", "KeyError", ":", "pass", "try", ":", "layer", ".", "bbox_x0", "=", "esri_layer", ".", "_json_struct", "[", "'extent'", "]", "[", "'xmin'", "]", "layer", ".", "bbox_y0", "=", "esri_layer", ".", "_json_struct", "[", "'extent'", "]", "[", "'ymin'", "]", "layer", ".", "bbox_x1", "=", "esri_layer", ".", "_json_struct", "[", "'extent'", "]", "[", "'xmax'", "]", "layer", ".", "bbox_y1", "=", "esri_layer", ".", "_json_struct", "[", "'extent'", "]", "[", "'ymax'", "]", "except", "Exception", ":", "pass", "layer", ".", "wkt_geometry", "=", "bbox2wktpolygon", "(", "[", "layer", ".", "bbox_x0", ",", "layer", ".", "bbox_y0", ",", "layer", ".", "bbox_x1", ",", "layer", ".", "bbox_y1", "]", ")", "layer", ".", "xml", "=", "create_metadata_record", "(", "identifier", "=", "str", "(", "layer", ".", "uuid", ")", ",", "source", "=", "service", ".", "url", ",", "links", "=", "links", ",", "format", "=", "'ESRI:ArcGIS:MapServer'", ",", "type", "=", "layer", ".", "csw_type", ",", "relation", "=", "service", ".", "id_string", ",", "title", "=", "layer", ".", "title", ",", "alternative", "=", "layer", ".", "title", ",", "abstract", "=", "layer", ".", "abstract", ",", "wkt_geometry", "=", "layer", ".", "wkt_geometry", ")", "layer", ".", "anytext", "=", "gen_anytext", "(", "layer", ".", "title", ",", "layer", ".", "abstract", ")", "layer", ".", "save", "(", ")", "# dates", "add_mined_dates", "(", "layer", ")", "layer_n", "=", "layer_n", "+", "1", "# exits if DEBUG_SERVICES", "LOGGER", ".", "debug", "(", "\"Updating layer n. %s/%s\"", "%", "(", "layer_n", ",", "total", ")", ")", "if", "DEBUG_SERVICES", "and", "layer_n", "==", "DEBUG_LAYER_NUMBER", ":", "return", "except", "Exception", "as", "err", ":", "message", "=", "\"update_layers_esri_mapserver: {0}\"", ".", "format", "(", "err", ")", "check", "=", "Check", "(", "content_object", "=", "service", ",", "success", "=", "False", ",", "response_time", "=", "0", ",", "message", "=", "message", ")", "check", ".", "save", "(", ")" ]
Update layers for an ESRI REST MapServer. Sample endpoint: https://gis.ngdc.noaa.gov/arcgis/rest/services/SampleWorldCities/MapServer/?f=json
[ "Update", "layers", "for", "an", "ESRI", "REST", "MapServer", ".", "Sample", "endpoint", ":", "https", ":", "//", "gis", ".", "ngdc", ".", "noaa", ".", "gov", "/", "arcgis", "/", "rest", "/", "services", "/", "SampleWorldCities", "/", "MapServer", "/", "?f", "=", "json" ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L1635-L1749
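A hypothetical sketch for the ArcGIS MapServer harvester above, using the sample endpoint from its docstring; passing greedy_opt=True additionally registers the WMS interface when the REST endpoint advertises one. The Service lookup is an assumption.

from hypermap.aggregator.models import Service, update_layers_esri_mapserver

service = Service.objects.get(
    url='https://gis.ngdc.noaa.gov/arcgis/rest/services/SampleWorldCities/MapServer/?f=json')
update_layers_esri_mapserver(service, greedy_opt=True)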
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
update_layers_esri_imageserver
def update_layers_esri_imageserver(service): """ Update layers for an ESRI REST ImageServer. Sample endpoint: https://gis.ngdc.noaa.gov/arcgis/rest/services/bag_bathymetry/ImageServer/?f=json """ try: esri_service = ArcImageService(service.url) # set srs # both mapserver and imageserver exposes just one srs at the service level # not sure if other ones are supported, for now we just store this one obj = json.loads(esri_service._contents) srs_code = obj['spatialReference']['wkid'] srs, created = SpatialReferenceSystem.objects.get_or_create(code=srs_code) service.srs.add(srs) service.update_validity() layer, created = Layer.objects.get_or_create(name=obj['name'], service=service, catalog=service.catalog) if layer.active: layer.type = 'ESRI:ArcGIS:ImageServer' links = [[layer.type, service.url], ['OGC:WMTS', settings.SITE_URL.rstrip('/') + '/' + layer.get_url_endpoint()]] layer.title = obj['name'] layer.abstract = esri_service.serviceDescription layer.url = service.url layer.bbox_x0 = str(obj['extent']['xmin']) layer.bbox_y0 = str(obj['extent']['ymin']) layer.bbox_x1 = str(obj['extent']['xmax']) layer.bbox_y1 = str(obj['extent']['ymax']) layer.page_url = layer.get_absolute_url links.append([ 'WWW:LINK', settings.SITE_URL.rstrip('/') + layer.page_url ]) layer.wkt_geometry = bbox2wktpolygon([layer.bbox_x0, layer.bbox_y0, layer.bbox_x1, layer.bbox_y1]) layer.xml = create_metadata_record( identifier=str(layer.uuid), source=service.url, links=links, format='ESRI:ArcGIS:ImageServer', type=layer.csw_type, relation=service.id_string, title=layer.title, alternative=layer.title, abstract=layer.abstract, wkt_geometry=layer.wkt_geometry ) layer.anytext = gen_anytext(layer.title, layer.abstract) layer.save() # dates add_mined_dates(layer) except Exception as err: message = "update_layers_esri_imageserver: {0}".format( err ) check = Check( content_object=service, success=False, response_time=0, message=message ) check.save()
python
def update_layers_esri_imageserver(service): """ Update layers for an ESRI REST ImageServer. Sample endpoint: https://gis.ngdc.noaa.gov/arcgis/rest/services/bag_bathymetry/ImageServer/?f=json """ try: esri_service = ArcImageService(service.url) # set srs # both mapserver and imageserver exposes just one srs at the service level # not sure if other ones are supported, for now we just store this one obj = json.loads(esri_service._contents) srs_code = obj['spatialReference']['wkid'] srs, created = SpatialReferenceSystem.objects.get_or_create(code=srs_code) service.srs.add(srs) service.update_validity() layer, created = Layer.objects.get_or_create(name=obj['name'], service=service, catalog=service.catalog) if layer.active: layer.type = 'ESRI:ArcGIS:ImageServer' links = [[layer.type, service.url], ['OGC:WMTS', settings.SITE_URL.rstrip('/') + '/' + layer.get_url_endpoint()]] layer.title = obj['name'] layer.abstract = esri_service.serviceDescription layer.url = service.url layer.bbox_x0 = str(obj['extent']['xmin']) layer.bbox_y0 = str(obj['extent']['ymin']) layer.bbox_x1 = str(obj['extent']['xmax']) layer.bbox_y1 = str(obj['extent']['ymax']) layer.page_url = layer.get_absolute_url links.append([ 'WWW:LINK', settings.SITE_URL.rstrip('/') + layer.page_url ]) layer.wkt_geometry = bbox2wktpolygon([layer.bbox_x0, layer.bbox_y0, layer.bbox_x1, layer.bbox_y1]) layer.xml = create_metadata_record( identifier=str(layer.uuid), source=service.url, links=links, format='ESRI:ArcGIS:ImageServer', type=layer.csw_type, relation=service.id_string, title=layer.title, alternative=layer.title, abstract=layer.abstract, wkt_geometry=layer.wkt_geometry ) layer.anytext = gen_anytext(layer.title, layer.abstract) layer.save() # dates add_mined_dates(layer) except Exception as err: message = "update_layers_esri_imageserver: {0}".format( err ) check = Check( content_object=service, success=False, response_time=0, message=message ) check.save()
[ "def", "update_layers_esri_imageserver", "(", "service", ")", ":", "try", ":", "esri_service", "=", "ArcImageService", "(", "service", ".", "url", ")", "# set srs", "# both mapserver and imageserver exposes just one srs at the service level", "# not sure if other ones are supported, for now we just store this one", "obj", "=", "json", ".", "loads", "(", "esri_service", ".", "_contents", ")", "srs_code", "=", "obj", "[", "'spatialReference'", "]", "[", "'wkid'", "]", "srs", ",", "created", "=", "SpatialReferenceSystem", ".", "objects", ".", "get_or_create", "(", "code", "=", "srs_code", ")", "service", ".", "srs", ".", "add", "(", "srs", ")", "service", ".", "update_validity", "(", ")", "layer", ",", "created", "=", "Layer", ".", "objects", ".", "get_or_create", "(", "name", "=", "obj", "[", "'name'", "]", ",", "service", "=", "service", ",", "catalog", "=", "service", ".", "catalog", ")", "if", "layer", ".", "active", ":", "layer", ".", "type", "=", "'ESRI:ArcGIS:ImageServer'", "links", "=", "[", "[", "layer", ".", "type", ",", "service", ".", "url", "]", ",", "[", "'OGC:WMTS'", ",", "settings", ".", "SITE_URL", ".", "rstrip", "(", "'/'", ")", "+", "'/'", "+", "layer", ".", "get_url_endpoint", "(", ")", "]", "]", "layer", ".", "title", "=", "obj", "[", "'name'", "]", "layer", ".", "abstract", "=", "esri_service", ".", "serviceDescription", "layer", ".", "url", "=", "service", ".", "url", "layer", ".", "bbox_x0", "=", "str", "(", "obj", "[", "'extent'", "]", "[", "'xmin'", "]", ")", "layer", ".", "bbox_y0", "=", "str", "(", "obj", "[", "'extent'", "]", "[", "'ymin'", "]", ")", "layer", ".", "bbox_x1", "=", "str", "(", "obj", "[", "'extent'", "]", "[", "'xmax'", "]", ")", "layer", ".", "bbox_y1", "=", "str", "(", "obj", "[", "'extent'", "]", "[", "'ymax'", "]", ")", "layer", ".", "page_url", "=", "layer", ".", "get_absolute_url", "links", ".", "append", "(", "[", "'WWW:LINK'", ",", "settings", ".", "SITE_URL", ".", "rstrip", "(", "'/'", ")", "+", "layer", ".", "page_url", "]", ")", "layer", ".", "wkt_geometry", "=", "bbox2wktpolygon", "(", "[", "layer", ".", "bbox_x0", ",", "layer", ".", "bbox_y0", ",", "layer", ".", "bbox_x1", ",", "layer", ".", "bbox_y1", "]", ")", "layer", ".", "xml", "=", "create_metadata_record", "(", "identifier", "=", "str", "(", "layer", ".", "uuid", ")", ",", "source", "=", "service", ".", "url", ",", "links", "=", "links", ",", "format", "=", "'ESRI:ArcGIS:ImageServer'", ",", "type", "=", "layer", ".", "csw_type", ",", "relation", "=", "service", ".", "id_string", ",", "title", "=", "layer", ".", "title", ",", "alternative", "=", "layer", ".", "title", ",", "abstract", "=", "layer", ".", "abstract", ",", "wkt_geometry", "=", "layer", ".", "wkt_geometry", ")", "layer", ".", "anytext", "=", "gen_anytext", "(", "layer", ".", "title", ",", "layer", ".", "abstract", ")", "layer", ".", "save", "(", ")", "# dates", "add_mined_dates", "(", "layer", ")", "except", "Exception", "as", "err", ":", "message", "=", "\"update_layers_esri_imageserver: {0}\"", ".", "format", "(", "err", ")", "check", "=", "Check", "(", "content_object", "=", "service", ",", "success", "=", "False", ",", "response_time", "=", "0", ",", "message", "=", "message", ")", "check", ".", "save", "(", ")" ]
Update layers for an ESRI REST ImageServer. Sample endpoint: https://gis.ngdc.noaa.gov/arcgis/rest/services/bag_bathymetry/ImageServer/?f=json
[ "Update", "layers", "for", "an", "ESRI", "REST", "ImageServer", ".", "Sample", "endpoint", ":", "https", ":", "//", "gis", ".", "ngdc", ".", "noaa", ".", "gov", "/", "arcgis", "/", "rest", "/", "services", "/", "bag_bathymetry", "/", "ImageServer", "/", "?f", "=", "json" ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L1752-L1813
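The record above reads the ImageServer metadata by parsing the service's JSON description (the sample endpoint in the docstring is the same document with f=json appended). Below is a minimal sketch of fetching that description directly, assuming Python 3 and only the standard library; fetch_imageserver_metadata is an invented helper name, and the original code instead goes through the ArcImageService client and its _contents attribute.

import json
from urllib.request import urlopen

def fetch_imageserver_metadata(base_url):
    """Fetch an ArcGIS ImageServer's service description as a dict.

    Appends f=json the way the sample endpoint in the record does and
    returns the parsed document, from which name, extent and
    spatialReference can be read.
    """
    sep = '&' if '?' in base_url else '?'
    with urlopen(base_url + sep + 'f=json') as resp:
        meta = json.loads(resp.read().decode('utf-8'))
    return meta

# Hypothetical usage against the endpoint quoted in the docstring:
# meta = fetch_imageserver_metadata(
#     'https://gis.ngdc.noaa.gov/arcgis/rest/services/bag_bathymetry/ImageServer')
# print(meta['name'], meta['spatialReference']['wkid'], meta['extent'])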
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
endpointlist_post_save
def endpointlist_post_save(instance, *args, **kwargs): """ Used to process the lines of the endpoint list. """ with open(instance.upload.file.name, mode='rb') as f: lines = f.readlines() for url in lines: if len(url) > 255: LOGGER.debug('Skipping this endpoint, as it is more than 255 characters: %s' % url) else: if Endpoint.objects.filter(url=url, catalog=instance.catalog).count() == 0: endpoint = Endpoint(url=url, endpoint_list=instance) endpoint.catalog = instance.catalog endpoint.save() if not settings.REGISTRY_SKIP_CELERY: update_endpoints.delay(instance.id) else: update_endpoints(instance.id)
python
def endpointlist_post_save(instance, *args, **kwargs): """ Used to process the lines of the endpoint list. """ with open(instance.upload.file.name, mode='rb') as f: lines = f.readlines() for url in lines: if len(url) > 255: LOGGER.debug('Skipping this endpoint, as it is more than 255 characters: %s' % url) else: if Endpoint.objects.filter(url=url, catalog=instance.catalog).count() == 0: endpoint = Endpoint(url=url, endpoint_list=instance) endpoint.catalog = instance.catalog endpoint.save() if not settings.REGISTRY_SKIP_CELERY: update_endpoints.delay(instance.id) else: update_endpoints(instance.id)
[ "def", "endpointlist_post_save", "(", "instance", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "with", "open", "(", "instance", ".", "upload", ".", "file", ".", "name", ",", "mode", "=", "'rb'", ")", "as", "f", ":", "lines", "=", "f", ".", "readlines", "(", ")", "for", "url", "in", "lines", ":", "if", "len", "(", "url", ")", ">", "255", ":", "LOGGER", ".", "debug", "(", "'Skipping this endpoint, as it is more than 255 characters: %s'", "%", "url", ")", "else", ":", "if", "Endpoint", ".", "objects", ".", "filter", "(", "url", "=", "url", ",", "catalog", "=", "instance", ".", "catalog", ")", ".", "count", "(", ")", "==", "0", ":", "endpoint", "=", "Endpoint", "(", "url", "=", "url", ",", "endpoint_list", "=", "instance", ")", "endpoint", ".", "catalog", "=", "instance", ".", "catalog", "endpoint", ".", "save", "(", ")", "if", "not", "settings", ".", "REGISTRY_SKIP_CELERY", ":", "update_endpoints", ".", "delay", "(", "instance", ".", "id", ")", "else", ":", "update_endpoints", "(", "instance", ".", "id", ")" ]
Used to process the lines of the endpoint list.
[ "Used", "to", "process", "the", "lines", "of", "the", "endpoint", "list", "." ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L1818-L1835
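endpointlist_post_save above walks the uploaded file line by line, skips anything longer than 255 characters and only creates an Endpoint when that URL is not already registered for the catalog. Below is a framework-free sketch of just the filtering step, with an invented function name; note that the original keeps the raw line (including its trailing newline), while this version strips whitespace for clarity.

def extract_endpoint_urls(lines, max_length=255):
    """Filter candidate endpoint URLs read from an uploaded list.

    Mirrors the checks in endpointlist_post_save: overly long lines are
    skipped and each URL is kept only once.
    """
    seen = set()
    urls = []
    for raw in lines:
        url = raw.strip()
        if not url or len(url) > max_length:
            continue
        if url in seen:
            continue
        seen.add(url)
        urls.append(url)
    return urls

# Example:
# with open('endpoints.txt') as f:
#     print(extract_endpoint_urls(f.readlines()))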
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
service_pre_save
def service_pre_save(instance, *args, **kwargs): """ Used to do a service full check when saving it. """ # check if service is unique # we cannot use unique_together as it relies on a combination of fields # from different models (service, resource) exists = Service.objects.filter(url=instance.url, type=instance.type, catalog=instance.catalog).count() > 0 # TODO: When saving from the django admin, this should not be triggered. # Reference: http://stackoverflow.com/questions/11561722/django-what-is-the-role-of-modelstate if instance._state.adding and exists: raise Exception("There is already such a service. url={0} catalog={1}".format( instance.url, instance.catalog ))
python
def service_pre_save(instance, *args, **kwargs): """ Used to do a service full check when saving it. """ # check if service is unique # we cannot use unique_together as it relies on a combination of fields # from different models (service, resource) exists = Service.objects.filter(url=instance.url, type=instance.type, catalog=instance.catalog).count() > 0 # TODO: When saving from the django admin, this should not be triggered. # Reference: http://stackoverflow.com/questions/11561722/django-what-is-the-role-of-modelstate if instance._state.adding and exists: raise Exception("There is already such a service. url={0} catalog={1}".format( instance.url, instance.catalog ))
[ "def", "service_pre_save", "(", "instance", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# check if service is unique", "# we cannot use unique_together as it relies on a combination of fields", "# from different models (service, resource)", "exists", "=", "Service", ".", "objects", ".", "filter", "(", "url", "=", "instance", ".", "url", ",", "type", "=", "instance", ".", "type", ",", "catalog", "=", "instance", ".", "catalog", ")", ".", "count", "(", ")", ">", "0", "# TODO: When saving from the django admin, this should not be triggered.", "# Reference: http://stackoverflow.com/questions/11561722/django-what-is-the-role-of-modelstate", "if", "instance", ".", "_state", ".", "adding", "and", "exists", ":", "raise", "Exception", "(", "\"There is already such a service. url={0} catalog={1}\"", ".", "format", "(", "instance", ".", "url", ",", "instance", ".", "catalog", ")", ")" ]
Used to do a service full check when saving it.
[ "Used", "to", "do", "a", "service", "full", "check", "when", "saving", "it", "." ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L1851-L1868
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
service_post_save
def service_post_save(instance, *args, **kwargs): """ Used to do a service full check when saving it. """ # check service if instance.is_monitored and settings.REGISTRY_SKIP_CELERY: check_service(instance.id) elif instance.is_monitored: check_service.delay(instance.id)
python
def service_post_save(instance, *args, **kwargs): """ Used to do a service full check when saving it. """ # check service if instance.is_monitored and settings.REGISTRY_SKIP_CELERY: check_service(instance.id) elif instance.is_monitored: check_service.delay(instance.id)
[ "def", "service_post_save", "(", "instance", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# check service", "if", "instance", ".", "is_monitored", "and", "settings", ".", "REGISTRY_SKIP_CELERY", ":", "check_service", "(", "instance", ".", "id", ")", "elif", "instance", ".", "is_monitored", ":", "check_service", ".", "delay", "(", "instance", ".", "id", ")" ]
Used to do a service full check when saving it.
[ "Used", "to", "do", "a", "service", "full", "check", "when", "saving", "it", "." ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L1871-L1880
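service_post_save, like endpointlist_post_save, layer_post_save and issue_post_delete in this file, switches between task.delay(...) and a plain synchronous call depending on settings.REGISTRY_SKIP_CELERY. A small sketch of that dispatch pattern as a helper follows; maybe_run is an invented name and the flag is passed in explicitly rather than read from Django settings.

def maybe_run(task, *args, skip_celery=False):
    """Run a Celery task asynchronously, or call it inline when Celery is skipped.

    `task` is expected to be a Celery task (it has a .delay method) that can
    also be invoked directly as a plain function, which is how the handlers
    in this module use check_service, check_layer, index_layer, etc.
    """
    if skip_celery:
        return task(*args)
    return task.delay(*args)

# e.g. maybe_run(check_service, instance.id, skip_celery=settings.REGISTRY_SKIP_CELERY)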
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
layer_pre_save
def layer_pre_save(instance, *args, **kwargs): """ Used to check layer validity. """ is_valid = True # we do not need to check validity for WM layers if not instance.service.type == 'Hypermap:WorldMap': # 0. a layer is invalid if its service its invalid as well if not instance.service.is_valid: is_valid = False LOGGER.debug('Layer with id %s is marked invalid because its service is invalid' % instance.id) # 1. a layer is invalid with an extent within (-2, -2, +2, +2) if instance.bbox_x0 > -2 and instance.bbox_x1 < 2 and instance.bbox_y0 > -2 and instance.bbox_y1 < 2: is_valid = False LOGGER.debug( 'Layer with id %s is marked invalid because its extent is within (-2, -2, +2, +2)' % instance.id ) instance.is_valid = is_valid
python
def layer_pre_save(instance, *args, **kwargs): """ Used to check layer validity. """ is_valid = True # we do not need to check validity for WM layers if not instance.service.type == 'Hypermap:WorldMap': # 0. a layer is invalid if its service its invalid as well if not instance.service.is_valid: is_valid = False LOGGER.debug('Layer with id %s is marked invalid because its service is invalid' % instance.id) # 1. a layer is invalid with an extent within (-2, -2, +2, +2) if instance.bbox_x0 > -2 and instance.bbox_x1 < 2 and instance.bbox_y0 > -2 and instance.bbox_y1 < 2: is_valid = False LOGGER.debug( 'Layer with id %s is marked invalid because its extent is within (-2, -2, +2, +2)' % instance.id ) instance.is_valid = is_valid
[ "def", "layer_pre_save", "(", "instance", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "is_valid", "=", "True", "# we do not need to check validity for WM layers", "if", "not", "instance", ".", "service", ".", "type", "==", "'Hypermap:WorldMap'", ":", "# 0. a layer is invalid if its service its invalid as well", "if", "not", "instance", ".", "service", ".", "is_valid", ":", "is_valid", "=", "False", "LOGGER", ".", "debug", "(", "'Layer with id %s is marked invalid because its service is invalid'", "%", "instance", ".", "id", ")", "# 1. a layer is invalid with an extent within (-2, -2, +2, +2)", "if", "instance", ".", "bbox_x0", ">", "-", "2", "and", "instance", ".", "bbox_x1", "<", "2", "and", "instance", ".", "bbox_y0", ">", "-", "2", "and", "instance", ".", "bbox_y1", "<", "2", ":", "is_valid", "=", "False", "LOGGER", ".", "debug", "(", "'Layer with id %s is marked invalid because its extent is within (-2, -2, +2, +2)'", "%", "instance", ".", "id", ")", "instance", ".", "is_valid", "=", "is_valid" ]
Used to check layer validity.
[ "Used", "to", "check", "layer", "validity", "." ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L1883-L1905
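The second rule in layer_pre_save treats a layer as invalid when its whole extent fits inside (-2, -2, +2, +2). Here is the same predicate as a standalone function, under the assumption that this is simply a heuristic for bounding boxes that collapsed around the origin; extent_is_degenerate is an invented name.

def extent_is_degenerate(x0, y0, x1, y1, threshold=2.0):
    """True when the bounding box sits entirely inside (-t, -t, +t, +t).

    With the default threshold this matches the extent check in
    layer_pre_save, which flags layers whose bounding box shrank to a
    small area around (0, 0).
    """
    return -threshold < x0 and x1 < threshold and -threshold < y0 and y1 < threshold

# extent_is_degenerate(-1.5, -0.5, 1.0, 0.9)     -> True  (would be marked invalid)
# extent_is_degenerate(-74.3, 40.4, -73.6, 41.0) -> False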
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
layer_post_save
def layer_post_save(instance, *args, **kwargs): """ Used to do a layer full check when saving it. """ if instance.is_monitored and instance.service.is_monitored: # index and monitor if not settings.REGISTRY_SKIP_CELERY: check_layer.delay(instance.id) else: check_layer(instance.id) else: # just index index_layer(instance.id)
python
def layer_post_save(instance, *args, **kwargs): """ Used to do a layer full check when saving it. """ if instance.is_monitored and instance.service.is_monitored: # index and monitor if not settings.REGISTRY_SKIP_CELERY: check_layer.delay(instance.id) else: check_layer(instance.id) else: # just index index_layer(instance.id)
[ "def", "layer_post_save", "(", "instance", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "instance", ".", "is_monitored", "and", "instance", ".", "service", ".", "is_monitored", ":", "# index and monitor", "if", "not", "settings", ".", "REGISTRY_SKIP_CELERY", ":", "check_layer", ".", "delay", "(", "instance", ".", "id", ")", "else", ":", "check_layer", "(", "instance", ".", "id", ")", "else", ":", "# just index", "index_layer", "(", "instance", ".", "id", ")" ]
Used to do a layer full check when saving it.
[ "Used", "to", "do", "a", "layer", "full", "check", "when", "saving", "it", "." ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L1908-L1918
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
issue_post_delete
def issue_post_delete(instance, *args, **kwargs): """ Used to do reindex layers/services when a issue is removed form them. """ LOGGER.debug('Re-adding layer/service to search engine index') if isinstance(instance.content_object, Service): if not settings.REGISTRY_SKIP_CELERY: index_service.delay(instance.content_object.id) else: index_service(instance.content_object.id) else: if not settings.REGISTRY_SKIP_CELERY: index_layer.delay(instance.content_object.id) else: index_layer(instance.content_object.id)
python
def issue_post_delete(instance, *args, **kwargs): """ Used to do reindex layers/services when a issue is removed form them. """ LOGGER.debug('Re-adding layer/service to search engine index') if isinstance(instance.content_object, Service): if not settings.REGISTRY_SKIP_CELERY: index_service.delay(instance.content_object.id) else: index_service(instance.content_object.id) else: if not settings.REGISTRY_SKIP_CELERY: index_layer.delay(instance.content_object.id) else: index_layer(instance.content_object.id)
[ "def", "issue_post_delete", "(", "instance", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "LOGGER", ".", "debug", "(", "'Re-adding layer/service to search engine index'", ")", "if", "isinstance", "(", "instance", ".", "content_object", ",", "Service", ")", ":", "if", "not", "settings", ".", "REGISTRY_SKIP_CELERY", ":", "index_service", ".", "delay", "(", "instance", ".", "content_object", ".", "id", ")", "else", ":", "index_service", "(", "instance", ".", "content_object", ".", "id", ")", "else", ":", "if", "not", "settings", ".", "REGISTRY_SKIP_CELERY", ":", "index_layer", ".", "delay", "(", "instance", ".", "content_object", ".", "id", ")", "else", ":", "index_layer", "(", "instance", ".", "content_object", ".", "id", ")" ]
Used to reindex layers/services when an issue is removed from them.
[ "Used", "to", "reindex", "layers", "/", "services", "when", "an", "issue", "is", "removed", "from", "them", "." ]

train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L1921-L1935
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
Resource.get_checks_admin_reliability_warning_url
def get_checks_admin_reliability_warning_url(self): """ When service Realiability is going down users should go to the the check history to find problem causes. :return: admin url with check list for this instance """ # TODO: cache this. path = self.get_checks_admin_url() content_type = ContentType.objects.get_for_model(self) params = "?content_type__id__exact={0}&q={1}&success__exact=0".format( content_type.id, self.id ) url = path + params return url
python
def get_checks_admin_reliability_warning_url(self): """ When service Realiability is going down users should go to the the check history to find problem causes. :return: admin url with check list for this instance """ # TODO: cache this. path = self.get_checks_admin_url() content_type = ContentType.objects.get_for_model(self) params = "?content_type__id__exact={0}&q={1}&success__exact=0".format( content_type.id, self.id ) url = path + params return url
[ "def", "get_checks_admin_reliability_warning_url", "(", "self", ")", ":", "# TODO: cache this.", "path", "=", "self", ".", "get_checks_admin_url", "(", ")", "content_type", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "self", ")", "params", "=", "\"?content_type__id__exact={0}&q={1}&success__exact=0\"", ".", "format", "(", "content_type", ".", "id", ",", "self", ".", "id", ")", "url", "=", "path", "+", "params", "return", "url" ]
When service Reliability is going down users should go to the check history to find problem causes. :return: admin url with check list for this instance
[ "When", "service", "Reliability", "is", "going", "down", "users", "should", "go", "to", "the", "check", "history", "to", "find", "problem", "causes", ".", ":", "return", ":", "admin", "url", "with", "check", "list", "for", "this", "instance" ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L261-L275
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
Service.update_layers
def update_layers(self): """ Update layers for a service. """ signals.post_save.disconnect(layer_post_save, sender=Layer) try: LOGGER.debug('Updating layers for service id %s' % self.id) if self.type == 'OGC:WMS': update_layers_wms(self) elif self.type == 'OGC:WMTS': update_layers_wmts(self) elif self.type == 'ESRI:ArcGIS:MapServer': update_layers_esri_mapserver(self) elif self.type == 'ESRI:ArcGIS:ImageServer': update_layers_esri_imageserver(self) elif self.type == 'Hypermap:WorldMapLegacy': update_layers_wm_legacy(self) elif self.type == 'Hypermap:WorldMap': update_layers_geonode_wm(self) elif self.type == 'Hypermap:WARPER': update_layers_warper(self) except: LOGGER.error('Error updating layers for service %s' % self.uuid) signals.post_save.connect(layer_post_save, sender=Layer)
python
def update_layers(self): """ Update layers for a service. """ signals.post_save.disconnect(layer_post_save, sender=Layer) try: LOGGER.debug('Updating layers for service id %s' % self.id) if self.type == 'OGC:WMS': update_layers_wms(self) elif self.type == 'OGC:WMTS': update_layers_wmts(self) elif self.type == 'ESRI:ArcGIS:MapServer': update_layers_esri_mapserver(self) elif self.type == 'ESRI:ArcGIS:ImageServer': update_layers_esri_imageserver(self) elif self.type == 'Hypermap:WorldMapLegacy': update_layers_wm_legacy(self) elif self.type == 'Hypermap:WorldMap': update_layers_geonode_wm(self) elif self.type == 'Hypermap:WARPER': update_layers_warper(self) except: LOGGER.error('Error updating layers for service %s' % self.uuid) signals.post_save.connect(layer_post_save, sender=Layer)
[ "def", "update_layers", "(", "self", ")", ":", "signals", ".", "post_save", ".", "disconnect", "(", "layer_post_save", ",", "sender", "=", "Layer", ")", "try", ":", "LOGGER", ".", "debug", "(", "'Updating layers for service id %s'", "%", "self", ".", "id", ")", "if", "self", ".", "type", "==", "'OGC:WMS'", ":", "update_layers_wms", "(", "self", ")", "elif", "self", ".", "type", "==", "'OGC:WMTS'", ":", "update_layers_wmts", "(", "self", ")", "elif", "self", ".", "type", "==", "'ESRI:ArcGIS:MapServer'", ":", "update_layers_esri_mapserver", "(", "self", ")", "elif", "self", ".", "type", "==", "'ESRI:ArcGIS:ImageServer'", ":", "update_layers_esri_imageserver", "(", "self", ")", "elif", "self", ".", "type", "==", "'Hypermap:WorldMapLegacy'", ":", "update_layers_wm_legacy", "(", "self", ")", "elif", "self", ".", "type", "==", "'Hypermap:WorldMap'", ":", "update_layers_geonode_wm", "(", "self", ")", "elif", "self", ".", "type", "==", "'Hypermap:WARPER'", ":", "update_layers_warper", "(", "self", ")", "except", ":", "LOGGER", ".", "error", "(", "'Error updating layers for service %s'", "%", "self", ".", "uuid", ")", "signals", ".", "post_save", ".", "connect", "(", "layer_post_save", ",", "sender", "=", "Layer", ")" ]
Update layers for a service.
[ "Update", "layers", "for", "a", "service", "." ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L309-L336
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
Service.check_available
def check_available(self): """ Check for availability of a service and provide run metrics. """ success = True start_time = datetime.datetime.utcnow() message = '' LOGGER.debug('Checking service id %s' % self.id) try: title = None abstract = None keywords = [] wkt_geometry = None srs = '4326' if self.type == 'OGC:CSW': ows = CatalogueServiceWeb(self.url) title = ows.identification.title abstract = ows.identification.abstract keywords = ows.identification.keywords if self.type == 'OGC:WMS': ows = get_wms_version_negotiate(self.url) title = ows.identification.title abstract = ows.identification.abstract keywords = ows.identification.keywords for c in ows.contents: if ows.contents[c].parent is None: wkt_geometry = bbox2wktpolygon(ows.contents[c].boundingBoxWGS84) break if self.type == 'OGC:WMTS': ows = WebMapTileService(self.url) title = ows.identification.title abstract = ows.identification.abstract keywords = ows.identification.keywords if self.type == 'OSGeo:TMS': ows = TileMapService(self.url) title = ows.identification.title abstract = ows.identification.abstract keywords = ows.identification.keywords if self.type == 'ESRI:ArcGIS:MapServer': esri = ArcMapService(self.url) extent, srs = get_esri_extent(esri) title = esri.mapName if len(title) == 0: title = get_esri_service_name(self.url) wkt_geometry = bbox2wktpolygon([ extent['xmin'], extent['ymin'], extent['xmax'], extent['ymax'] ]) if self.type == 'ESRI:ArcGIS:ImageServer': esri = ArcImageService(self.url) extent, srs = get_esri_extent(esri) title = esri._json_struct['name'] if len(title) == 0: title = get_esri_service_name(self.url) wkt_geometry = bbox2wktpolygon([ extent['xmin'], extent['ymin'], extent['xmax'], extent['ymax'] ]) if self.type == 'Hypermap:WorldMap': urllib2.urlopen(self.url) if self.type == 'Hypermap:WorldMapLegacy': urllib2.urlopen(self.url) title = 'Harvard WorldMap Legacy' if self.type == 'Hypermap:WARPER': urllib2.urlopen(self.url) # update title without raising a signal and recursion if title: self.title = title Service.objects.filter(id=self.id).update(title=title) if abstract: self.abstract = abstract Service.objects.filter(id=self.id).update(abstract=abstract) if keywords: for kw in keywords: # FIXME: persist keywords to Django model self.keywords.add(kw) if wkt_geometry: self.wkt_geometry = wkt_geometry Service.objects.filter(id=self.id).update(wkt_geometry=wkt_geometry) xml = create_metadata_record( identifier=self.id_string, source=self.url, links=[[self.type, self.url]], format=self.type, type='service', title=title, abstract=abstract, keywords=keywords, wkt_geometry=self.wkt_geometry, srs=srs ) anytexts = gen_anytext(title, abstract, keywords) Service.objects.filter(id=self.id).update(anytext=anytexts, xml=xml, csw_type='service') except Exception, e: LOGGER.error(e, exc_info=True) message = str(e) success = False end_time = datetime.datetime.utcnow() delta = end_time - start_time response_time = '%s.%s' % (delta.seconds, delta.microseconds) check = Check( content_object=self, success=success, response_time=response_time, message=message ) check.save() LOGGER.debug('Service checked in %s seconds, status is %s' % (response_time, success))
python
def check_available(self): """ Check for availability of a service and provide run metrics. """ success = True start_time = datetime.datetime.utcnow() message = '' LOGGER.debug('Checking service id %s' % self.id) try: title = None abstract = None keywords = [] wkt_geometry = None srs = '4326' if self.type == 'OGC:CSW': ows = CatalogueServiceWeb(self.url) title = ows.identification.title abstract = ows.identification.abstract keywords = ows.identification.keywords if self.type == 'OGC:WMS': ows = get_wms_version_negotiate(self.url) title = ows.identification.title abstract = ows.identification.abstract keywords = ows.identification.keywords for c in ows.contents: if ows.contents[c].parent is None: wkt_geometry = bbox2wktpolygon(ows.contents[c].boundingBoxWGS84) break if self.type == 'OGC:WMTS': ows = WebMapTileService(self.url) title = ows.identification.title abstract = ows.identification.abstract keywords = ows.identification.keywords if self.type == 'OSGeo:TMS': ows = TileMapService(self.url) title = ows.identification.title abstract = ows.identification.abstract keywords = ows.identification.keywords if self.type == 'ESRI:ArcGIS:MapServer': esri = ArcMapService(self.url) extent, srs = get_esri_extent(esri) title = esri.mapName if len(title) == 0: title = get_esri_service_name(self.url) wkt_geometry = bbox2wktpolygon([ extent['xmin'], extent['ymin'], extent['xmax'], extent['ymax'] ]) if self.type == 'ESRI:ArcGIS:ImageServer': esri = ArcImageService(self.url) extent, srs = get_esri_extent(esri) title = esri._json_struct['name'] if len(title) == 0: title = get_esri_service_name(self.url) wkt_geometry = bbox2wktpolygon([ extent['xmin'], extent['ymin'], extent['xmax'], extent['ymax'] ]) if self.type == 'Hypermap:WorldMap': urllib2.urlopen(self.url) if self.type == 'Hypermap:WorldMapLegacy': urllib2.urlopen(self.url) title = 'Harvard WorldMap Legacy' if self.type == 'Hypermap:WARPER': urllib2.urlopen(self.url) # update title without raising a signal and recursion if title: self.title = title Service.objects.filter(id=self.id).update(title=title) if abstract: self.abstract = abstract Service.objects.filter(id=self.id).update(abstract=abstract) if keywords: for kw in keywords: # FIXME: persist keywords to Django model self.keywords.add(kw) if wkt_geometry: self.wkt_geometry = wkt_geometry Service.objects.filter(id=self.id).update(wkt_geometry=wkt_geometry) xml = create_metadata_record( identifier=self.id_string, source=self.url, links=[[self.type, self.url]], format=self.type, type='service', title=title, abstract=abstract, keywords=keywords, wkt_geometry=self.wkt_geometry, srs=srs ) anytexts = gen_anytext(title, abstract, keywords) Service.objects.filter(id=self.id).update(anytext=anytexts, xml=xml, csw_type='service') except Exception, e: LOGGER.error(e, exc_info=True) message = str(e) success = False end_time = datetime.datetime.utcnow() delta = end_time - start_time response_time = '%s.%s' % (delta.seconds, delta.microseconds) check = Check( content_object=self, success=success, response_time=response_time, message=message ) check.save() LOGGER.debug('Service checked in %s seconds, status is %s' % (response_time, success))
[ "def", "check_available", "(", "self", ")", ":", "success", "=", "True", "start_time", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "message", "=", "''", "LOGGER", ".", "debug", "(", "'Checking service id %s'", "%", "self", ".", "id", ")", "try", ":", "title", "=", "None", "abstract", "=", "None", "keywords", "=", "[", "]", "wkt_geometry", "=", "None", "srs", "=", "'4326'", "if", "self", ".", "type", "==", "'OGC:CSW'", ":", "ows", "=", "CatalogueServiceWeb", "(", "self", ".", "url", ")", "title", "=", "ows", ".", "identification", ".", "title", "abstract", "=", "ows", ".", "identification", ".", "abstract", "keywords", "=", "ows", ".", "identification", ".", "keywords", "if", "self", ".", "type", "==", "'OGC:WMS'", ":", "ows", "=", "get_wms_version_negotiate", "(", "self", ".", "url", ")", "title", "=", "ows", ".", "identification", ".", "title", "abstract", "=", "ows", ".", "identification", ".", "abstract", "keywords", "=", "ows", ".", "identification", ".", "keywords", "for", "c", "in", "ows", ".", "contents", ":", "if", "ows", ".", "contents", "[", "c", "]", ".", "parent", "is", "None", ":", "wkt_geometry", "=", "bbox2wktpolygon", "(", "ows", ".", "contents", "[", "c", "]", ".", "boundingBoxWGS84", ")", "break", "if", "self", ".", "type", "==", "'OGC:WMTS'", ":", "ows", "=", "WebMapTileService", "(", "self", ".", "url", ")", "title", "=", "ows", ".", "identification", ".", "title", "abstract", "=", "ows", ".", "identification", ".", "abstract", "keywords", "=", "ows", ".", "identification", ".", "keywords", "if", "self", ".", "type", "==", "'OSGeo:TMS'", ":", "ows", "=", "TileMapService", "(", "self", ".", "url", ")", "title", "=", "ows", ".", "identification", ".", "title", "abstract", "=", "ows", ".", "identification", ".", "abstract", "keywords", "=", "ows", ".", "identification", ".", "keywords", "if", "self", ".", "type", "==", "'ESRI:ArcGIS:MapServer'", ":", "esri", "=", "ArcMapService", "(", "self", ".", "url", ")", "extent", ",", "srs", "=", "get_esri_extent", "(", "esri", ")", "title", "=", "esri", ".", "mapName", "if", "len", "(", "title", ")", "==", "0", ":", "title", "=", "get_esri_service_name", "(", "self", ".", "url", ")", "wkt_geometry", "=", "bbox2wktpolygon", "(", "[", "extent", "[", "'xmin'", "]", ",", "extent", "[", "'ymin'", "]", ",", "extent", "[", "'xmax'", "]", ",", "extent", "[", "'ymax'", "]", "]", ")", "if", "self", ".", "type", "==", "'ESRI:ArcGIS:ImageServer'", ":", "esri", "=", "ArcImageService", "(", "self", ".", "url", ")", "extent", ",", "srs", "=", "get_esri_extent", "(", "esri", ")", "title", "=", "esri", ".", "_json_struct", "[", "'name'", "]", "if", "len", "(", "title", ")", "==", "0", ":", "title", "=", "get_esri_service_name", "(", "self", ".", "url", ")", "wkt_geometry", "=", "bbox2wktpolygon", "(", "[", "extent", "[", "'xmin'", "]", ",", "extent", "[", "'ymin'", "]", ",", "extent", "[", "'xmax'", "]", ",", "extent", "[", "'ymax'", "]", "]", ")", "if", "self", ".", "type", "==", "'Hypermap:WorldMap'", ":", "urllib2", ".", "urlopen", "(", "self", ".", "url", ")", "if", "self", ".", "type", "==", "'Hypermap:WorldMapLegacy'", ":", "urllib2", ".", "urlopen", "(", "self", ".", "url", ")", "title", "=", "'Harvard WorldMap Legacy'", "if", "self", ".", "type", "==", "'Hypermap:WARPER'", ":", "urllib2", ".", "urlopen", "(", "self", ".", "url", ")", "# update title without raising a signal and recursion", "if", "title", ":", "self", ".", "title", "=", "title", "Service", ".", "objects", ".", "filter", "(", "id", "=", "self", ".", "id", ")", 
".", "update", "(", "title", "=", "title", ")", "if", "abstract", ":", "self", ".", "abstract", "=", "abstract", "Service", ".", "objects", ".", "filter", "(", "id", "=", "self", ".", "id", ")", ".", "update", "(", "abstract", "=", "abstract", ")", "if", "keywords", ":", "for", "kw", "in", "keywords", ":", "# FIXME: persist keywords to Django model", "self", ".", "keywords", ".", "add", "(", "kw", ")", "if", "wkt_geometry", ":", "self", ".", "wkt_geometry", "=", "wkt_geometry", "Service", ".", "objects", ".", "filter", "(", "id", "=", "self", ".", "id", ")", ".", "update", "(", "wkt_geometry", "=", "wkt_geometry", ")", "xml", "=", "create_metadata_record", "(", "identifier", "=", "self", ".", "id_string", ",", "source", "=", "self", ".", "url", ",", "links", "=", "[", "[", "self", ".", "type", ",", "self", ".", "url", "]", "]", ",", "format", "=", "self", ".", "type", ",", "type", "=", "'service'", ",", "title", "=", "title", ",", "abstract", "=", "abstract", ",", "keywords", "=", "keywords", ",", "wkt_geometry", "=", "self", ".", "wkt_geometry", ",", "srs", "=", "srs", ")", "anytexts", "=", "gen_anytext", "(", "title", ",", "abstract", ",", "keywords", ")", "Service", ".", "objects", ".", "filter", "(", "id", "=", "self", ".", "id", ")", ".", "update", "(", "anytext", "=", "anytexts", ",", "xml", "=", "xml", ",", "csw_type", "=", "'service'", ")", "except", "Exception", ",", "e", ":", "LOGGER", ".", "error", "(", "e", ",", "exc_info", "=", "True", ")", "message", "=", "str", "(", "e", ")", "success", "=", "False", "end_time", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "delta", "=", "end_time", "-", "start_time", "response_time", "=", "'%s.%s'", "%", "(", "delta", ".", "seconds", ",", "delta", ".", "microseconds", ")", "check", "=", "Check", "(", "content_object", "=", "self", ",", "success", "=", "success", ",", "response_time", "=", "response_time", ",", "message", "=", "message", ")", "check", ".", "save", "(", ")", "LOGGER", ".", "debug", "(", "'Service checked in %s seconds, status is %s'", "%", "(", "response_time", ",", "success", ")", ")" ]
Check for availability of a service and provide run metrics.
[ "Check", "for", "availability", "of", "a", "service", "and", "provide", "run", "metrics", "." ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L338-L453
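check_available measures the probe duration with two datetime.utcnow() calls and stores '%s.%s' % (delta.seconds, delta.microseconds), which joins seconds and microseconds with a dot rather than producing a decimal number of seconds (one second and five microseconds comes out as "1.5"). Below is a hedged sketch of a timing wrapper that records total_seconds() instead; timed_check and probe are invented names.

import datetime

def timed_check(probe):
    """Run `probe()` and return (success, message, response_time_seconds).

    Uses the timedelta's total_seconds() so the recorded response time is
    a real decimal number of seconds.
    """
    start = datetime.datetime.utcnow()
    success, message = True, ''
    try:
        probe()
    except Exception as exc:  # record the failure instead of raising
        success, message = False, str(exc)
    delta = datetime.datetime.utcnow() - start
    return success, message, round(delta.total_seconds(), 6)

# success, message, seconds = timed_check(lambda: urlopen(service_url))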
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
Service.update_validity
def update_validity(self): """ Update validity of a service. """ # WM is always valid if self.type == 'Hypermap:WorldMap': return signals.post_save.disconnect(service_post_save, sender=Service) try: # some service now must be considered invalid: # 0. any service not exposed in SUPPORTED_SRS # 1. any WMTS service # 2. all of the NOAA layers is_valid = True # 0. any service not exposed in SUPPORTED_SRS if self.srs.filter(code__in=SUPPORTED_SRS).count() == 0: LOGGER.debug('Service with id %s is marked invalid because in not exposed in SUPPORTED_SRS' % self.id) is_valid = False # 1. any WMTS service if self.type == 'OGC:WMTS': LOGGER.debug('Service with id %s is marked invalid because it is of type OGC:WMTS' % self.id) is_valid = False # 2. all of the NOAA layers if 'noaa' in self.url.lower(): LOGGER.debug('Service with id %s is marked invalid because it is from NOAA' % self.id) is_valid = False # now we save the service self.is_valid = is_valid self.save() except: LOGGER.error('Error updating validity of the service!') signals.post_save.connect(service_post_save, sender=Service)
python
def update_validity(self): """ Update validity of a service. """ # WM is always valid if self.type == 'Hypermap:WorldMap': return signals.post_save.disconnect(service_post_save, sender=Service) try: # some service now must be considered invalid: # 0. any service not exposed in SUPPORTED_SRS # 1. any WMTS service # 2. all of the NOAA layers is_valid = True # 0. any service not exposed in SUPPORTED_SRS if self.srs.filter(code__in=SUPPORTED_SRS).count() == 0: LOGGER.debug('Service with id %s is marked invalid because in not exposed in SUPPORTED_SRS' % self.id) is_valid = False # 1. any WMTS service if self.type == 'OGC:WMTS': LOGGER.debug('Service with id %s is marked invalid because it is of type OGC:WMTS' % self.id) is_valid = False # 2. all of the NOAA layers if 'noaa' in self.url.lower(): LOGGER.debug('Service with id %s is marked invalid because it is from NOAA' % self.id) is_valid = False # now we save the service self.is_valid = is_valid self.save() except: LOGGER.error('Error updating validity of the service!') signals.post_save.connect(service_post_save, sender=Service)
[ "def", "update_validity", "(", "self", ")", ":", "# WM is always valid", "if", "self", ".", "type", "==", "'Hypermap:WorldMap'", ":", "return", "signals", ".", "post_save", ".", "disconnect", "(", "service_post_save", ",", "sender", "=", "Service", ")", "try", ":", "# some service now must be considered invalid:", "# 0. any service not exposed in SUPPORTED_SRS", "# 1. any WMTS service", "# 2. all of the NOAA layers", "is_valid", "=", "True", "# 0. any service not exposed in SUPPORTED_SRS", "if", "self", ".", "srs", ".", "filter", "(", "code__in", "=", "SUPPORTED_SRS", ")", ".", "count", "(", ")", "==", "0", ":", "LOGGER", ".", "debug", "(", "'Service with id %s is marked invalid because in not exposed in SUPPORTED_SRS'", "%", "self", ".", "id", ")", "is_valid", "=", "False", "# 1. any WMTS service", "if", "self", ".", "type", "==", "'OGC:WMTS'", ":", "LOGGER", ".", "debug", "(", "'Service with id %s is marked invalid because it is of type OGC:WMTS'", "%", "self", ".", "id", ")", "is_valid", "=", "False", "# 2. all of the NOAA layers", "if", "'noaa'", "in", "self", ".", "url", ".", "lower", "(", ")", ":", "LOGGER", ".", "debug", "(", "'Service with id %s is marked invalid because it is from NOAA'", "%", "self", ".", "id", ")", "is_valid", "=", "False", "# now we save the service", "self", ".", "is_valid", "=", "is_valid", "self", ".", "save", "(", ")", "except", ":", "LOGGER", ".", "error", "(", "'Error updating validity of the service!'", ")", "signals", ".", "post_save", ".", "connect", "(", "service_post_save", ",", "sender", "=", "Service", ")" ]
Update validity of a service.
[ "Update", "validity", "of", "a", "service", "." ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L455-L497
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
Catalog.get_search_url
def get_search_url(self): """ resolve the search url no matter if local or remote. :return: url or exception """ if self.is_remote: return self.url return reverse('search_api', args=[self.slug])
python
def get_search_url(self): """ resolve the search url no matter if local or remote. :return: url or exception """ if self.is_remote: return self.url return reverse('search_api', args=[self.slug])
[ "def", "get_search_url", "(", "self", ")", ":", "if", "self", ".", "is_remote", ":", "return", "self", ".", "url", "return", "reverse", "(", "'search_api'", ",", "args", "=", "[", "self", ".", "slug", "]", ")" ]
resolve the search url no matter if local or remote. :return: url or exception
[ "resolve", "the", "search", "url", "no", "matter", "if", "local", "or", "remote", ".", ":", "return", ":", "url", "or", "exception" ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L525-L534
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
Layer.get_url_endpoint
def get_url_endpoint(self): """ Returns the Hypermap endpoint for a layer. This endpoint will be the WMTS MapProxy endpoint, only for WM we use the original endpoint. """ endpoint = self.url if self.type not in ('Hypermap:WorldMap',): endpoint = 'registry/%s/layer/%s/map/wmts/1.0.0/WMTSCapabilities.xml' % ( self.catalog.slug, self.id ) return endpoint
python
def get_url_endpoint(self): """ Returns the Hypermap endpoint for a layer. This endpoint will be the WMTS MapProxy endpoint, only for WM we use the original endpoint. """ endpoint = self.url if self.type not in ('Hypermap:WorldMap',): endpoint = 'registry/%s/layer/%s/map/wmts/1.0.0/WMTSCapabilities.xml' % ( self.catalog.slug, self.id ) return endpoint
[ "def", "get_url_endpoint", "(", "self", ")", ":", "endpoint", "=", "self", ".", "url", "if", "self", ".", "type", "not", "in", "(", "'Hypermap:WorldMap'", ",", ")", ":", "endpoint", "=", "'registry/%s/layer/%s/map/wmts/1.0.0/WMTSCapabilities.xml'", "%", "(", "self", ".", "catalog", ".", "slug", ",", "self", ".", "id", ")", "return", "endpoint" ]
Returns the Hypermap endpoint for a layer. This endpoint will be the WMTS MapProxy endpoint, only for WM we use the original endpoint.
[ "Returns", "the", "Hypermap", "endpoint", "for", "a", "layer", ".", "This", "endpoint", "will", "be", "the", "WMTS", "MapProxy", "endpoint", "only", "for", "WM", "we", "use", "the", "original", "endpoint", "." ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L573-L584
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
Layer.check_available
def check_available(self): """ Check for availability of a layer and provide run metrics. """ success = True start_time = datetime.datetime.utcnow() message = '' LOGGER.debug('Checking layer id %s' % self.id) signals.post_save.disconnect(layer_post_save, sender=Layer) try: self.update_thumbnail() except ValueError, err: # caused by update_thumbnail() # self.href is empty in arcserver.ExportMap if str(err).startswith("unknown url type:"): LOGGER.debug('Thumbnail can not be updated: %s' % str(err)) except Exception, err: message = str(err) success = False signals.post_save.connect(layer_post_save, sender=Layer) end_time = datetime.datetime.utcnow() delta = end_time - start_time response_time = '%s.%s' % (delta.seconds, delta.microseconds) check = Check( content_object=self, success=success, response_time=response_time, message=message ) check.save() LOGGER.debug('Layer checked in %s seconds, status is %s' % (response_time, success)) return success, message
python
def check_available(self): """ Check for availability of a layer and provide run metrics. """ success = True start_time = datetime.datetime.utcnow() message = '' LOGGER.debug('Checking layer id %s' % self.id) signals.post_save.disconnect(layer_post_save, sender=Layer) try: self.update_thumbnail() except ValueError, err: # caused by update_thumbnail() # self.href is empty in arcserver.ExportMap if str(err).startswith("unknown url type:"): LOGGER.debug('Thumbnail can not be updated: %s' % str(err)) except Exception, err: message = str(err) success = False signals.post_save.connect(layer_post_save, sender=Layer) end_time = datetime.datetime.utcnow() delta = end_time - start_time response_time = '%s.%s' % (delta.seconds, delta.microseconds) check = Check( content_object=self, success=success, response_time=response_time, message=message ) check.save() LOGGER.debug('Layer checked in %s seconds, status is %s' % (response_time, success)) return success, message
[ "def", "check_available", "(", "self", ")", ":", "success", "=", "True", "start_time", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "message", "=", "''", "LOGGER", ".", "debug", "(", "'Checking layer id %s'", "%", "self", ".", "id", ")", "signals", ".", "post_save", ".", "disconnect", "(", "layer_post_save", ",", "sender", "=", "Layer", ")", "try", ":", "self", ".", "update_thumbnail", "(", ")", "except", "ValueError", ",", "err", ":", "# caused by update_thumbnail()", "# self.href is empty in arcserver.ExportMap", "if", "str", "(", "err", ")", ".", "startswith", "(", "\"unknown url type:\"", ")", ":", "LOGGER", ".", "debug", "(", "'Thumbnail can not be updated: %s'", "%", "str", "(", "err", ")", ")", "except", "Exception", ",", "err", ":", "message", "=", "str", "(", "err", ")", "success", "=", "False", "signals", ".", "post_save", ".", "connect", "(", "layer_post_save", ",", "sender", "=", "Layer", ")", "end_time", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "delta", "=", "end_time", "-", "start_time", "response_time", "=", "'%s.%s'", "%", "(", "delta", ".", "seconds", ",", "delta", ".", "microseconds", ")", "check", "=", "Check", "(", "content_object", "=", "self", ",", "success", "=", "success", ",", "response_time", "=", "response_time", ",", "message", "=", "message", ")", "check", ".", "save", "(", ")", "LOGGER", ".", "debug", "(", "'Layer checked in %s seconds, status is %s'", "%", "(", "response_time", ",", "success", ")", ")", "return", "success", ",", "message" ]
Check for availability of a layer and provide run metrics.
[ "Check", "for", "availability", "of", "a", "layer", "and", "provide", "run", "metrics", "." ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L800-L837
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
Layer.registry_tags
def registry_tags(self, query_string='{http://gis.harvard.edu/HHypermap/registry/0.1}property'): """ Get extra metadata tagged with a registry keyword. For example: <registry:property name="nomination/serviceOwner" value="True"/> <registry:property name="nominator/name" value="Random Person"/> <registry:property name="nominator/email" value="contact@example.com"/> <registry:property name="lastmodifiedby" value="2016-10-23"/> <registry:property name="updateFreq" value="as needed"/> <registry:property name="mission" value="RIO"/> <registry:property name="authentication" value="Open"/> <registry:property name="crisis" value="False"/> <registry:property name="intlAgreement/multi" value="none"/> <registry:property name="intlAgreement/bilateral" value="none"/> <registry:property name="classificationRecord/classification" value="Unclassified"/> <registry:property name="classificationData/classification" value="Unclassified"/> <registry:property name="serviceName/classification/classification" value="Unclassified"/> <registry:property name="serviceName/classification/classifiedBy" value="TNT"/> <registry:property name="description/classification/classification" value="Unclassified"/> <registry:property name="description/classification/classifiedBy" value="TNT"/> <registry:property name="ContactInformation/Primary/owner" value="Help Desk"/> <registry:property name="ContactInformation/Primary/organization" value="Three Letter One"/> <registry:property name="ContactInformation/Email" value="contact@example.com"/> <registry:property name="ContactInformation/Phone" value="Toll-free: 1 800 555-5555"/> <registry:property name="license/restrictions" value="none"/> <registry:property name="license/copyright" value="Private. For testing purposes."/> """ from pycsw.core.etree import etree parsed = etree.fromstring(self.xml, etree.XMLParser(resolve_entities=False)) registry_tags = parsed.findall(query_string) registry_dict = {} for tag in registry_tags: try: registry_dict[tag.attrib['name']] = tag.attrib['value'] except Exception, e: LOGGER.error(e, exc_info=True) return registry_dict
python
def registry_tags(self, query_string='{http://gis.harvard.edu/HHypermap/registry/0.1}property'): """ Get extra metadata tagged with a registry keyword. For example: <registry:property name="nomination/serviceOwner" value="True"/> <registry:property name="nominator/name" value="Random Person"/> <registry:property name="nominator/email" value="contact@example.com"/> <registry:property name="lastmodifiedby" value="2016-10-23"/> <registry:property name="updateFreq" value="as needed"/> <registry:property name="mission" value="RIO"/> <registry:property name="authentication" value="Open"/> <registry:property name="crisis" value="False"/> <registry:property name="intlAgreement/multi" value="none"/> <registry:property name="intlAgreement/bilateral" value="none"/> <registry:property name="classificationRecord/classification" value="Unclassified"/> <registry:property name="classificationData/classification" value="Unclassified"/> <registry:property name="serviceName/classification/classification" value="Unclassified"/> <registry:property name="serviceName/classification/classifiedBy" value="TNT"/> <registry:property name="description/classification/classification" value="Unclassified"/> <registry:property name="description/classification/classifiedBy" value="TNT"/> <registry:property name="ContactInformation/Primary/owner" value="Help Desk"/> <registry:property name="ContactInformation/Primary/organization" value="Three Letter One"/> <registry:property name="ContactInformation/Email" value="contact@example.com"/> <registry:property name="ContactInformation/Phone" value="Toll-free: 1 800 555-5555"/> <registry:property name="license/restrictions" value="none"/> <registry:property name="license/copyright" value="Private. For testing purposes."/> """ from pycsw.core.etree import etree parsed = etree.fromstring(self.xml, etree.XMLParser(resolve_entities=False)) registry_tags = parsed.findall(query_string) registry_dict = {} for tag in registry_tags: try: registry_dict[tag.attrib['name']] = tag.attrib['value'] except Exception, e: LOGGER.error(e, exc_info=True) return registry_dict
[ "def", "registry_tags", "(", "self", ",", "query_string", "=", "'{http://gis.harvard.edu/HHypermap/registry/0.1}property'", ")", ":", "from", "pycsw", ".", "core", ".", "etree", "import", "etree", "parsed", "=", "etree", ".", "fromstring", "(", "self", ".", "xml", ",", "etree", ".", "XMLParser", "(", "resolve_entities", "=", "False", ")", ")", "registry_tags", "=", "parsed", ".", "findall", "(", "query_string", ")", "registry_dict", "=", "{", "}", "for", "tag", "in", "registry_tags", ":", "try", ":", "registry_dict", "[", "tag", ".", "attrib", "[", "'name'", "]", "]", "=", "tag", ".", "attrib", "[", "'value'", "]", "except", "Exception", ",", "e", ":", "LOGGER", ".", "error", "(", "e", ",", "exc_info", "=", "True", ")", "return", "registry_dict" ]
Get extra metadata tagged with a registry keyword. For example: <registry:property name="nomination/serviceOwner" value="True"/> <registry:property name="nominator/name" value="Random Person"/> <registry:property name="nominator/email" value="contact@example.com"/> <registry:property name="lastmodifiedby" value="2016-10-23"/> <registry:property name="updateFreq" value="as needed"/> <registry:property name="mission" value="RIO"/> <registry:property name="authentication" value="Open"/> <registry:property name="crisis" value="False"/> <registry:property name="intlAgreement/multi" value="none"/> <registry:property name="intlAgreement/bilateral" value="none"/> <registry:property name="classificationRecord/classification" value="Unclassified"/> <registry:property name="classificationData/classification" value="Unclassified"/> <registry:property name="serviceName/classification/classification" value="Unclassified"/> <registry:property name="serviceName/classification/classifiedBy" value="TNT"/> <registry:property name="description/classification/classification" value="Unclassified"/> <registry:property name="description/classification/classifiedBy" value="TNT"/> <registry:property name="ContactInformation/Primary/owner" value="Help Desk"/> <registry:property name="ContactInformation/Primary/organization" value="Three Letter One"/> <registry:property name="ContactInformation/Email" value="contact@example.com"/> <registry:property name="ContactInformation/Phone" value="Toll-free: 1 800 555-5555"/> <registry:property name="license/restrictions" value="none"/> <registry:property name="license/copyright" value="Private. For testing purposes."/>
[ "Get", "extra", "metadata", "tagged", "with", "a", "registry", "keyword", ".", "For", "example", ":", "<registry", ":", "property", "name", "=", "nomination", "/", "serviceOwner", "value", "=", "True", "/", ">", "<registry", ":", "property", "name", "=", "nominator", "/", "name", "value", "=", "Random", "Person", "/", ">", "<registry", ":", "property", "name", "=", "nominator", "/", "email", "value", "=", "contact@example", ".", "com", "/", ">", "<registry", ":", "property", "name", "=", "lastmodifiedby", "value", "=", "2016", "-", "10", "-", "23", "/", ">", "<registry", ":", "property", "name", "=", "updateFreq", "value", "=", "as", "needed", "/", ">", "<registry", ":", "property", "name", "=", "mission", "value", "=", "RIO", "/", ">", "<registry", ":", "property", "name", "=", "authentication", "value", "=", "Open", "/", ">", "<registry", ":", "property", "name", "=", "crisis", "value", "=", "False", "/", ">", "<registry", ":", "property", "name", "=", "intlAgreement", "/", "multi", "value", "=", "none", "/", ">", "<registry", ":", "property", "name", "=", "intlAgreement", "/", "bilateral", "value", "=", "none", "/", ">", "<registry", ":", "property", "name", "=", "classificationRecord", "/", "classification", "value", "=", "Unclassified", "/", ">", "<registry", ":", "property", "name", "=", "classificationData", "/", "classification", "value", "=", "Unclassified", "/", ">", "<registry", ":", "property", "name", "=", "serviceName", "/", "classification", "/", "classification", "value", "=", "Unclassified", "/", ">", "<registry", ":", "property", "name", "=", "serviceName", "/", "classification", "/", "classifiedBy", "value", "=", "TNT", "/", ">", "<registry", ":", "property", "name", "=", "description", "/", "classification", "/", "classification", "value", "=", "Unclassified", "/", ">", "<registry", ":", "property", "name", "=", "description", "/", "classification", "/", "classifiedBy", "value", "=", "TNT", "/", ">", "<registry", ":", "property", "name", "=", "ContactInformation", "/", "Primary", "/", "owner", "value", "=", "Help", "Desk", "/", ">", "<registry", ":", "property", "name", "=", "ContactInformation", "/", "Primary", "/", "organization", "value", "=", "Three", "Letter", "One", "/", ">", "<registry", ":", "property", "name", "=", "ContactInformation", "/", "Email", "value", "=", "contact@example", ".", "com", "/", ">", "<registry", ":", "property", "name", "=", "ContactInformation", "/", "Phone", "value", "=", "Toll", "-", "free", ":", "1", "800", "555", "-", "5555", "/", ">", "<registry", ":", "property", "name", "=", "license", "/", "restrictions", "value", "=", "none", "/", ">", "<registry", ":", "property", "name", "=", "license", "/", "copyright", "value", "=", "Private", ".", "For", "testing", "purposes", ".", "/", ">" ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L839-L879
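Layer.registry_tags collects every registry:property element from the stored metadata XML using ElementTree's Clark notation, where the namespace URI is wrapped in braces in front of the tag name. Below is a self-contained sketch of the same lookup with the standard-library xml.etree.ElementTree and a made-up sample record; the original parses self.xml through pycsw.core.etree with entity resolution disabled, which this toy version does not reproduce.

import xml.etree.ElementTree as ET

REGISTRY_NS = 'http://gis.harvard.edu/HHypermap/registry/0.1'

SAMPLE = (
    '<record xmlns:registry="{ns}">'
    '<registry:property name="authentication" value="Open"/>'
    '<registry:property name="crisis" value="False"/>'
    '</record>'
).format(ns=REGISTRY_NS)

def registry_properties(xml_text):
    """Return {name: value} for every registry:property element.

    Mirrors Layer.registry_tags: findall with the '{namespace}tag'
    (Clark notation) query collects the property elements, and their
    name/value attributes become dictionary entries.
    """
    root = ET.fromstring(xml_text)
    query = '{%s}property' % REGISTRY_NS
    return {el.attrib['name']: el.attrib['value'] for el in root.findall(query)}

print(registry_properties(SAMPLE))
# {'authentication': 'Open', 'crisis': 'False'}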
sethmlarson/trytravis
trytravis.py
_input_github_repo
def _input_github_repo(url=None): """ Grabs input from the user and saves it as their trytravis target repo """ if url is None: url = user_input('Input the URL of the GitHub repository ' 'to use as a `trytravis` repository: ') url = url.strip() http_match = _HTTPS_REGEX.match(url) ssh_match = _SSH_REGEX.match(url) if not http_match and not ssh_match: raise RuntimeError('That URL doesn\'t look like a valid ' 'GitHub URL. We expect something ' 'of the form: `https://github.com/[USERNAME]/' '[REPOSITORY]` or `ssh://git@github.com/' '[USERNAME]/[REPOSITORY]') # Make sure that the user actually made a new repository on GitHub. if http_match: _, name = http_match.groups() else: _, name = ssh_match.groups() if 'trytravis' not in name: raise RuntimeError('You must have `trytravis` in the name of your ' 'repository. This is a security feature to reduce ' 'chances of running git push -f on a repository ' 'you don\'t mean to.') # Make sure that the user actually wants to use this repository. accept = user_input('Remember that `trytravis` will make commits on your ' 'behalf to `%s`. Are you sure you wish to use this ' 'repository? Type `y` or `yes` to accept: ' % url) if accept.lower() not in ['y', 'yes']: raise RuntimeError('Operation aborted by user.') if not os.path.isdir(config_dir): os.makedirs(config_dir) with open(os.path.join(config_dir, 'repo'), 'w+') as f: f.truncate() f.write(url) print('Repository saved successfully.')
python
def _input_github_repo(url=None): """ Grabs input from the user and saves it as their trytravis target repo """ if url is None: url = user_input('Input the URL of the GitHub repository ' 'to use as a `trytravis` repository: ') url = url.strip() http_match = _HTTPS_REGEX.match(url) ssh_match = _SSH_REGEX.match(url) if not http_match and not ssh_match: raise RuntimeError('That URL doesn\'t look like a valid ' 'GitHub URL. We expect something ' 'of the form: `https://github.com/[USERNAME]/' '[REPOSITORY]` or `ssh://git@github.com/' '[USERNAME]/[REPOSITORY]') # Make sure that the user actually made a new repository on GitHub. if http_match: _, name = http_match.groups() else: _, name = ssh_match.groups() if 'trytravis' not in name: raise RuntimeError('You must have `trytravis` in the name of your ' 'repository. This is a security feature to reduce ' 'chances of running git push -f on a repository ' 'you don\'t mean to.') # Make sure that the user actually wants to use this repository. accept = user_input('Remember that `trytravis` will make commits on your ' 'behalf to `%s`. Are you sure you wish to use this ' 'repository? Type `y` or `yes` to accept: ' % url) if accept.lower() not in ['y', 'yes']: raise RuntimeError('Operation aborted by user.') if not os.path.isdir(config_dir): os.makedirs(config_dir) with open(os.path.join(config_dir, 'repo'), 'w+') as f: f.truncate() f.write(url) print('Repository saved successfully.')
[ "def", "_input_github_repo", "(", "url", "=", "None", ")", ":", "if", "url", "is", "None", ":", "url", "=", "user_input", "(", "'Input the URL of the GitHub repository '", "'to use as a `trytravis` repository: '", ")", "url", "=", "url", ".", "strip", "(", ")", "http_match", "=", "_HTTPS_REGEX", ".", "match", "(", "url", ")", "ssh_match", "=", "_SSH_REGEX", ".", "match", "(", "url", ")", "if", "not", "http_match", "and", "not", "ssh_match", ":", "raise", "RuntimeError", "(", "'That URL doesn\\'t look like a valid '", "'GitHub URL. We expect something '", "'of the form: `https://github.com/[USERNAME]/'", "'[REPOSITORY]` or `ssh://git@github.com/'", "'[USERNAME]/[REPOSITORY]'", ")", "# Make sure that the user actually made a new repository on GitHub.", "if", "http_match", ":", "_", ",", "name", "=", "http_match", ".", "groups", "(", ")", "else", ":", "_", ",", "name", "=", "ssh_match", ".", "groups", "(", ")", "if", "'trytravis'", "not", "in", "name", ":", "raise", "RuntimeError", "(", "'You must have `trytravis` in the name of your '", "'repository. This is a security feature to reduce '", "'chances of running git push -f on a repository '", "'you don\\'t mean to.'", ")", "# Make sure that the user actually wants to use this repository.", "accept", "=", "user_input", "(", "'Remember that `trytravis` will make commits on your '", "'behalf to `%s`. Are you sure you wish to use this '", "'repository? Type `y` or `yes` to accept: '", "%", "url", ")", "if", "accept", ".", "lower", "(", ")", "not", "in", "[", "'y'", ",", "'yes'", "]", ":", "raise", "RuntimeError", "(", "'Operation aborted by user.'", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "config_dir", ")", ":", "os", ".", "makedirs", "(", "config_dir", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "config_dir", ",", "'repo'", ")", ",", "'w+'", ")", "as", "f", ":", "f", ".", "truncate", "(", ")", "f", ".", "write", "(", "url", ")", "print", "(", "'Repository saved successfully.'", ")" ]
Grabs input from the user and saves it as their trytravis target repo
[ "Grabs", "input", "from", "the", "user", "and", "saves", "it", "as", "their", "trytravis", "target", "repo" ]
train
https://github.com/sethmlarson/trytravis/blob/d92ed708fe71d8db93a6df8077d23ee39ec0364e/trytravis.py#L86-L125
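The checks above rely on two module-level patterns, _HTTPS_REGEX and _SSH_REGEX, defined elsewhere in trytravis.py. A minimal sketch of compatible patterns, assuming each exposes exactly two groups (owner, repository name); the patterns and the example URLs are illustrative, not copied from the module:

import re

# Illustrative stand-ins; the real module defines its own patterns.
_HTTPS_REGEX = re.compile(r'^https://github\.com/([^/]+)/([^/]+?)(?:\.git)?/?$')
_SSH_REGEX = re.compile(r'^ssh://git@github\.com/([^/]+)/([^/]+?)(?:\.git)?/?$')

for url in ('https://github.com/someuser/trytravis-target',
            'ssh://git@github.com/someuser/trytravis-target'):
    match = _HTTPS_REGEX.match(url) or _SSH_REGEX.match(url)
    owner, name = match.groups()
    print(owner, name, 'trytravis' in name)  # someuser trytravis-target True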
sethmlarson/trytravis
trytravis.py
_load_github_repo
def _load_github_repo(): """ Loads the GitHub repository from the users config. """ if 'TRAVIS' in os.environ: raise RuntimeError('Detected that we are running in Travis. ' 'Stopping to prevent infinite loops.') try: with open(os.path.join(config_dir, 'repo'), 'r') as f: return f.read() except (OSError, IOError): raise RuntimeError('Could not find your repository. ' 'Have you ran `trytravis --repo`?')
python
def _load_github_repo(): """ Loads the GitHub repository from the users config. """ if 'TRAVIS' in os.environ: raise RuntimeError('Detected that we are running in Travis. ' 'Stopping to prevent infinite loops.') try: with open(os.path.join(config_dir, 'repo'), 'r') as f: return f.read() except (OSError, IOError): raise RuntimeError('Could not find your repository. ' 'Have you ran `trytravis --repo`?')
[ "def", "_load_github_repo", "(", ")", ":", "if", "'TRAVIS'", "in", "os", ".", "environ", ":", "raise", "RuntimeError", "(", "'Detected that we are running in Travis. '", "'Stopping to prevent infinite loops.'", ")", "try", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "config_dir", ",", "'repo'", ")", ",", "'r'", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")", "except", "(", "OSError", ",", "IOError", ")", ":", "raise", "RuntimeError", "(", "'Could not find your repository. '", "'Have you ran `trytravis --repo`?'", ")" ]
Loads the GitHub repository from the users config.
[ "Loads", "the", "GitHub", "repository", "from", "the", "users", "config", "." ]
train
https://github.com/sethmlarson/trytravis/blob/d92ed708fe71d8db93a6df8077d23ee39ec0364e/trytravis.py#L128-L138
sethmlarson/trytravis
trytravis.py
_submit_changes_to_github_repo
def _submit_changes_to_github_repo(path, url): """ Temporarily commits local changes and submits them to the GitHub repository that the user has specified. Then reverts the changes to the git repository if a commit was necessary. """ try: repo = git.Repo(path) except Exception: raise RuntimeError('Couldn\'t locate a repository at `%s`.' % path) commited = False try: try: repo.delete_remote('trytravis') except Exception: pass print('Adding a temporary remote to ' '`%s`...' % url) remote = repo.create_remote('trytravis', url) print('Adding all local changes...') repo.git.add('--all') try: print('Committing local changes...') timestamp = datetime.datetime.now().isoformat() repo.git.commit(m='trytravis-' + timestamp) commited = True except git.exc.GitCommandError as e: if 'nothing to commit' in str(e): commited = False else: raise commit = repo.head.commit.hexsha committed_at = repo.head.commit.committed_datetime print('Pushing to `trytravis` remote...') remote.push(force=True) finally: if commited: print('Reverting to old state...') repo.git.reset('HEAD^') try: repo.delete_remote('trytravis') except Exception: pass return commit, committed_at
python
def _submit_changes_to_github_repo(path, url): """ Temporarily commits local changes and submits them to the GitHub repository that the user has specified. Then reverts the changes to the git repository if a commit was necessary. """ try: repo = git.Repo(path) except Exception: raise RuntimeError('Couldn\'t locate a repository at `%s`.' % path) commited = False try: try: repo.delete_remote('trytravis') except Exception: pass print('Adding a temporary remote to ' '`%s`...' % url) remote = repo.create_remote('trytravis', url) print('Adding all local changes...') repo.git.add('--all') try: print('Committing local changes...') timestamp = datetime.datetime.now().isoformat() repo.git.commit(m='trytravis-' + timestamp) commited = True except git.exc.GitCommandError as e: if 'nothing to commit' in str(e): commited = False else: raise commit = repo.head.commit.hexsha committed_at = repo.head.commit.committed_datetime print('Pushing to `trytravis` remote...') remote.push(force=True) finally: if commited: print('Reverting to old state...') repo.git.reset('HEAD^') try: repo.delete_remote('trytravis') except Exception: pass return commit, committed_at
[ "def", "_submit_changes_to_github_repo", "(", "path", ",", "url", ")", ":", "try", ":", "repo", "=", "git", ".", "Repo", "(", "path", ")", "except", "Exception", ":", "raise", "RuntimeError", "(", "'Couldn\\'t locate a repository at `%s`.'", "%", "path", ")", "commited", "=", "False", "try", ":", "try", ":", "repo", ".", "delete_remote", "(", "'trytravis'", ")", "except", "Exception", ":", "pass", "print", "(", "'Adding a temporary remote to '", "'`%s`...'", "%", "url", ")", "remote", "=", "repo", ".", "create_remote", "(", "'trytravis'", ",", "url", ")", "print", "(", "'Adding all local changes...'", ")", "repo", ".", "git", ".", "add", "(", "'--all'", ")", "try", ":", "print", "(", "'Committing local changes...'", ")", "timestamp", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "isoformat", "(", ")", "repo", ".", "git", ".", "commit", "(", "m", "=", "'trytravis-'", "+", "timestamp", ")", "commited", "=", "True", "except", "git", ".", "exc", ".", "GitCommandError", "as", "e", ":", "if", "'nothing to commit'", "in", "str", "(", "e", ")", ":", "commited", "=", "False", "else", ":", "raise", "commit", "=", "repo", ".", "head", ".", "commit", ".", "hexsha", "committed_at", "=", "repo", ".", "head", ".", "commit", ".", "committed_datetime", "print", "(", "'Pushing to `trytravis` remote...'", ")", "remote", ".", "push", "(", "force", "=", "True", ")", "finally", ":", "if", "commited", ":", "print", "(", "'Reverting to old state...'", ")", "repo", ".", "git", ".", "reset", "(", "'HEAD^'", ")", "try", ":", "repo", ".", "delete_remote", "(", "'trytravis'", ")", "except", "Exception", ":", "pass", "return", "commit", ",", "committed_at" ]
Temporarily commits local changes and submits them to the GitHub repository that the user has specified. Then reverts the changes to the git repository if a commit was necessary.
[ "Temporarily", "commits", "local", "changes", "and", "submits", "them", "to", "the", "GitHub", "repository", "that", "the", "user", "has", "specified", ".", "Then", "reverts", "the", "changes", "to", "the", "git", "repository", "if", "a", "commit", "was", "necessary", "." ]
train
https://github.com/sethmlarson/trytravis/blob/d92ed708fe71d8db93a6df8077d23ee39ec0364e/trytravis.py#L141-L185
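The commit/push/reset sequence above can be condensed into a small context manager for experimentation. This is a hypothetical helper, not part of trytravis, and it assumes the working tree actually has uncommitted changes (the real function also handles the "nothing to commit" case):

from contextlib import contextmanager
import git

@contextmanager
def temporary_commit(repo_path, message='trytravis-temp'):
    # Commit everything, hand back the sha, then undo the commit afterwards.
    repo = git.Repo(repo_path)
    repo.git.add('--all')
    repo.git.commit(m=message)
    try:
        yield repo.head.commit.hexsha
    finally:
        repo.git.reset('HEAD^')

# Usage sketch:
# with temporary_commit('.') as sha:
#     print('temporary commit', sha)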
sethmlarson/trytravis
trytravis.py
_wait_for_travis_build
def _wait_for_travis_build(url, commit, committed_at): """ Waits for a Travis build to appear with the given commit SHA """ print('Waiting for a Travis build to appear ' 'for `%s` after `%s`...' % (commit, committed_at)) import requests slug = _slug_from_url(url) start_time = time.time() build_id = None while time.time() - start_time < 60: with requests.get('https://api.travis-ci.org/repos/%s/builds' % slug, headers=_travis_headers()) as r: if not r.ok: raise RuntimeError('Could not reach the Travis API ' 'endpoint. Additional information: ' '%s' % str(r.content)) # Search through all commits and builds to find our build. commit_to_sha = {} json = r.json() for travis_commit in sorted(json['commits'], key=lambda x: x['committed_at']): travis_committed_at = datetime.datetime.strptime( travis_commit['committed_at'], '%Y-%m-%dT%H:%M:%SZ' ).replace(tzinfo=utc) if travis_committed_at < committed_at: continue commit_to_sha[travis_commit['id']] = travis_commit['sha'] for build in json['builds']: if (build['commit_id'] in commit_to_sha and commit_to_sha[build['commit_id']] == commit): build_id = build['id'] print('Travis build id: `%d`' % build_id) print('Travis build URL: `https://travis-ci.org/' '%s/builds/%d`' % (slug, build_id)) if build_id is not None: break time.sleep(3.0) else: raise RuntimeError('Timed out while waiting for a Travis build ' 'to start. Is Travis configured for `%s`?' % url) return build_id
python
def _wait_for_travis_build(url, commit, committed_at): """ Waits for a Travis build to appear with the given commit SHA """ print('Waiting for a Travis build to appear ' 'for `%s` after `%s`...' % (commit, committed_at)) import requests slug = _slug_from_url(url) start_time = time.time() build_id = None while time.time() - start_time < 60: with requests.get('https://api.travis-ci.org/repos/%s/builds' % slug, headers=_travis_headers()) as r: if not r.ok: raise RuntimeError('Could not reach the Travis API ' 'endpoint. Additional information: ' '%s' % str(r.content)) # Search through all commits and builds to find our build. commit_to_sha = {} json = r.json() for travis_commit in sorted(json['commits'], key=lambda x: x['committed_at']): travis_committed_at = datetime.datetime.strptime( travis_commit['committed_at'], '%Y-%m-%dT%H:%M:%SZ' ).replace(tzinfo=utc) if travis_committed_at < committed_at: continue commit_to_sha[travis_commit['id']] = travis_commit['sha'] for build in json['builds']: if (build['commit_id'] in commit_to_sha and commit_to_sha[build['commit_id']] == commit): build_id = build['id'] print('Travis build id: `%d`' % build_id) print('Travis build URL: `https://travis-ci.org/' '%s/builds/%d`' % (slug, build_id)) if build_id is not None: break time.sleep(3.0) else: raise RuntimeError('Timed out while waiting for a Travis build ' 'to start. Is Travis configured for `%s`?' % url) return build_id
[ "def", "_wait_for_travis_build", "(", "url", ",", "commit", ",", "committed_at", ")", ":", "print", "(", "'Waiting for a Travis build to appear '", "'for `%s` after `%s`...'", "%", "(", "commit", ",", "committed_at", ")", ")", "import", "requests", "slug", "=", "_slug_from_url", "(", "url", ")", "start_time", "=", "time", ".", "time", "(", ")", "build_id", "=", "None", "while", "time", ".", "time", "(", ")", "-", "start_time", "<", "60", ":", "with", "requests", ".", "get", "(", "'https://api.travis-ci.org/repos/%s/builds'", "%", "slug", ",", "headers", "=", "_travis_headers", "(", ")", ")", "as", "r", ":", "if", "not", "r", ".", "ok", ":", "raise", "RuntimeError", "(", "'Could not reach the Travis API '", "'endpoint. Additional information: '", "'%s'", "%", "str", "(", "r", ".", "content", ")", ")", "# Search through all commits and builds to find our build.", "commit_to_sha", "=", "{", "}", "json", "=", "r", ".", "json", "(", ")", "for", "travis_commit", "in", "sorted", "(", "json", "[", "'commits'", "]", ",", "key", "=", "lambda", "x", ":", "x", "[", "'committed_at'", "]", ")", ":", "travis_committed_at", "=", "datetime", ".", "datetime", ".", "strptime", "(", "travis_commit", "[", "'committed_at'", "]", ",", "'%Y-%m-%dT%H:%M:%SZ'", ")", ".", "replace", "(", "tzinfo", "=", "utc", ")", "if", "travis_committed_at", "<", "committed_at", ":", "continue", "commit_to_sha", "[", "travis_commit", "[", "'id'", "]", "]", "=", "travis_commit", "[", "'sha'", "]", "for", "build", "in", "json", "[", "'builds'", "]", ":", "if", "(", "build", "[", "'commit_id'", "]", "in", "commit_to_sha", "and", "commit_to_sha", "[", "build", "[", "'commit_id'", "]", "]", "==", "commit", ")", ":", "build_id", "=", "build", "[", "'id'", "]", "print", "(", "'Travis build id: `%d`'", "%", "build_id", ")", "print", "(", "'Travis build URL: `https://travis-ci.org/'", "'%s/builds/%d`'", "%", "(", "slug", ",", "build_id", ")", ")", "if", "build_id", "is", "not", "None", ":", "break", "time", ".", "sleep", "(", "3.0", ")", "else", ":", "raise", "RuntimeError", "(", "'Timed out while waiting for a Travis build '", "'to start. Is Travis configured for `%s`?'", "%", "url", ")", "return", "build_id" ]
Waits for a Travis build to appear with the given commit SHA
[ "Waits", "for", "a", "Travis", "build", "to", "appear", "with", "the", "given", "commit", "SHA" ]
train
https://github.com/sethmlarson/trytravis/blob/d92ed708fe71d8db93a6df8077d23ee39ec0364e/trytravis.py#L188-L234
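The commit-to-build join above goes through commit_id because the builds payload does not carry the sha directly. A small sketch of that join against a hand-made payload (the ids, sha and timestamps are made up):

import datetime

utc = datetime.timezone.utc  # stand-in for the module-level utc tzinfo

payload = {
    'commits': [{'id': 42, 'sha': 'abc123', 'committed_at': '2018-01-02T03:04:05Z'}],
    'builds': [{'id': 7, 'commit_id': 42}],
}
commit = 'abc123'
committed_at = datetime.datetime(2018, 1, 1, tzinfo=utc)

commit_to_sha = {}
for travis_commit in payload['commits']:
    travis_committed_at = datetime.datetime.strptime(
        travis_commit['committed_at'], '%Y-%m-%dT%H:%M:%SZ').replace(tzinfo=utc)
    if travis_committed_at >= committed_at:
        commit_to_sha[travis_commit['id']] = travis_commit['sha']

build_id = next(b['id'] for b in payload['builds']
                if commit_to_sha.get(b['commit_id']) == commit)
print(build_id)  # 7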
sethmlarson/trytravis
trytravis.py
_watch_travis_build
def _watch_travis_build(build_id): """ Watches and progressively outputs information about a given Travis build """ import requests try: build_size = None # type: int running = True while running: with requests.get('https://api.travis-ci.org/builds/%d' % build_id, headers=_travis_headers()) as r: json = r.json() if build_size is not None: if build_size > 1: sys.stdout.write('\r\x1b[%dA' % build_size) else: sys.stdout.write('\r') build_size = len(json['jobs']) running = False current_number = 1 for job in json['jobs']: # pragma: no coverage color, state, is_running = _travis_job_state(job['state']) if is_running: running = True platform = job['config']['os'] if platform == 'osx': platform = ' osx ' env = job['config'].get('env', '') sudo = 's' if job['config'].get('sudo', True) else 'c' lang = job['config'].get('language', 'generic') padding = ' ' * (len(str(build_size)) - len(str(current_number))) number = str(current_number) + padding current_number += 1 job_display = '#' + ' '.join([number, state, platform, sudo, lang, env]) print(color + job_display + colorama.Style.RESET_ALL) time.sleep(3.0) except KeyboardInterrupt: pass
python
def _watch_travis_build(build_id): """ Watches and progressively outputs information about a given Travis build """ import requests try: build_size = None # type: int running = True while running: with requests.get('https://api.travis-ci.org/builds/%d' % build_id, headers=_travis_headers()) as r: json = r.json() if build_size is not None: if build_size > 1: sys.stdout.write('\r\x1b[%dA' % build_size) else: sys.stdout.write('\r') build_size = len(json['jobs']) running = False current_number = 1 for job in json['jobs']: # pragma: no coverage color, state, is_running = _travis_job_state(job['state']) if is_running: running = True platform = job['config']['os'] if platform == 'osx': platform = ' osx ' env = job['config'].get('env', '') sudo = 's' if job['config'].get('sudo', True) else 'c' lang = job['config'].get('language', 'generic') padding = ' ' * (len(str(build_size)) - len(str(current_number))) number = str(current_number) + padding current_number += 1 job_display = '#' + ' '.join([number, state, platform, sudo, lang, env]) print(color + job_display + colorama.Style.RESET_ALL) time.sleep(3.0) except KeyboardInterrupt: pass
[ "def", "_watch_travis_build", "(", "build_id", ")", ":", "import", "requests", "try", ":", "build_size", "=", "None", "# type: int", "running", "=", "True", "while", "running", ":", "with", "requests", ".", "get", "(", "'https://api.travis-ci.org/builds/%d'", "%", "build_id", ",", "headers", "=", "_travis_headers", "(", ")", ")", "as", "r", ":", "json", "=", "r", ".", "json", "(", ")", "if", "build_size", "is", "not", "None", ":", "if", "build_size", ">", "1", ":", "sys", ".", "stdout", ".", "write", "(", "'\\r\\x1b[%dA'", "%", "build_size", ")", "else", ":", "sys", ".", "stdout", ".", "write", "(", "'\\r'", ")", "build_size", "=", "len", "(", "json", "[", "'jobs'", "]", ")", "running", "=", "False", "current_number", "=", "1", "for", "job", "in", "json", "[", "'jobs'", "]", ":", "# pragma: no coverage", "color", ",", "state", ",", "is_running", "=", "_travis_job_state", "(", "job", "[", "'state'", "]", ")", "if", "is_running", ":", "running", "=", "True", "platform", "=", "job", "[", "'config'", "]", "[", "'os'", "]", "if", "platform", "==", "'osx'", ":", "platform", "=", "' osx '", "env", "=", "job", "[", "'config'", "]", ".", "get", "(", "'env'", ",", "''", ")", "sudo", "=", "'s'", "if", "job", "[", "'config'", "]", ".", "get", "(", "'sudo'", ",", "True", ")", "else", "'c'", "lang", "=", "job", "[", "'config'", "]", ".", "get", "(", "'language'", ",", "'generic'", ")", "padding", "=", "' '", "*", "(", "len", "(", "str", "(", "build_size", ")", ")", "-", "len", "(", "str", "(", "current_number", ")", ")", ")", "number", "=", "str", "(", "current_number", ")", "+", "padding", "current_number", "+=", "1", "job_display", "=", "'#'", "+", "' '", ".", "join", "(", "[", "number", ",", "state", ",", "platform", ",", "sudo", ",", "lang", ",", "env", "]", ")", "print", "(", "color", "+", "job_display", "+", "colorama", ".", "Style", ".", "RESET_ALL", ")", "time", ".", "sleep", "(", "3.0", ")", "except", "KeyboardInterrupt", ":", "pass" ]
Watches and progressively outputs information about a given Travis build
[ "Watches", "and", "progressively", "outputs", "information", "about", "a", "given", "Travis", "build" ]
train
https://github.com/sethmlarson/trytravis/blob/d92ed708fe71d8db93a6df8077d23ee39ec0364e/trytravis.py#L237-L286
sethmlarson/trytravis
trytravis.py
_travis_job_state
def _travis_job_state(state): """ Converts a Travis state into a state character, color, and whether it's still running or a stopped state. """ if state in [None, 'queued', 'created', 'received']: return colorama.Fore.YELLOW, '*', True elif state in ['started', 'running']: return colorama.Fore.LIGHTYELLOW_EX, '*', True elif state == 'passed': return colorama.Fore.LIGHTGREEN_EX, 'P', False elif state == 'failed': return colorama.Fore.LIGHTRED_EX, 'X', False elif state == 'errored': return colorama.Fore.LIGHTRED_EX, '!', False elif state == 'canceled': return colorama.Fore.LIGHTBLACK_EX, 'X', False else: raise RuntimeError('unknown state: %s' % str(state))
python
def _travis_job_state(state): """ Converts a Travis state into a state character, color, and whether it's still running or a stopped state. """ if state in [None, 'queued', 'created', 'received']: return colorama.Fore.YELLOW, '*', True elif state in ['started', 'running']: return colorama.Fore.LIGHTYELLOW_EX, '*', True elif state == 'passed': return colorama.Fore.LIGHTGREEN_EX, 'P', False elif state == 'failed': return colorama.Fore.LIGHTRED_EX, 'X', False elif state == 'errored': return colorama.Fore.LIGHTRED_EX, '!', False elif state == 'canceled': return colorama.Fore.LIGHTBLACK_EX, 'X', False else: raise RuntimeError('unknown state: %s' % str(state))
[ "def", "_travis_job_state", "(", "state", ")", ":", "if", "state", "in", "[", "None", ",", "'queued'", ",", "'created'", ",", "'received'", "]", ":", "return", "colorama", ".", "Fore", ".", "YELLOW", ",", "'*'", ",", "True", "elif", "state", "in", "[", "'started'", ",", "'running'", "]", ":", "return", "colorama", ".", "Fore", ".", "LIGHTYELLOW_EX", ",", "'*'", ",", "True", "elif", "state", "==", "'passed'", ":", "return", "colorama", ".", "Fore", ".", "LIGHTGREEN_EX", ",", "'P'", ",", "False", "elif", "state", "==", "'failed'", ":", "return", "colorama", ".", "Fore", ".", "LIGHTRED_EX", ",", "'X'", ",", "False", "elif", "state", "==", "'errored'", ":", "return", "colorama", ".", "Fore", ".", "LIGHTRED_EX", ",", "'!'", ",", "False", "elif", "state", "==", "'canceled'", ":", "return", "colorama", ".", "Fore", ".", "LIGHTBLACK_EX", ",", "'X'", ",", "False", "else", ":", "raise", "RuntimeError", "(", "'unknown state: %s'", "%", "str", "(", "state", ")", ")" ]
Converts a Travis state into a state character, color, and whether it's still running or a stopped state.
[ "Converts", "a", "Travis", "state", "into", "a", "state", "character", "color", "and", "whether", "it", "s", "still", "running", "or", "a", "stopped", "state", "." ]
train
https://github.com/sethmlarson/trytravis/blob/d92ed708fe71d8db93a6df8077d23ee39ec0364e/trytravis.py#L289-L305
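The returned (color, character, is_running) triple is what _watch_travis_build prints for each job. A usage sketch, assuming _travis_job_state is importable from trytravis and colorama has been initialised; the job numbers and platform value are illustrative:

import colorama

def render_job(number, state, platform='linux'):
    # Turn one job state into a coloured one-line summary.
    color, char, is_running = _travis_job_state(state)
    line = '#%d %s %s' % (number, char, platform)
    return color + line + colorama.Style.RESET_ALL, is_running

# render_job(1, 'passed')  -> green '#1 P linux', False (finished)
# render_job(2, 'started') -> yellow '#2 * linux', True (still running)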
sethmlarson/trytravis
trytravis.py
_slug_from_url
def _slug_from_url(url): """ Parses a project slug out of either an HTTPS or SSH URL. """ http_match = _HTTPS_REGEX.match(url) ssh_match = _SSH_REGEX.match(url) if not http_match and not ssh_match: raise RuntimeError('Could not parse the URL (`%s`) ' 'for your repository.' % url) if http_match: return '/'.join(http_match.groups()) else: return '/'.join(ssh_match.groups())
python
def _slug_from_url(url): """ Parses a project slug out of either an HTTPS or SSH URL. """ http_match = _HTTPS_REGEX.match(url) ssh_match = _SSH_REGEX.match(url) if not http_match and not ssh_match: raise RuntimeError('Could not parse the URL (`%s`) ' 'for your repository.' % url) if http_match: return '/'.join(http_match.groups()) else: return '/'.join(ssh_match.groups())
[ "def", "_slug_from_url", "(", "url", ")", ":", "http_match", "=", "_HTTPS_REGEX", ".", "match", "(", "url", ")", "ssh_match", "=", "_SSH_REGEX", ".", "match", "(", "url", ")", "if", "not", "http_match", "and", "not", "ssh_match", ":", "raise", "RuntimeError", "(", "'Could not parse the URL (`%s`) '", "'for your repository.'", "%", "url", ")", "if", "http_match", ":", "return", "'/'", ".", "join", "(", "http_match", ".", "groups", "(", ")", ")", "else", ":", "return", "'/'", ".", "join", "(", "ssh_match", ".", "groups", "(", ")", ")" ]
Parses a project slug out of either an HTTPS or SSH URL.
[ "Parses", "a", "project", "slug", "out", "of", "either", "an", "HTTPS", "or", "SSH", "URL", "." ]
train
https://github.com/sethmlarson/trytravis/blob/d92ed708fe71d8db93a6df8077d23ee39ec0364e/trytravis.py#L308-L318
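A usage sketch for _slug_from_url, assuming the module regexes capture (owner, repository) for both URL forms; the URLs themselves are illustrative:

# Both spellings should reduce to the same slug.
slug = _slug_from_url('https://github.com/someuser/trytravis-target')
assert slug == 'someuser/trytravis-target'
assert slug == _slug_from_url('ssh://git@github.com/someuser/trytravis-target')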
sethmlarson/trytravis
trytravis.py
_version_string
def _version_string(): """ Gets the output for `trytravis --version`. """ platform_system = platform.system() if platform_system == 'Linux': os_name, os_version, _ = platform.dist() else: os_name = platform_system os_version = platform.version() python_version = platform.python_version() return 'trytravis %s (%s %s, python %s)' % (__version__, os_name.lower(), os_version, python_version)
python
def _version_string(): """ Gets the output for `trytravis --version`. """ platform_system = platform.system() if platform_system == 'Linux': os_name, os_version, _ = platform.dist() else: os_name = platform_system os_version = platform.version() python_version = platform.python_version() return 'trytravis %s (%s %s, python %s)' % (__version__, os_name.lower(), os_version, python_version)
[ "def", "_version_string", "(", ")", ":", "platform_system", "=", "platform", ".", "system", "(", ")", "if", "platform_system", "==", "'Linux'", ":", "os_name", ",", "os_version", ",", "_", "=", "platform", ".", "dist", "(", ")", "else", ":", "os_name", "=", "platform_system", "os_version", "=", "platform", ".", "version", "(", ")", "python_version", "=", "platform", ".", "python_version", "(", ")", "return", "'trytravis %s (%s %s, python %s)'", "%", "(", "__version__", ",", "os_name", ".", "lower", "(", ")", ",", "os_version", ",", "python_version", ")" ]
Gets the output for `trytravis --version`.
[ "Gets", "the", "output", "for", "trytravis", "--", "version", "." ]
train
https://github.com/sethmlarson/trytravis/blob/d92ed708fe71d8db93a6df8077d23ee39ec0364e/trytravis.py#L321-L333
sethmlarson/trytravis
trytravis.py
_main
def _main(argv): """ Function that acts just like main() except doesn't catch exceptions. """ repo_input_argv = len(argv) == 2 and argv[0] in ['--repo', '-r', '-R'] # We only support a single argv parameter. if len(argv) > 1 and not repo_input_argv: _main(['--help']) # Parse the command and do the right thing. if len(argv) == 1 or repo_input_argv: arg = argv[0] # Help/usage if arg in ['-h', '--help', '-H']: print(_USAGE) # Version elif arg in ['-v', '--version', '-V']: print(_version_string()) # Token elif arg in ['-r', '--repo', '-R']: if len(argv) == 2: url = argv[1] else: url = None _input_github_repo(url) # No wait elif arg in ['--no-wait', '-nw']: url = _load_github_repo() commit, committed = _submit_changes_to_github_repo(os.getcwd(), url) build_id = _wait_for_travis_build(url, commit, committed) # Help string else: _main(['--help']) # No arguments means we're trying to submit to Travis. elif len(argv) == 0: url = _load_github_repo() commit, committed = _submit_changes_to_github_repo(os.getcwd(), url) build_id = _wait_for_travis_build(url, commit, committed) _watch_travis_build(build_id)
python
def _main(argv): """ Function that acts just like main() except doesn't catch exceptions. """ repo_input_argv = len(argv) == 2 and argv[0] in ['--repo', '-r', '-R'] # We only support a single argv parameter. if len(argv) > 1 and not repo_input_argv: _main(['--help']) # Parse the command and do the right thing. if len(argv) == 1 or repo_input_argv: arg = argv[0] # Help/usage if arg in ['-h', '--help', '-H']: print(_USAGE) # Version elif arg in ['-v', '--version', '-V']: print(_version_string()) # Token elif arg in ['-r', '--repo', '-R']: if len(argv) == 2: url = argv[1] else: url = None _input_github_repo(url) # No wait elif arg in ['--no-wait', '-nw']: url = _load_github_repo() commit, committed = _submit_changes_to_github_repo(os.getcwd(), url) build_id = _wait_for_travis_build(url, commit, committed) # Help string else: _main(['--help']) # No arguments means we're trying to submit to Travis. elif len(argv) == 0: url = _load_github_repo() commit, committed = _submit_changes_to_github_repo(os.getcwd(), url) build_id = _wait_for_travis_build(url, commit, committed) _watch_travis_build(build_id)
[ "def", "_main", "(", "argv", ")", ":", "repo_input_argv", "=", "len", "(", "argv", ")", "==", "2", "and", "argv", "[", "0", "]", "in", "[", "'--repo'", ",", "'-r'", ",", "'-R'", "]", "# We only support a single argv parameter.", "if", "len", "(", "argv", ")", ">", "1", "and", "not", "repo_input_argv", ":", "_main", "(", "[", "'--help'", "]", ")", "# Parse the command and do the right thing.", "if", "len", "(", "argv", ")", "==", "1", "or", "repo_input_argv", ":", "arg", "=", "argv", "[", "0", "]", "# Help/usage", "if", "arg", "in", "[", "'-h'", ",", "'--help'", ",", "'-H'", "]", ":", "print", "(", "_USAGE", ")", "# Version", "elif", "arg", "in", "[", "'-v'", ",", "'--version'", ",", "'-V'", "]", ":", "print", "(", "_version_string", "(", ")", ")", "# Token", "elif", "arg", "in", "[", "'-r'", ",", "'--repo'", ",", "'-R'", "]", ":", "if", "len", "(", "argv", ")", "==", "2", ":", "url", "=", "argv", "[", "1", "]", "else", ":", "url", "=", "None", "_input_github_repo", "(", "url", ")", "# No wait", "elif", "arg", "in", "[", "'--no-wait'", ",", "'-nw'", "]", ":", "url", "=", "_load_github_repo", "(", ")", "commit", ",", "committed", "=", "_submit_changes_to_github_repo", "(", "os", ".", "getcwd", "(", ")", ",", "url", ")", "build_id", "=", "_wait_for_travis_build", "(", "url", ",", "commit", ",", "committed", ")", "# Help string", "else", ":", "_main", "(", "[", "'--help'", "]", ")", "# No arguments means we're trying to submit to Travis.", "elif", "len", "(", "argv", ")", "==", "0", ":", "url", "=", "_load_github_repo", "(", ")", "commit", ",", "committed", "=", "_submit_changes_to_github_repo", "(", "os", ".", "getcwd", "(", ")", ",", "url", ")", "build_id", "=", "_wait_for_travis_build", "(", "url", ",", "commit", ",", "committed", ")", "_watch_travis_build", "(", "build_id", ")" ]
Function that acts just like main() except doesn't catch exceptions.
[ "Function", "that", "acts", "just", "like", "main", "()", "except", "doesn", "t", "catch", "exceptions", "." ]
train
https://github.com/sethmlarson/trytravis/blob/d92ed708fe71d8db93a6df8077d23ee39ec0364e/trytravis.py#L343-L388
sethmlarson/trytravis
trytravis.py
main
def main(argv=None): # pragma: no coverage """ Main entry point when the user runs the `trytravis` command. """ try: colorama.init() if argv is None: argv = sys.argv[1:] _main(argv) except RuntimeError as e: print(colorama.Fore.RED + 'ERROR: ' + str(e) + colorama.Style.RESET_ALL) sys.exit(1) else: sys.exit(0)
python
def main(argv=None): # pragma: no coverage """ Main entry point when the user runs the `trytravis` command. """ try: colorama.init() if argv is None: argv = sys.argv[1:] _main(argv) except RuntimeError as e: print(colorama.Fore.RED + 'ERROR: ' + str(e) + colorama.Style.RESET_ALL) sys.exit(1) else: sys.exit(0)
[ "def", "main", "(", "argv", "=", "None", ")", ":", "# pragma: no coverage", "try", ":", "colorama", ".", "init", "(", ")", "if", "argv", "is", "None", ":", "argv", "=", "sys", ".", "argv", "[", "1", ":", "]", "_main", "(", "argv", ")", "except", "RuntimeError", "as", "e", ":", "print", "(", "colorama", ".", "Fore", ".", "RED", "+", "'ERROR: '", "+", "str", "(", "e", ")", "+", "colorama", ".", "Style", ".", "RESET_ALL", ")", "sys", ".", "exit", "(", "1", ")", "else", ":", "sys", ".", "exit", "(", "0", ")" ]
Main entry point when the user runs the `trytravis` command.
[ "Main", "entry", "point", "when", "the", "user", "runs", "the", "trytravis", "command", "." ]
train
https://github.com/sethmlarson/trytravis/blob/d92ed708fe71d8db93a6df8077d23ee39ec0364e/trytravis.py#L391-L403
cga-harvard/Hypermap-Registry
hypermap/search/views.py
csw_global_dispatch
def csw_global_dispatch(request, url=None, catalog_id=None): """pycsw wrapper""" if request.user.is_authenticated(): # turn on CSW-T settings.REGISTRY_PYCSW['manager']['transactions'] = 'true' env = request.META.copy() # TODO: remove this workaround # HH should be able to pass env['wsgi.input'] without hanging # details at https://github.com/cga-harvard/HHypermap/issues/94 if request.method == 'POST': from StringIO import StringIO env['wsgi.input'] = StringIO(request.body) env.update({'local.app_root': os.path.dirname(__file__), 'REQUEST_URI': request.build_absolute_uri()}) # if this is a catalog based CSW, then update settings if url is not None: settings.REGISTRY_PYCSW['server']['url'] = url if catalog_id is not None: settings.REGISTRY_PYCSW['repository']['filter'] = 'catalog_id = %d' % catalog_id csw = server.Csw(settings.REGISTRY_PYCSW, env) content = csw.dispatch_wsgi() # pycsw 2.0 has an API break: # pycsw < 2.0: content = xml_response # pycsw >= 2.0: content = [http_status_code, content] # deal with the API break if isinstance(content, list): # pycsw 2.0+ content = content[1] response = HttpResponse(content, content_type=csw.contenttype) # TODO: Fix before 1.0 release. CORS should not be enabled blindly like this. response['Access-Control-Allow-Origin'] = '*' return response
python
def csw_global_dispatch(request, url=None, catalog_id=None): """pycsw wrapper""" if request.user.is_authenticated(): # turn on CSW-T settings.REGISTRY_PYCSW['manager']['transactions'] = 'true' env = request.META.copy() # TODO: remove this workaround # HH should be able to pass env['wsgi.input'] without hanging # details at https://github.com/cga-harvard/HHypermap/issues/94 if request.method == 'POST': from StringIO import StringIO env['wsgi.input'] = StringIO(request.body) env.update({'local.app_root': os.path.dirname(__file__), 'REQUEST_URI': request.build_absolute_uri()}) # if this is a catalog based CSW, then update settings if url is not None: settings.REGISTRY_PYCSW['server']['url'] = url if catalog_id is not None: settings.REGISTRY_PYCSW['repository']['filter'] = 'catalog_id = %d' % catalog_id csw = server.Csw(settings.REGISTRY_PYCSW, env) content = csw.dispatch_wsgi() # pycsw 2.0 has an API break: # pycsw < 2.0: content = xml_response # pycsw >= 2.0: content = [http_status_code, content] # deal with the API break if isinstance(content, list): # pycsw 2.0+ content = content[1] response = HttpResponse(content, content_type=csw.contenttype) # TODO: Fix before 1.0 release. CORS should not be enabled blindly like this. response['Access-Control-Allow-Origin'] = '*' return response
[ "def", "csw_global_dispatch", "(", "request", ",", "url", "=", "None", ",", "catalog_id", "=", "None", ")", ":", "if", "request", ".", "user", ".", "is_authenticated", "(", ")", ":", "# turn on CSW-T", "settings", ".", "REGISTRY_PYCSW", "[", "'manager'", "]", "[", "'transactions'", "]", "=", "'true'", "env", "=", "request", ".", "META", ".", "copy", "(", ")", "# TODO: remove this workaround", "# HH should be able to pass env['wsgi.input'] without hanging", "# details at https://github.com/cga-harvard/HHypermap/issues/94", "if", "request", ".", "method", "==", "'POST'", ":", "from", "StringIO", "import", "StringIO", "env", "[", "'wsgi.input'", "]", "=", "StringIO", "(", "request", ".", "body", ")", "env", ".", "update", "(", "{", "'local.app_root'", ":", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'REQUEST_URI'", ":", "request", ".", "build_absolute_uri", "(", ")", "}", ")", "# if this is a catalog based CSW, then update settings", "if", "url", "is", "not", "None", ":", "settings", ".", "REGISTRY_PYCSW", "[", "'server'", "]", "[", "'url'", "]", "=", "url", "if", "catalog_id", "is", "not", "None", ":", "settings", ".", "REGISTRY_PYCSW", "[", "'repository'", "]", "[", "'filter'", "]", "=", "'catalog_id = %d'", "%", "catalog_id", "csw", "=", "server", ".", "Csw", "(", "settings", ".", "REGISTRY_PYCSW", ",", "env", ")", "content", "=", "csw", ".", "dispatch_wsgi", "(", ")", "# pycsw 2.0 has an API break:", "# pycsw < 2.0: content = xml_response", "# pycsw >= 2.0: content = [http_status_code, content]", "# deal with the API break", "if", "isinstance", "(", "content", ",", "list", ")", ":", "# pycsw 2.0+", "content", "=", "content", "[", "1", "]", "response", "=", "HttpResponse", "(", "content", ",", "content_type", "=", "csw", ".", "contenttype", ")", "# TODO: Fix before 1.0 release. CORS should not be enabled blindly like this.", "response", "[", "'Access-Control-Allow-Origin'", "]", "=", "'*'", "return", "response" ]
pycsw wrapper
[ "pycsw", "wrapper" ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/search/views.py#L19-L59
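The isinstance check near the end of csw_global_dispatch exists because pycsw changed the return type of dispatch_wsgi() at 2.0. A small, hypothetical helper that normalises both shapes (the sample values are illustrative):

def pycsw_content(dispatch_result):
    # pycsw < 2.0 returns the XML response directly;
    # pycsw >= 2.0 returns [http_status_code, content].
    if isinstance(dispatch_result, list):
        return dispatch_result[1]
    return dispatch_result

# pycsw_content('<csw:Capabilities/>')             -> '<csw:Capabilities/>'
# pycsw_content(['200 OK', '<csw:Capabilities/>']) -> '<csw:Capabilities/>'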
cga-harvard/Hypermap-Registry
hypermap/search/views.py
csw_global_dispatch_by_catalog
def csw_global_dispatch_by_catalog(request, catalog_slug): """pycsw wrapper for catalogs""" catalog = get_object_or_404(Catalog, slug=catalog_slug) if catalog: # define catalog specific settings url = settings.SITE_URL.rstrip('/') + request.path.rstrip('/') return csw_global_dispatch(request, url=url, catalog_id=catalog.id)
python
def csw_global_dispatch_by_catalog(request, catalog_slug): """pycsw wrapper for catalogs""" catalog = get_object_or_404(Catalog, slug=catalog_slug) if catalog: # define catalog specific settings url = settings.SITE_URL.rstrip('/') + request.path.rstrip('/') return csw_global_dispatch(request, url=url, catalog_id=catalog.id)
[ "def", "csw_global_dispatch_by_catalog", "(", "request", ",", "catalog_slug", ")", ":", "catalog", "=", "get_object_or_404", "(", "Catalog", ",", "slug", "=", "catalog_slug", ")", "if", "catalog", ":", "# define catalog specific settings", "url", "=", "settings", ".", "SITE_URL", ".", "rstrip", "(", "'/'", ")", "+", "request", ".", "path", ".", "rstrip", "(", "'/'", ")", "return", "csw_global_dispatch", "(", "request", ",", "url", "=", "url", ",", "catalog_id", "=", "catalog", ".", "id", ")" ]
pycsw wrapper for catalogs
[ "pycsw", "wrapper", "for", "catalogs" ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/search/views.py#L63-L70
cga-harvard/Hypermap-Registry
hypermap/search/views.py
opensearch_dispatch
def opensearch_dispatch(request): """OpenSearch wrapper""" ctx = { 'shortname': settings.REGISTRY_PYCSW['metadata:main']['identification_title'], 'description': settings.REGISTRY_PYCSW['metadata:main']['identification_abstract'], 'developer': settings.REGISTRY_PYCSW['metadata:main']['contact_name'], 'contact': settings.REGISTRY_PYCSW['metadata:main']['contact_email'], 'attribution': settings.REGISTRY_PYCSW['metadata:main']['provider_name'], 'tags': settings.REGISTRY_PYCSW['metadata:main']['identification_keywords'].replace(',', ' '), 'url': settings.SITE_URL.rstrip('/') } return render_to_response('search/opensearch_description.xml', ctx, content_type='application/opensearchdescription+xml')
python
def opensearch_dispatch(request): """OpenSearch wrapper""" ctx = { 'shortname': settings.REGISTRY_PYCSW['metadata:main']['identification_title'], 'description': settings.REGISTRY_PYCSW['metadata:main']['identification_abstract'], 'developer': settings.REGISTRY_PYCSW['metadata:main']['contact_name'], 'contact': settings.REGISTRY_PYCSW['metadata:main']['contact_email'], 'attribution': settings.REGISTRY_PYCSW['metadata:main']['provider_name'], 'tags': settings.REGISTRY_PYCSW['metadata:main']['identification_keywords'].replace(',', ' '), 'url': settings.SITE_URL.rstrip('/') } return render_to_response('search/opensearch_description.xml', ctx, content_type='application/opensearchdescription+xml')
[ "def", "opensearch_dispatch", "(", "request", ")", ":", "ctx", "=", "{", "'shortname'", ":", "settings", ".", "REGISTRY_PYCSW", "[", "'metadata:main'", "]", "[", "'identification_title'", "]", ",", "'description'", ":", "settings", ".", "REGISTRY_PYCSW", "[", "'metadata:main'", "]", "[", "'identification_abstract'", "]", ",", "'developer'", ":", "settings", ".", "REGISTRY_PYCSW", "[", "'metadata:main'", "]", "[", "'contact_name'", "]", ",", "'contact'", ":", "settings", ".", "REGISTRY_PYCSW", "[", "'metadata:main'", "]", "[", "'contact_email'", "]", ",", "'attribution'", ":", "settings", ".", "REGISTRY_PYCSW", "[", "'metadata:main'", "]", "[", "'provider_name'", "]", ",", "'tags'", ":", "settings", ".", "REGISTRY_PYCSW", "[", "'metadata:main'", "]", "[", "'identification_keywords'", "]", ".", "replace", "(", "','", ",", "' '", ")", ",", "'url'", ":", "settings", ".", "SITE_URL", ".", "rstrip", "(", "'/'", ")", "}", "return", "render_to_response", "(", "'search/opensearch_description.xml'", ",", "ctx", ",", "content_type", "=", "'application/opensearchdescription+xml'", ")" ]
OpenSearch wrapper
[ "OpenSearch", "wrapper" ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/search/views.py#L73-L87
cga-harvard/Hypermap-Registry
hypermap/aggregator/elasticsearch_client.py
ESHypermap.good_coords
def good_coords(coords): """ passed a string array """ if (len(coords) != 4): return False for coord in coords[0:3]: try: num = float(coord) if (math.isnan(num)): return False if (math.isinf(num)): return False except ValueError: return False return True
python
def good_coords(coords): """ passed a string array """ if (len(coords) != 4): return False for coord in coords[0:3]: try: num = float(coord) if (math.isnan(num)): return False if (math.isinf(num)): return False except ValueError: return False return True
[ "def", "good_coords", "(", "coords", ")", ":", "if", "(", "len", "(", "coords", ")", "!=", "4", ")", ":", "return", "False", "for", "coord", "in", "coords", "[", "0", ":", "3", "]", ":", "try", ":", "num", "=", "float", "(", "coord", ")", "if", "(", "math", ".", "isnan", "(", "num", ")", ")", ":", "return", "False", "if", "(", "math", ".", "isinf", "(", "num", ")", ")", ":", "return", "False", "except", "ValueError", ":", "return", "False", "return", "True" ]
passed a string array
[ "passed", "a", "string", "array" ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/elasticsearch_client.py#L36-L49
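A usage sketch for good_coords with illustrative bounding boxes, assuming ESHypermap is importable from hypermap.aggregator.elasticsearch_client. Note that the loop iterates coords[0:3], so only the first three values are parsed as floats:

print(ESHypermap.good_coords(['-180', '-90', '180', '90']))   # True
print(ESHypermap.good_coords(['-180', '-90', '180']))         # False: wrong length
print(ESHypermap.good_coords(['-180', 'NaN', '180', '90']))   # False: NaN is rejected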
cga-harvard/Hypermap-Registry
hypermap/aggregator/elasticsearch_client.py
ESHypermap.clear_es
def clear_es(): """Clear all indexes in the es core""" # TODO: should receive a catalog slug. ESHypermap.es.indices.delete(ESHypermap.index_name, ignore=[400, 404]) LOGGER.debug('Elasticsearch: Index cleared')
python
def clear_es(): """Clear all indexes in the es core""" # TODO: should receive a catalog slug. ESHypermap.es.indices.delete(ESHypermap.index_name, ignore=[400, 404]) LOGGER.debug('Elasticsearch: Index cleared')
[ "def", "clear_es", "(", ")", ":", "# TODO: should receive a catalog slug.", "ESHypermap", ".", "es", ".", "indices", ".", "delete", "(", "ESHypermap", ".", "index_name", ",", "ignore", "=", "[", "400", ",", "404", "]", ")", "LOGGER", ".", "debug", "(", "'Elasticsearch: Index cleared'", ")" ]
Clear all indexes in the es core
[ "Clear", "all", "indexes", "in", "the", "es", "core" ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/elasticsearch_client.py#L221-L225
cga-harvard/Hypermap-Registry
hypermap/aggregator/elasticsearch_client.py
ESHypermap.create_indices
def create_indices(catalog_slug): """Create ES core indices """ # TODO: enable auto_create_index in the ES nodes to make this implicit. # https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#index-creation # http://support.searchly.com/customer/en/portal/questions/ # 16312889-is-automatic-index-creation-disabled-?new=16312889 mapping = { "mappings": { "layer": { "properties": { "layer_geoshape": { "type": "geo_shape", "tree": "quadtree", "precision": REGISTRY_MAPPING_PRECISION } } } } } ESHypermap.es.indices.create(catalog_slug, ignore=[400, 404], body=mapping)
python
def create_indices(catalog_slug): """Create ES core indices """ # TODO: enable auto_create_index in the ES nodes to make this implicit. # https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#index-creation # http://support.searchly.com/customer/en/portal/questions/ # 16312889-is-automatic-index-creation-disabled-?new=16312889 mapping = { "mappings": { "layer": { "properties": { "layer_geoshape": { "type": "geo_shape", "tree": "quadtree", "precision": REGISTRY_MAPPING_PRECISION } } } } } ESHypermap.es.indices.create(catalog_slug, ignore=[400, 404], body=mapping)
[ "def", "create_indices", "(", "catalog_slug", ")", ":", "# TODO: enable auto_create_index in the ES nodes to make this implicit.", "# https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#index-creation", "# http://support.searchly.com/customer/en/portal/questions/", "# 16312889-is-automatic-index-creation-disabled-?new=16312889", "mapping", "=", "{", "\"mappings\"", ":", "{", "\"layer\"", ":", "{", "\"properties\"", ":", "{", "\"layer_geoshape\"", ":", "{", "\"type\"", ":", "\"geo_shape\"", ",", "\"tree\"", ":", "\"quadtree\"", ",", "\"precision\"", ":", "REGISTRY_MAPPING_PRECISION", "}", "}", "}", "}", "}", "ESHypermap", ".", "es", ".", "indices", ".", "create", "(", "catalog_slug", ",", "ignore", "=", "[", "400", ",", "404", "]", ",", "body", "=", "mapping", ")" ]
Create ES core indices
[ "Create", "ES", "core", "indices" ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/elasticsearch_client.py#L228-L247
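The mapping above indexes layer_geoshape as a quadtree geo_shape so that envelope filters (such as the q_geo filter in the search API) can run against it. A sketch of issuing the same index creation directly with elasticsearch-py; the endpoint, index name and precision are illustrative, and REGISTRY_MAPPING_PRECISION is normally read from settings:

from elasticsearch import Elasticsearch

es = Elasticsearch(['http://localhost:9200'])  # assumed endpoint
mapping = {
    "mappings": {
        "layer": {
            "properties": {
                "layer_geoshape": {
                    "type": "geo_shape",
                    "tree": "quadtree",
                    "precision": "500m",  # stand-in for REGISTRY_MAPPING_PRECISION
                }
            }
        }
    }
}
es.indices.create("hypermap", ignore=[400, 404], body=mapping)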
cga-harvard/Hypermap-Registry
pavement.py
kill_process
def kill_process(procname, scriptname): """kill WSGI processes that may be running in development""" # from http://stackoverflow.com/a/2940878 import signal import subprocess p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE) out, err = p.communicate() for line in out.decode().splitlines(): if procname in line and scriptname in line: pid = int(line.split()[1]) info('Stopping %s %s %d' % (procname, scriptname, pid)) os.kill(pid, signal.SIGKILL)
python
def kill_process(procname, scriptname): """kill WSGI processes that may be running in development""" # from http://stackoverflow.com/a/2940878 import signal import subprocess p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE) out, err = p.communicate() for line in out.decode().splitlines(): if procname in line and scriptname in line: pid = int(line.split()[1]) info('Stopping %s %s %d' % (procname, scriptname, pid)) os.kill(pid, signal.SIGKILL)
[ "def", "kill_process", "(", "procname", ",", "scriptname", ")", ":", "# from http://stackoverflow.com/a/2940878", "import", "signal", "import", "subprocess", "p", "=", "subprocess", ".", "Popen", "(", "[", "'ps'", ",", "'aux'", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "out", ",", "err", "=", "p", ".", "communicate", "(", ")", "for", "line", "in", "out", ".", "decode", "(", ")", ".", "splitlines", "(", ")", ":", "if", "procname", "in", "line", "and", "scriptname", "in", "line", ":", "pid", "=", "int", "(", "line", ".", "split", "(", ")", "[", "1", "]", ")", "info", "(", "'Stopping %s %s %d'", "%", "(", "procname", ",", "scriptname", ",", "pid", ")", ")", "os", ".", "kill", "(", "pid", ",", "signal", ".", "SIGKILL", ")" ]
kill WSGI processes that may be running in development
[ "kill", "WSGI", "processes", "that", "may", "be", "running", "in", "development" ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/pavement.py#L50-L64
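kill_process identifies targets by scanning each `ps aux` line for both strings and taking the second whitespace-separated column as the PID. A self-contained sketch with an illustrative line:

line = 'vagrant   1234  0.3  1.2 123456 7890 ?  S  10:00  0:01 gunicorn hypermap.wsgi'
procname, scriptname = 'gunicorn', 'hypermap'
if procname in line and scriptname in line:
    pid = int(line.split()[1])
    print(pid)  # 1234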
cga-harvard/Hypermap-Registry
hypermap/aggregator/populate_database.py
populate_initial_services
def populate_initial_services(): """ Populate a fresh installed Hypermap instances with basic services. """ services_list = ( ( 'Harvard WorldMap', 'Harvard WorldMap open source web geospatial platform', 'Hypermap:WorldMap', 'http://worldmap.harvard.edu' ), ( 'NYPL MapWarper', 'The New York Public Library (NYPL) MapWarper web site', 'Hypermap:WARPER', 'http://maps.nypl.org/warper/maps' ), ( 'Map Warper', 'The MapWarper web site developed, hosted and maintained by Tim Waters', 'Hypermap:WARPER', 'http://mapwarper.net/maps' ), ( 'WorldMap Warp', 'The MapWarper instance part of the Harvard WorldMap project', 'Hypermap:WARPER', 'http://warp.worldmap.harvard.edu/maps' ), ( 'WFP GeoNode', 'World Food Programme GeoNode', 'OGC:WMS', 'http://geonode.wfp.org/geoserver/ows?' ), ( 'NASA EARTHDATA', 'NASA EARTHDATA, powered by EOSDIS', 'OGC:WMTS', 'http://map1.vis.earthdata.nasa.gov/wmts-geo/1.0.0/WMTSCapabilities.xml' ), ) esri_endpoint = 'https://gis.ngdc.noaa.gov/arcgis/rest/services' LOGGER.debug('*** Importing esri endpoint: %s' % esri_endpoint) create_services_from_endpoint(esri_endpoint) for service in services_list: LOGGER.debug('*** Importing %s' % service[0]) service = Service( title=service[0], abstract=service[1], type=service[2], url=service[3] ) service.save()
python
def populate_initial_services(): """ Populate a fresh installed Hypermap instances with basic services. """ services_list = ( ( 'Harvard WorldMap', 'Harvard WorldMap open source web geospatial platform', 'Hypermap:WorldMap', 'http://worldmap.harvard.edu' ), ( 'NYPL MapWarper', 'The New York Public Library (NYPL) MapWarper web site', 'Hypermap:WARPER', 'http://maps.nypl.org/warper/maps' ), ( 'Map Warper', 'The MapWarper web site developed, hosted and maintained by Tim Waters', 'Hypermap:WARPER', 'http://mapwarper.net/maps' ), ( 'WorldMap Warp', 'The MapWarper instance part of the Harvard WorldMap project', 'Hypermap:WARPER', 'http://warp.worldmap.harvard.edu/maps' ), ( 'WFP GeoNode', 'World Food Programme GeoNode', 'OGC:WMS', 'http://geonode.wfp.org/geoserver/ows?' ), ( 'NASA EARTHDATA', 'NASA EARTHDATA, powered by EOSDIS', 'OGC:WMTS', 'http://map1.vis.earthdata.nasa.gov/wmts-geo/1.0.0/WMTSCapabilities.xml' ), ) esri_endpoint = 'https://gis.ngdc.noaa.gov/arcgis/rest/services' LOGGER.debug('*** Importing esri endpoint: %s' % esri_endpoint) create_services_from_endpoint(esri_endpoint) for service in services_list: LOGGER.debug('*** Importing %s' % service[0]) service = Service( title=service[0], abstract=service[1], type=service[2], url=service[3] ) service.save()
[ "def", "populate_initial_services", "(", ")", ":", "services_list", "=", "(", "(", "'Harvard WorldMap'", ",", "'Harvard WorldMap open source web geospatial platform'", ",", "'Hypermap:WorldMap'", ",", "'http://worldmap.harvard.edu'", ")", ",", "(", "'NYPL MapWarper'", ",", "'The New York Public Library (NYPL) MapWarper web site'", ",", "'Hypermap:WARPER'", ",", "'http://maps.nypl.org/warper/maps'", ")", ",", "(", "'Map Warper'", ",", "'The MapWarper web site developed, hosted and maintained by Tim Waters'", ",", "'Hypermap:WARPER'", ",", "'http://mapwarper.net/maps'", ")", ",", "(", "'WorldMap Warp'", ",", "'The MapWarper instance part of the Harvard WorldMap project'", ",", "'Hypermap:WARPER'", ",", "'http://warp.worldmap.harvard.edu/maps'", ")", ",", "(", "'WFP GeoNode'", ",", "'World Food Programme GeoNode'", ",", "'OGC:WMS'", ",", "'http://geonode.wfp.org/geoserver/ows?'", ")", ",", "(", "'NASA EARTHDATA'", ",", "'NASA EARTHDATA, powered by EOSDIS'", ",", "'OGC:WMTS'", ",", "'http://map1.vis.earthdata.nasa.gov/wmts-geo/1.0.0/WMTSCapabilities.xml'", ")", ",", ")", "esri_endpoint", "=", "'https://gis.ngdc.noaa.gov/arcgis/rest/services'", "LOGGER", ".", "debug", "(", "'*** Importing esri endpoint: %s'", "%", "esri_endpoint", ")", "create_services_from_endpoint", "(", "esri_endpoint", ")", "for", "service", "in", "services_list", ":", "LOGGER", ".", "debug", "(", "'*** Importing %s'", "%", "service", "[", "0", "]", ")", "service", "=", "Service", "(", "title", "=", "service", "[", "0", "]", ",", "abstract", "=", "service", "[", "1", "]", ",", "type", "=", "service", "[", "2", "]", ",", "url", "=", "service", "[", "3", "]", ")", "service", ".", "save", "(", ")" ]
Populate a fresh installed Hypermap instances with basic services.
[ "Populate", "a", "fresh", "installed", "Hypermap", "instances", "with", "basic", "services", "." ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/populate_database.py#L10-L65
cga-harvard/Hypermap-Registry
hypermap/search_api/views.py
elasticsearch
def elasticsearch(serializer, catalog): """ https://www.elastic.co/guide/en/elasticsearch/reference/current/_the_search_api.html :param serializer: :return: """ search_engine_endpoint = "{0}/{1}/_search".format(SEARCH_URL, catalog.slug) q_text = serializer.validated_data.get("q_text") q_time = serializer.validated_data.get("q_time") q_geo = serializer.validated_data.get("q_geo") q_user = serializer.validated_data.get("q_user") d_docs_sort = serializer.validated_data.get("d_docs_sort") d_docs_limit = int(serializer.validated_data.get("d_docs_limit")) d_docs_page = int(serializer.validated_data.get("d_docs_page")) a_text_limit = serializer.validated_data.get("a_text_limit") a_user_limit = serializer.validated_data.get("a_user_limit") a_time_gap = serializer.validated_data.get("a_time_gap") a_time_limit = serializer.validated_data.get("a_time_limit") original_response = serializer.validated_data.get("original_response") # Dict for search on Elastic engine must_array = [] filter_dic = {} aggs_dic = {} # get ES version to make the query builder to be backward compatible with # diffs versions. # TODO: move this to a proper place. maybe ES client?. # TODO: cache it to avoid overwhelm ES with this call. # TODO: ask for ES_VERSION when building queries with an elegant way. ES_VERSION = 2 response = requests.get(SEARCH_URL) if response.ok: # looks ugly but will work on normal ES response for "/". ES_VERSION = int(response.json()["version"]["number"][0]) # String searching if q_text: # Wrapping query string into a query filter. if ES_VERSION >= 2: query_string = { "query_string": { "query": q_text } } else: query_string = { "query": { "query_string": { "query": q_text } } } # add string searching must_array.append(query_string) if q_time: # check if q_time exists q_time = str(q_time) # check string shortener = q_time[1:-1] shortener = shortener.split(" TO ") gte = shortener[0] # greater than lte = shortener[1] # less than layer_date = {} if gte == '*' and lte != '*': layer_date["lte"] = lte range_time = { "layer_date": layer_date } range_time = {"range": range_time} must_array.append(range_time) if gte != '*' and lte == '*': layer_date["gte"] = gte range_time = { "layer_date": layer_date } range_time = {"range": range_time} must_array.append(range_time) if gte != '*' and lte != '*': layer_date["gte"] = gte layer_date["lte"] = lte range_time = { "layer_date": layer_date } range_time = {"range": range_time} must_array.append(range_time) # geo_shape searching if q_geo: q_geo = str(q_geo) q_geo = q_geo[1:-1] Ymin, Xmin = q_geo.split(" TO ")[0].split(",") Ymax, Xmax = q_geo.split(" TO ")[1].split(",") geoshape_query = { "layer_geoshape": { "shape": { "type": "envelope", "coordinates": [[Xmin, Ymax], [Xmax, Ymin]] }, "relation": "intersects" } } filter_dic["geo_shape"] = geoshape_query if q_user: # Using q_user user_searching = { "match": { "layer_originator": q_user } } must_array.append(user_searching) if ES_VERSION >= 2: dic_query = { "query": { "bool": { "must": must_array, "filter": filter_dic } } } else: dic_query = { "query": { "filtered": { "filter": { "bool": { "must": must_array, "should": filter_dic } } } } } # Page if d_docs_limit: dic_query["size"] = d_docs_limit if d_docs_page: dic_query["from"] = d_docs_limit * d_docs_page - d_docs_limit if d_docs_sort == "score": dic_query["sort"] = {"_score": {"order": "desc"}} if d_docs_sort == "time": dic_query["sort"] = {"layer_date": {"order": "desc"}} if d_docs_sort == "distance": if q_geo: # distance_x = float(((float(Xmin) - float(Xmax)) ** 2.0) ** (0.5)) # distance_y = float(((float(Ymin) - float(Ymax)) ** 2.0) ** (0.5)) msg = ("Sorting by distance is different on ElasticSearch than Solr, because this" "feature on elastic is unavailable to geo_shape type.ElasticSearch docs said:" "Due to the complex input structure and index representation of shapes," "it is not currently possible to sort shapes or retrieve their fields directly." "The geo_shape value is only retrievable through the _source field." " Link: https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-shape.html") return {"error": {"msg": msg}} else: msg = "q_qeo MUST BE NO ZERO if you wanna sort by distance" return {"error": {"msg": msg}} if a_text_limit: # getting most frequently occurring users. text_limit = { "terms": { "field": "abstract", "size": a_text_limit } } aggs_dic['popular_text'] = text_limit if a_user_limit: # getting most frequently occurring users. users_limit = { "terms": { "field": "layer_originator", "size": a_user_limit } } aggs_dic['popular_users'] = users_limit if a_time_limit: # TODO: Work in progress, a_time_limit is incomplete. # TODO: when times are * it does not work. also a a_time_gap is not required. if q_time: if not a_time_gap: # getting time limit histogram. time_limt = { "date_range": { "field": "layer_date", "format": "yyyy-MM-dd'T'HH:mm:ssZ", "ranges": [ {"from": gte, "to": lte} ] } } aggs_dic['range'] = time_limt else: pass else: msg = "If you want to use a_time_limit feature, q_time MUST BE initialized" return {"error": {"msg": msg}} if a_time_gap: interval = gap_to_elastic(a_time_gap) time_gap = { "date_histogram": { "field": "layer_date", "format": "yyyy-MM-dd'T'HH:mm:ssZ", "interval": interval } } aggs_dic['articles_over_time'] = time_gap # adding aggreations on body query if aggs_dic: dic_query['aggs'] = aggs_dic try: res = requests.post(search_engine_endpoint, data=json.dumps(dic_query)) except Exception as e: return 500, {"error": {"msg": str(e)}} es_response = res.json() if original_response: return es_response data = {} if 'error' in es_response: data["error"] = es_response["error"] return 400, data data["request_url"] = res.url data["request_body"] = json.dumps(dic_query) data["a.matchDocs"] = es_response['hits']['total'] docs = [] # aggreations response: facets searching if 'aggregations' in es_response: aggs = es_response['aggregations'] # getting the most frequently occurring users. if 'popular_users' in aggs: a_users_list_array = [] users_resp = aggs["popular_users"]["buckets"] for item in users_resp: temp = {} temp['count'] = item['doc_count'] temp['value'] = item['key'] a_users_list_array.append(temp) data["a.user"] = a_users_list_array # getting most frequently ocurring words if 'popular_text' in aggs: a_text_list_array = [] text_resp = es_response["aggregations"]["popular_text"]["buckets"] for item in text_resp: temp = {} temp['count'] = item['doc_count'] temp['value'] = item['key'] a_text_list_array.append(temp) data["a.text"] = a_text_list_array if 'articles_over_time' in aggs: gap_count = [] a_gap = {} gap_resp = aggs["articles_over_time"]["buckets"] start = "*" end = "*" if len(gap_resp) > 0: start = gap_resp[0]['key_as_string'].replace('+0000', 'z') end = gap_resp[-1]['key_as_string'].replace('+0000', 'z') a_gap['start'] = start a_gap['end'] = end a_gap['gap'] = a_time_gap for item in gap_resp: temp = {} if item['doc_count'] != 0: temp['count'] = item['doc_count'] temp['value'] = item['key_as_string'].replace('+0000', 'z') gap_count.append(temp) a_gap['counts'] = gap_count data['a.time'] = a_gap if 'range' in aggs: # Work in progress # Pay attention in the following code lines: Make it better!!!! time_count = [] time_resp = aggs["range"]["buckets"] a_time = {} a_time['start'] = gte a_time['end'] = lte a_time['gap'] = None for item in time_resp: temp = {} if item['doc_count'] != 0: temp['count'] = item['doc_count'] temp['value'] = item['key'].replace('+0000', 'z') time_count.append(temp) a_time['counts'] = time_count data['a.time'] = a_time if not int(d_docs_limit) == 0: for item in es_response['hits']['hits']: # data temp = item['_source']['abstract'] temp = temp.replace(u'\u201c', "\"") temp = temp.replace(u'\u201d', "\"") temp = temp.replace('"', "\"") temp = temp.replace("'", "\'") temp = temp.replace(u'\u2019', "\'") item['_source']['abstract'] = temp docs.append(item['_source']) data["d.docs"] = docs return data
python
def elasticsearch(serializer, catalog): """ https://www.elastic.co/guide/en/elasticsearch/reference/current/_the_search_api.html :param serializer: :return: """ search_engine_endpoint = "{0}/{1}/_search".format(SEARCH_URL, catalog.slug) q_text = serializer.validated_data.get("q_text") q_time = serializer.validated_data.get("q_time") q_geo = serializer.validated_data.get("q_geo") q_user = serializer.validated_data.get("q_user") d_docs_sort = serializer.validated_data.get("d_docs_sort") d_docs_limit = int(serializer.validated_data.get("d_docs_limit")) d_docs_page = int(serializer.validated_data.get("d_docs_page")) a_text_limit = serializer.validated_data.get("a_text_limit") a_user_limit = serializer.validated_data.get("a_user_limit") a_time_gap = serializer.validated_data.get("a_time_gap") a_time_limit = serializer.validated_data.get("a_time_limit") original_response = serializer.validated_data.get("original_response") # Dict for search on Elastic engine must_array = [] filter_dic = {} aggs_dic = {} # get ES version to make the query builder to be backward compatible with # diffs versions. # TODO: move this to a proper place. maybe ES client?. # TODO: cache it to avoid overwhelm ES with this call. # TODO: ask for ES_VERSION when building queries with an elegant way. ES_VERSION = 2 response = requests.get(SEARCH_URL) if response.ok: # looks ugly but will work on normal ES response for "/". ES_VERSION = int(response.json()["version"]["number"][0]) # String searching if q_text: # Wrapping query string into a query filter. if ES_VERSION >= 2: query_string = { "query_string": { "query": q_text } } else: query_string = { "query": { "query_string": { "query": q_text } } } # add string searching must_array.append(query_string) if q_time: # check if q_time exists q_time = str(q_time) # check string shortener = q_time[1:-1] shortener = shortener.split(" TO ") gte = shortener[0] # greater than lte = shortener[1] # less than layer_date = {} if gte == '*' and lte != '*': layer_date["lte"] = lte range_time = { "layer_date": layer_date } range_time = {"range": range_time} must_array.append(range_time) if gte != '*' and lte == '*': layer_date["gte"] = gte range_time = { "layer_date": layer_date } range_time = {"range": range_time} must_array.append(range_time) if gte != '*' and lte != '*': layer_date["gte"] = gte layer_date["lte"] = lte range_time = { "layer_date": layer_date } range_time = {"range": range_time} must_array.append(range_time) # geo_shape searching if q_geo: q_geo = str(q_geo) q_geo = q_geo[1:-1] Ymin, Xmin = q_geo.split(" TO ")[0].split(",") Ymax, Xmax = q_geo.split(" TO ")[1].split(",") geoshape_query = { "layer_geoshape": { "shape": { "type": "envelope", "coordinates": [[Xmin, Ymax], [Xmax, Ymin]] }, "relation": "intersects" } } filter_dic["geo_shape"] = geoshape_query if q_user: # Using q_user user_searching = { "match": { "layer_originator": q_user } } must_array.append(user_searching) if ES_VERSION >= 2: dic_query = { "query": { "bool": { "must": must_array, "filter": filter_dic } } } else: dic_query = { "query": { "filtered": { "filter": { "bool": { "must": must_array, "should": filter_dic } } } } } # Page if d_docs_limit: dic_query["size"] = d_docs_limit if d_docs_page: dic_query["from"] = d_docs_limit * d_docs_page - d_docs_limit if d_docs_sort == "score": dic_query["sort"] = {"_score": {"order": "desc"}} if d_docs_sort == "time": dic_query["sort"] = {"layer_date": {"order": "desc"}} if d_docs_sort == "distance": if q_geo: # distance_x = float(((float(Xmin) - float(Xmax)) ** 2.0) ** 
(0.5)) # distance_y = float(((float(Ymin) - float(Ymax)) ** 2.0) ** (0.5)) msg = ("Sorting by distance is different on ElasticSearch than Solr, because this" "feature on elastic is unavailable to geo_shape type.ElasticSearch docs said:" "Due to the complex input structure and index representation of shapes," "it is not currently possible to sort shapes or retrieve their fields directly." "The geo_shape value is only retrievable through the _source field." " Link: https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-shape.html") return {"error": {"msg": msg}} else: msg = "q_qeo MUST BE NO ZERO if you wanna sort by distance" return {"error": {"msg": msg}} if a_text_limit: # getting most frequently occurring users. text_limit = { "terms": { "field": "abstract", "size": a_text_limit } } aggs_dic['popular_text'] = text_limit if a_user_limit: # getting most frequently occurring users. users_limit = { "terms": { "field": "layer_originator", "size": a_user_limit } } aggs_dic['popular_users'] = users_limit if a_time_limit: # TODO: Work in progress, a_time_limit is incomplete. # TODO: when times are * it does not work. also a a_time_gap is not required. if q_time: if not a_time_gap: # getting time limit histogram. time_limt = { "date_range": { "field": "layer_date", "format": "yyyy-MM-dd'T'HH:mm:ssZ", "ranges": [ {"from": gte, "to": lte} ] } } aggs_dic['range'] = time_limt else: pass else: msg = "If you want to use a_time_limit feature, q_time MUST BE initialized" return {"error": {"msg": msg}} if a_time_gap: interval = gap_to_elastic(a_time_gap) time_gap = { "date_histogram": { "field": "layer_date", "format": "yyyy-MM-dd'T'HH:mm:ssZ", "interval": interval } } aggs_dic['articles_over_time'] = time_gap # adding aggreations on body query if aggs_dic: dic_query['aggs'] = aggs_dic try: res = requests.post(search_engine_endpoint, data=json.dumps(dic_query)) except Exception as e: return 500, {"error": {"msg": str(e)}} es_response = res.json() if original_response: return es_response data = {} if 'error' in es_response: data["error"] = es_response["error"] return 400, data data["request_url"] = res.url data["request_body"] = json.dumps(dic_query) data["a.matchDocs"] = es_response['hits']['total'] docs = [] # aggreations response: facets searching if 'aggregations' in es_response: aggs = es_response['aggregations'] # getting the most frequently occurring users. 
if 'popular_users' in aggs: a_users_list_array = [] users_resp = aggs["popular_users"]["buckets"] for item in users_resp: temp = {} temp['count'] = item['doc_count'] temp['value'] = item['key'] a_users_list_array.append(temp) data["a.user"] = a_users_list_array # getting most frequently ocurring words if 'popular_text' in aggs: a_text_list_array = [] text_resp = es_response["aggregations"]["popular_text"]["buckets"] for item in text_resp: temp = {} temp['count'] = item['doc_count'] temp['value'] = item['key'] a_text_list_array.append(temp) data["a.text"] = a_text_list_array if 'articles_over_time' in aggs: gap_count = [] a_gap = {} gap_resp = aggs["articles_over_time"]["buckets"] start = "*" end = "*" if len(gap_resp) > 0: start = gap_resp[0]['key_as_string'].replace('+0000', 'z') end = gap_resp[-1]['key_as_string'].replace('+0000', 'z') a_gap['start'] = start a_gap['end'] = end a_gap['gap'] = a_time_gap for item in gap_resp: temp = {} if item['doc_count'] != 0: temp['count'] = item['doc_count'] temp['value'] = item['key_as_string'].replace('+0000', 'z') gap_count.append(temp) a_gap['counts'] = gap_count data['a.time'] = a_gap if 'range' in aggs: # Work in progress # Pay attention in the following code lines: Make it better!!!! time_count = [] time_resp = aggs["range"]["buckets"] a_time = {} a_time['start'] = gte a_time['end'] = lte a_time['gap'] = None for item in time_resp: temp = {} if item['doc_count'] != 0: temp['count'] = item['doc_count'] temp['value'] = item['key'].replace('+0000', 'z') time_count.append(temp) a_time['counts'] = time_count data['a.time'] = a_time if not int(d_docs_limit) == 0: for item in es_response['hits']['hits']: # data temp = item['_source']['abstract'] temp = temp.replace(u'\u201c', "\"") temp = temp.replace(u'\u201d', "\"") temp = temp.replace('"', "\"") temp = temp.replace("'", "\'") temp = temp.replace(u'\u2019', "\'") item['_source']['abstract'] = temp docs.append(item['_source']) data["d.docs"] = docs return data
[ "def", "elasticsearch", "(", "serializer", ",", "catalog", ")", ":", "search_engine_endpoint", "=", "\"{0}/{1}/_search\"", ".", "format", "(", "SEARCH_URL", ",", "catalog", ".", "slug", ")", "q_text", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"q_text\"", ")", "q_time", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"q_time\"", ")", "q_geo", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"q_geo\"", ")", "q_user", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"q_user\"", ")", "d_docs_sort", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"d_docs_sort\"", ")", "d_docs_limit", "=", "int", "(", "serializer", ".", "validated_data", ".", "get", "(", "\"d_docs_limit\"", ")", ")", "d_docs_page", "=", "int", "(", "serializer", ".", "validated_data", ".", "get", "(", "\"d_docs_page\"", ")", ")", "a_text_limit", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"a_text_limit\"", ")", "a_user_limit", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"a_user_limit\"", ")", "a_time_gap", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"a_time_gap\"", ")", "a_time_limit", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"a_time_limit\"", ")", "original_response", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"original_response\"", ")", "# Dict for search on Elastic engine", "must_array", "=", "[", "]", "filter_dic", "=", "{", "}", "aggs_dic", "=", "{", "}", "# get ES version to make the query builder to be backward compatible with", "# diffs versions.", "# TODO: move this to a proper place. maybe ES client?.", "# TODO: cache it to avoid overwhelm ES with this call.", "# TODO: ask for ES_VERSION when building queries with an elegant way.", "ES_VERSION", "=", "2", "response", "=", "requests", ".", "get", "(", "SEARCH_URL", ")", "if", "response", ".", "ok", ":", "# looks ugly but will work on normal ES response for \"/\".", "ES_VERSION", "=", "int", "(", "response", ".", "json", "(", ")", "[", "\"version\"", "]", "[", "\"number\"", "]", "[", "0", "]", ")", "# String searching", "if", "q_text", ":", "# Wrapping query string into a query filter.", "if", "ES_VERSION", ">=", "2", ":", "query_string", "=", "{", "\"query_string\"", ":", "{", "\"query\"", ":", "q_text", "}", "}", "else", ":", "query_string", "=", "{", "\"query\"", ":", "{", "\"query_string\"", ":", "{", "\"query\"", ":", "q_text", "}", "}", "}", "# add string searching", "must_array", ".", "append", "(", "query_string", ")", "if", "q_time", ":", "# check if q_time exists", "q_time", "=", "str", "(", "q_time", ")", "# check string", "shortener", "=", "q_time", "[", "1", ":", "-", "1", "]", "shortener", "=", "shortener", ".", "split", "(", "\" TO \"", ")", "gte", "=", "shortener", "[", "0", "]", "# greater than", "lte", "=", "shortener", "[", "1", "]", "# less than", "layer_date", "=", "{", "}", "if", "gte", "==", "'*'", "and", "lte", "!=", "'*'", ":", "layer_date", "[", "\"lte\"", "]", "=", "lte", "range_time", "=", "{", "\"layer_date\"", ":", "layer_date", "}", "range_time", "=", "{", "\"range\"", ":", "range_time", "}", "must_array", ".", "append", "(", "range_time", ")", "if", "gte", "!=", "'*'", "and", "lte", "==", "'*'", ":", "layer_date", "[", "\"gte\"", "]", "=", "gte", "range_time", "=", "{", "\"layer_date\"", ":", "layer_date", "}", "range_time", "=", "{", "\"range\"", ":", "range_time", "}", "must_array", ".", "append", "(", "range_time", ")", "if", "gte", "!=", "'*'", "and", "lte", "!=", "'*'", ":", 
"layer_date", "[", "\"gte\"", "]", "=", "gte", "layer_date", "[", "\"lte\"", "]", "=", "lte", "range_time", "=", "{", "\"layer_date\"", ":", "layer_date", "}", "range_time", "=", "{", "\"range\"", ":", "range_time", "}", "must_array", ".", "append", "(", "range_time", ")", "# geo_shape searching", "if", "q_geo", ":", "q_geo", "=", "str", "(", "q_geo", ")", "q_geo", "=", "q_geo", "[", "1", ":", "-", "1", "]", "Ymin", ",", "Xmin", "=", "q_geo", ".", "split", "(", "\" TO \"", ")", "[", "0", "]", ".", "split", "(", "\",\"", ")", "Ymax", ",", "Xmax", "=", "q_geo", ".", "split", "(", "\" TO \"", ")", "[", "1", "]", ".", "split", "(", "\",\"", ")", "geoshape_query", "=", "{", "\"layer_geoshape\"", ":", "{", "\"shape\"", ":", "{", "\"type\"", ":", "\"envelope\"", ",", "\"coordinates\"", ":", "[", "[", "Xmin", ",", "Ymax", "]", ",", "[", "Xmax", ",", "Ymin", "]", "]", "}", ",", "\"relation\"", ":", "\"intersects\"", "}", "}", "filter_dic", "[", "\"geo_shape\"", "]", "=", "geoshape_query", "if", "q_user", ":", "# Using q_user", "user_searching", "=", "{", "\"match\"", ":", "{", "\"layer_originator\"", ":", "q_user", "}", "}", "must_array", ".", "append", "(", "user_searching", ")", "if", "ES_VERSION", ">=", "2", ":", "dic_query", "=", "{", "\"query\"", ":", "{", "\"bool\"", ":", "{", "\"must\"", ":", "must_array", ",", "\"filter\"", ":", "filter_dic", "}", "}", "}", "else", ":", "dic_query", "=", "{", "\"query\"", ":", "{", "\"filtered\"", ":", "{", "\"filter\"", ":", "{", "\"bool\"", ":", "{", "\"must\"", ":", "must_array", ",", "\"should\"", ":", "filter_dic", "}", "}", "}", "}", "}", "# Page", "if", "d_docs_limit", ":", "dic_query", "[", "\"size\"", "]", "=", "d_docs_limit", "if", "d_docs_page", ":", "dic_query", "[", "\"from\"", "]", "=", "d_docs_limit", "*", "d_docs_page", "-", "d_docs_limit", "if", "d_docs_sort", "==", "\"score\"", ":", "dic_query", "[", "\"sort\"", "]", "=", "{", "\"_score\"", ":", "{", "\"order\"", ":", "\"desc\"", "}", "}", "if", "d_docs_sort", "==", "\"time\"", ":", "dic_query", "[", "\"sort\"", "]", "=", "{", "\"layer_date\"", ":", "{", "\"order\"", ":", "\"desc\"", "}", "}", "if", "d_docs_sort", "==", "\"distance\"", ":", "if", "q_geo", ":", "# distance_x = float(((float(Xmin) - float(Xmax)) ** 2.0) ** (0.5))", "# distance_y = float(((float(Ymin) - float(Ymax)) ** 2.0) ** (0.5))", "msg", "=", "(", "\"Sorting by distance is different on ElasticSearch than Solr, because this\"", "\"feature on elastic is unavailable to geo_shape type.ElasticSearch docs said:\"", "\"Due to the complex input structure and index representation of shapes,\"", "\"it is not currently possible to sort shapes or retrieve their fields directly.\"", "\"The geo_shape value is only retrievable through the _source field.\"", "\" Link: https://www.elastic.co/guide/en/elasticsearch/reference/current/geo-shape.html\"", ")", "return", "{", "\"error\"", ":", "{", "\"msg\"", ":", "msg", "}", "}", "else", ":", "msg", "=", "\"q_qeo MUST BE NO ZERO if you wanna sort by distance\"", "return", "{", "\"error\"", ":", "{", "\"msg\"", ":", "msg", "}", "}", "if", "a_text_limit", ":", "# getting most frequently occurring users.", "text_limit", "=", "{", "\"terms\"", ":", "{", "\"field\"", ":", "\"abstract\"", ",", "\"size\"", ":", "a_text_limit", "}", "}", "aggs_dic", "[", "'popular_text'", "]", "=", "text_limit", "if", "a_user_limit", ":", "# getting most frequently occurring users.", "users_limit", "=", "{", "\"terms\"", ":", "{", "\"field\"", ":", "\"layer_originator\"", ",", "\"size\"", ":", "a_user_limit", "}", 
"}", "aggs_dic", "[", "'popular_users'", "]", "=", "users_limit", "if", "a_time_limit", ":", "# TODO: Work in progress, a_time_limit is incomplete.", "# TODO: when times are * it does not work. also a a_time_gap is not required.", "if", "q_time", ":", "if", "not", "a_time_gap", ":", "# getting time limit histogram.", "time_limt", "=", "{", "\"date_range\"", ":", "{", "\"field\"", ":", "\"layer_date\"", ",", "\"format\"", ":", "\"yyyy-MM-dd'T'HH:mm:ssZ\"", ",", "\"ranges\"", ":", "[", "{", "\"from\"", ":", "gte", ",", "\"to\"", ":", "lte", "}", "]", "}", "}", "aggs_dic", "[", "'range'", "]", "=", "time_limt", "else", ":", "pass", "else", ":", "msg", "=", "\"If you want to use a_time_limit feature, q_time MUST BE initialized\"", "return", "{", "\"error\"", ":", "{", "\"msg\"", ":", "msg", "}", "}", "if", "a_time_gap", ":", "interval", "=", "gap_to_elastic", "(", "a_time_gap", ")", "time_gap", "=", "{", "\"date_histogram\"", ":", "{", "\"field\"", ":", "\"layer_date\"", ",", "\"format\"", ":", "\"yyyy-MM-dd'T'HH:mm:ssZ\"", ",", "\"interval\"", ":", "interval", "}", "}", "aggs_dic", "[", "'articles_over_time'", "]", "=", "time_gap", "# adding aggreations on body query", "if", "aggs_dic", ":", "dic_query", "[", "'aggs'", "]", "=", "aggs_dic", "try", ":", "res", "=", "requests", ".", "post", "(", "search_engine_endpoint", ",", "data", "=", "json", ".", "dumps", "(", "dic_query", ")", ")", "except", "Exception", "as", "e", ":", "return", "500", ",", "{", "\"error\"", ":", "{", "\"msg\"", ":", "str", "(", "e", ")", "}", "}", "es_response", "=", "res", ".", "json", "(", ")", "if", "original_response", ":", "return", "es_response", "data", "=", "{", "}", "if", "'error'", "in", "es_response", ":", "data", "[", "\"error\"", "]", "=", "es_response", "[", "\"error\"", "]", "return", "400", ",", "data", "data", "[", "\"request_url\"", "]", "=", "res", ".", "url", "data", "[", "\"request_body\"", "]", "=", "json", ".", "dumps", "(", "dic_query", ")", "data", "[", "\"a.matchDocs\"", "]", "=", "es_response", "[", "'hits'", "]", "[", "'total'", "]", "docs", "=", "[", "]", "# aggreations response: facets searching", "if", "'aggregations'", "in", "es_response", ":", "aggs", "=", "es_response", "[", "'aggregations'", "]", "# getting the most frequently occurring users.", "if", "'popular_users'", "in", "aggs", ":", "a_users_list_array", "=", "[", "]", "users_resp", "=", "aggs", "[", "\"popular_users\"", "]", "[", "\"buckets\"", "]", "for", "item", "in", "users_resp", ":", "temp", "=", "{", "}", "temp", "[", "'count'", "]", "=", "item", "[", "'doc_count'", "]", "temp", "[", "'value'", "]", "=", "item", "[", "'key'", "]", "a_users_list_array", ".", "append", "(", "temp", ")", "data", "[", "\"a.user\"", "]", "=", "a_users_list_array", "# getting most frequently ocurring words", "if", "'popular_text'", "in", "aggs", ":", "a_text_list_array", "=", "[", "]", "text_resp", "=", "es_response", "[", "\"aggregations\"", "]", "[", "\"popular_text\"", "]", "[", "\"buckets\"", "]", "for", "item", "in", "text_resp", ":", "temp", "=", "{", "}", "temp", "[", "'count'", "]", "=", "item", "[", "'doc_count'", "]", "temp", "[", "'value'", "]", "=", "item", "[", "'key'", "]", "a_text_list_array", ".", "append", "(", "temp", ")", "data", "[", "\"a.text\"", "]", "=", "a_text_list_array", "if", "'articles_over_time'", "in", "aggs", ":", "gap_count", "=", "[", "]", "a_gap", "=", "{", "}", "gap_resp", "=", "aggs", "[", "\"articles_over_time\"", "]", "[", "\"buckets\"", "]", "start", "=", "\"*\"", "end", "=", "\"*\"", "if", "len", "(", 
"gap_resp", ")", ">", "0", ":", "start", "=", "gap_resp", "[", "0", "]", "[", "'key_as_string'", "]", ".", "replace", "(", "'+0000'", ",", "'z'", ")", "end", "=", "gap_resp", "[", "-", "1", "]", "[", "'key_as_string'", "]", ".", "replace", "(", "'+0000'", ",", "'z'", ")", "a_gap", "[", "'start'", "]", "=", "start", "a_gap", "[", "'end'", "]", "=", "end", "a_gap", "[", "'gap'", "]", "=", "a_time_gap", "for", "item", "in", "gap_resp", ":", "temp", "=", "{", "}", "if", "item", "[", "'doc_count'", "]", "!=", "0", ":", "temp", "[", "'count'", "]", "=", "item", "[", "'doc_count'", "]", "temp", "[", "'value'", "]", "=", "item", "[", "'key_as_string'", "]", ".", "replace", "(", "'+0000'", ",", "'z'", ")", "gap_count", ".", "append", "(", "temp", ")", "a_gap", "[", "'counts'", "]", "=", "gap_count", "data", "[", "'a.time'", "]", "=", "a_gap", "if", "'range'", "in", "aggs", ":", "# Work in progress", "# Pay attention in the following code lines: Make it better!!!!", "time_count", "=", "[", "]", "time_resp", "=", "aggs", "[", "\"range\"", "]", "[", "\"buckets\"", "]", "a_time", "=", "{", "}", "a_time", "[", "'start'", "]", "=", "gte", "a_time", "[", "'end'", "]", "=", "lte", "a_time", "[", "'gap'", "]", "=", "None", "for", "item", "in", "time_resp", ":", "temp", "=", "{", "}", "if", "item", "[", "'doc_count'", "]", "!=", "0", ":", "temp", "[", "'count'", "]", "=", "item", "[", "'doc_count'", "]", "temp", "[", "'value'", "]", "=", "item", "[", "'key'", "]", ".", "replace", "(", "'+0000'", ",", "'z'", ")", "time_count", ".", "append", "(", "temp", ")", "a_time", "[", "'counts'", "]", "=", "time_count", "data", "[", "'a.time'", "]", "=", "a_time", "if", "not", "int", "(", "d_docs_limit", ")", "==", "0", ":", "for", "item", "in", "es_response", "[", "'hits'", "]", "[", "'hits'", "]", ":", "# data", "temp", "=", "item", "[", "'_source'", "]", "[", "'abstract'", "]", "temp", "=", "temp", ".", "replace", "(", "u'\\u201c'", ",", "\"\\\"\"", ")", "temp", "=", "temp", ".", "replace", "(", "u'\\u201d'", ",", "\"\\\"\"", ")", "temp", "=", "temp", ".", "replace", "(", "'\"'", ",", "\"\\\"\"", ")", "temp", "=", "temp", ".", "replace", "(", "\"'\"", ",", "\"\\'\"", ")", "temp", "=", "temp", ".", "replace", "(", "u'\\u2019'", ",", "\"\\'\"", ")", "item", "[", "'_source'", "]", "[", "'abstract'", "]", "=", "temp", "docs", ".", "append", "(", "item", "[", "'_source'", "]", ")", "data", "[", "\"d.docs\"", "]", "=", "docs", "return", "data" ]
https://www.elastic.co/guide/en/elasticsearch/reference/current/_the_search_api.html :param serializer: :return:
[ "https", ":", "//", "www", ".", "elastic", ".", "co", "/", "guide", "/", "en", "/", "elasticsearch", "/", "reference", "/", "current", "/", "_the_search_api", ".", "html", ":", "param", "serializer", ":", ":", "return", ":" ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/search_api/views.py#L32-L361
cga-harvard/Hypermap-Registry
hypermap/search_api/views.py
solr
def solr(serializer): """ Search on solr endpoint :param serializer: :return: """ search_engine_endpoint = serializer.validated_data.get("search_engine_endpoint") q_time = serializer.validated_data.get("q_time") q_geo = serializer.validated_data.get("q_geo") q_text = serializer.validated_data.get("q_text") q_user = serializer.validated_data.get("q_user") d_docs_limit = serializer.validated_data.get("d_docs_limit") d_docs_page = serializer.validated_data.get("d_docs_page") d_docs_sort = serializer.validated_data.get("d_docs_sort") a_time_limit = serializer.validated_data.get("a_time_limit") a_time_gap = serializer.validated_data.get("a_time_gap") a_time_filter = serializer.validated_data.get("a_time_filter") a_hm_limit = serializer.validated_data.get("a_hm_limit") a_hm_gridlevel = serializer.validated_data.get("a_hm_gridlevel") a_hm_filter = serializer.validated_data.get("a_hm_filter") a_text_limit = serializer.validated_data.get("a_text_limit") a_user_limit = serializer.validated_data.get("a_user_limit") original_response = serializer.validated_data.get("original_response") # query params to be sent via restful solr params = { "q": "*:*", "indent": "on", "wt": "json", "rows": d_docs_limit, "facet": "off", "facet.field": [], "debug": "timing" } if q_text: params["q"] = q_text if d_docs_limit >= 0: d_docs_page -= 1 d_docs_page = d_docs_limit * d_docs_page params["start"] = d_docs_page # query params for filters filters = [] if q_time: # TODO: when user sends incomplete dates like 2000, its completed: 2000-(TODAY-MONTH)-(TODAY-DAY)T00:00:00Z # TODO: "Invalid Date in Date Math String:'[* TO 2000-12-05T00:00:00Z]'" # Kotlin like: "{!field f=layer_date tag=layer_date}[* TO 2000-12-05T00:00:00Z]" # then do it simple: filters.append("{0}:{1}".format(TIME_FILTER_FIELD, q_time)) if q_geo: filters.append("{0}:{1}".format(GEO_FILTER_FIELD, q_geo)) if q_user: filters.append("{{!field f={0} tag={0}}}{1}".format(USER_FIELD, q_user)) if filters: params["fq"] = filters # query params for ordering if d_docs_sort == 'score' and q_text: params["sort"] = 'score desc' elif d_docs_sort == 'time': params["sort"] = '{} desc'.format(TIME_SORT_FIELD) elif d_docs_sort == 'distance': rectangle = parse_geo_box(q_geo) params["sort"] = 'geodist() asc' params["sfield"] = GEO_SORT_FIELD params["pt"] = '{0},{1}'.format(rectangle.centroid.x, rectangle.centroid.y) # query params for facets if a_time_limit > 0: params["facet"] = 'on' time_filter = a_time_filter or q_time or None # traduce * to actual min/max dates. time_filter = asterisk_to_min_max(TIME_FILTER_FIELD, time_filter, search_engine_endpoint) # create the range faceting params. facet_parms = request_time_facet(TIME_FILTER_FIELD, time_filter, a_time_gap, a_time_limit) params.update(facet_parms) if a_hm_limit > 0: params["facet"] = 'on' hm_facet_params = request_heatmap_facet(GEO_HEATMAP_FIELD, a_hm_filter, a_hm_gridlevel, a_hm_limit) params.update(hm_facet_params) if a_text_limit > 0: params["facet"] = 'on' params["facet.field"].append(TEXT_FIELD) params["f.{}.facet.limit".format(TEXT_FIELD)] = a_text_limit if a_user_limit > 0: params["facet"] = 'on' params["facet.field"].append("{{! 
ex={0}}}{0}".format(USER_FIELD)) params["f.{}.facet.limit".format(USER_FIELD)] = a_user_limit try: res = requests.get( search_engine_endpoint, params=params ) except Exception as e: return 500, {"error": {"msg": str(e)}} print '>', res.url solr_response = res.json() solr_response["solr_request"] = res.url if original_response > 0: return solr_response # create the response dict following the swagger model: data = {} if 'error' in solr_response: data["error"] = solr_response["error"] return 400, data response = solr_response["response"] data["a.matchDocs"] = response.get("numFound") if response.get("docs"): data["d.docs"] = response.get("docs") if a_time_limit > 0: date_facet = solr_response["facet_counts"]["facet_ranges"][TIME_FILTER_FIELD] counts = [] value_count = iter(date_facet.get("counts")) for value, count in zip(value_count, value_count): counts.append({ "value": value, "count": count }) a_time = { "start": date_facet.get("start"), "end": date_facet.get("end"), "gap": date_facet.get("gap"), "counts": counts } data["a.time"] = a_time if a_hm_limit > 0: hm_facet_raw = solr_response["facet_counts"]["facet_heatmaps"][GEO_HEATMAP_FIELD] hm_facet = { 'gridLevel': hm_facet_raw[1], 'columns': hm_facet_raw[3], 'rows': hm_facet_raw[5], 'minX': hm_facet_raw[7], 'maxX': hm_facet_raw[9], 'minY': hm_facet_raw[11], 'maxY': hm_facet_raw[13], 'counts_ints2D': hm_facet_raw[15], 'projection': 'EPSG:4326' } data["a.hm"] = hm_facet if a_user_limit > 0: user_facet = solr_response["facet_counts"]["facet_fields"][USER_FIELD] counts = [] value_count = iter(user_facet) for value, count in zip(value_count, value_count): counts.append({ "value": value, "count": count }) data["a.user"] = counts if a_text_limit > 0: text_facet = solr_response["facet_counts"]["facet_fields"][TEXT_FIELD] counts = [] value_count = iter(text_facet) for value, count in zip(value_count, value_count): counts.append({ "value": value, "count": count }) data["a.text"] = counts subs = [] for label, values in solr_response["debug"]["timing"].iteritems(): if type(values) is not dict: continue subs_data = {"label": label, "subs": []} for label, values in values.iteritems(): if type(values) is not dict: subs_data["millis"] = values continue subs_data["subs"].append({ "label": label, "millis": values.get("time") }) subs.append(subs_data) timing = { "label": "requests.get.elapsed", "millis": res.elapsed, "subs": [{ "label": "QTime", "millis": solr_response["responseHeader"].get("QTime"), "subs": subs }] } data["timing"] = timing data["request_url"] = res.url return data
python
def solr(serializer): """ Search on solr endpoint :param serializer: :return: """ search_engine_endpoint = serializer.validated_data.get("search_engine_endpoint") q_time = serializer.validated_data.get("q_time") q_geo = serializer.validated_data.get("q_geo") q_text = serializer.validated_data.get("q_text") q_user = serializer.validated_data.get("q_user") d_docs_limit = serializer.validated_data.get("d_docs_limit") d_docs_page = serializer.validated_data.get("d_docs_page") d_docs_sort = serializer.validated_data.get("d_docs_sort") a_time_limit = serializer.validated_data.get("a_time_limit") a_time_gap = serializer.validated_data.get("a_time_gap") a_time_filter = serializer.validated_data.get("a_time_filter") a_hm_limit = serializer.validated_data.get("a_hm_limit") a_hm_gridlevel = serializer.validated_data.get("a_hm_gridlevel") a_hm_filter = serializer.validated_data.get("a_hm_filter") a_text_limit = serializer.validated_data.get("a_text_limit") a_user_limit = serializer.validated_data.get("a_user_limit") original_response = serializer.validated_data.get("original_response") # query params to be sent via restful solr params = { "q": "*:*", "indent": "on", "wt": "json", "rows": d_docs_limit, "facet": "off", "facet.field": [], "debug": "timing" } if q_text: params["q"] = q_text if d_docs_limit >= 0: d_docs_page -= 1 d_docs_page = d_docs_limit * d_docs_page params["start"] = d_docs_page # query params for filters filters = [] if q_time: # TODO: when user sends incomplete dates like 2000, its completed: 2000-(TODAY-MONTH)-(TODAY-DAY)T00:00:00Z # TODO: "Invalid Date in Date Math String:'[* TO 2000-12-05T00:00:00Z]'" # Kotlin like: "{!field f=layer_date tag=layer_date}[* TO 2000-12-05T00:00:00Z]" # then do it simple: filters.append("{0}:{1}".format(TIME_FILTER_FIELD, q_time)) if q_geo: filters.append("{0}:{1}".format(GEO_FILTER_FIELD, q_geo)) if q_user: filters.append("{{!field f={0} tag={0}}}{1}".format(USER_FIELD, q_user)) if filters: params["fq"] = filters # query params for ordering if d_docs_sort == 'score' and q_text: params["sort"] = 'score desc' elif d_docs_sort == 'time': params["sort"] = '{} desc'.format(TIME_SORT_FIELD) elif d_docs_sort == 'distance': rectangle = parse_geo_box(q_geo) params["sort"] = 'geodist() asc' params["sfield"] = GEO_SORT_FIELD params["pt"] = '{0},{1}'.format(rectangle.centroid.x, rectangle.centroid.y) # query params for facets if a_time_limit > 0: params["facet"] = 'on' time_filter = a_time_filter or q_time or None # traduce * to actual min/max dates. time_filter = asterisk_to_min_max(TIME_FILTER_FIELD, time_filter, search_engine_endpoint) # create the range faceting params. facet_parms = request_time_facet(TIME_FILTER_FIELD, time_filter, a_time_gap, a_time_limit) params.update(facet_parms) if a_hm_limit > 0: params["facet"] = 'on' hm_facet_params = request_heatmap_facet(GEO_HEATMAP_FIELD, a_hm_filter, a_hm_gridlevel, a_hm_limit) params.update(hm_facet_params) if a_text_limit > 0: params["facet"] = 'on' params["facet.field"].append(TEXT_FIELD) params["f.{}.facet.limit".format(TEXT_FIELD)] = a_text_limit if a_user_limit > 0: params["facet"] = 'on' params["facet.field"].append("{{! 
ex={0}}}{0}".format(USER_FIELD)) params["f.{}.facet.limit".format(USER_FIELD)] = a_user_limit try: res = requests.get( search_engine_endpoint, params=params ) except Exception as e: return 500, {"error": {"msg": str(e)}} print '>', res.url solr_response = res.json() solr_response["solr_request"] = res.url if original_response > 0: return solr_response # create the response dict following the swagger model: data = {} if 'error' in solr_response: data["error"] = solr_response["error"] return 400, data response = solr_response["response"] data["a.matchDocs"] = response.get("numFound") if response.get("docs"): data["d.docs"] = response.get("docs") if a_time_limit > 0: date_facet = solr_response["facet_counts"]["facet_ranges"][TIME_FILTER_FIELD] counts = [] value_count = iter(date_facet.get("counts")) for value, count in zip(value_count, value_count): counts.append({ "value": value, "count": count }) a_time = { "start": date_facet.get("start"), "end": date_facet.get("end"), "gap": date_facet.get("gap"), "counts": counts } data["a.time"] = a_time if a_hm_limit > 0: hm_facet_raw = solr_response["facet_counts"]["facet_heatmaps"][GEO_HEATMAP_FIELD] hm_facet = { 'gridLevel': hm_facet_raw[1], 'columns': hm_facet_raw[3], 'rows': hm_facet_raw[5], 'minX': hm_facet_raw[7], 'maxX': hm_facet_raw[9], 'minY': hm_facet_raw[11], 'maxY': hm_facet_raw[13], 'counts_ints2D': hm_facet_raw[15], 'projection': 'EPSG:4326' } data["a.hm"] = hm_facet if a_user_limit > 0: user_facet = solr_response["facet_counts"]["facet_fields"][USER_FIELD] counts = [] value_count = iter(user_facet) for value, count in zip(value_count, value_count): counts.append({ "value": value, "count": count }) data["a.user"] = counts if a_text_limit > 0: text_facet = solr_response["facet_counts"]["facet_fields"][TEXT_FIELD] counts = [] value_count = iter(text_facet) for value, count in zip(value_count, value_count): counts.append({ "value": value, "count": count }) data["a.text"] = counts subs = [] for label, values in solr_response["debug"]["timing"].iteritems(): if type(values) is not dict: continue subs_data = {"label": label, "subs": []} for label, values in values.iteritems(): if type(values) is not dict: subs_data["millis"] = values continue subs_data["subs"].append({ "label": label, "millis": values.get("time") }) subs.append(subs_data) timing = { "label": "requests.get.elapsed", "millis": res.elapsed, "subs": [{ "label": "QTime", "millis": solr_response["responseHeader"].get("QTime"), "subs": subs }] } data["timing"] = timing data["request_url"] = res.url return data
[ "def", "solr", "(", "serializer", ")", ":", "search_engine_endpoint", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"search_engine_endpoint\"", ")", "q_time", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"q_time\"", ")", "q_geo", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"q_geo\"", ")", "q_text", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"q_text\"", ")", "q_user", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"q_user\"", ")", "d_docs_limit", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"d_docs_limit\"", ")", "d_docs_page", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"d_docs_page\"", ")", "d_docs_sort", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"d_docs_sort\"", ")", "a_time_limit", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"a_time_limit\"", ")", "a_time_gap", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"a_time_gap\"", ")", "a_time_filter", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"a_time_filter\"", ")", "a_hm_limit", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"a_hm_limit\"", ")", "a_hm_gridlevel", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"a_hm_gridlevel\"", ")", "a_hm_filter", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"a_hm_filter\"", ")", "a_text_limit", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"a_text_limit\"", ")", "a_user_limit", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"a_user_limit\"", ")", "original_response", "=", "serializer", ".", "validated_data", ".", "get", "(", "\"original_response\"", ")", "# query params to be sent via restful solr", "params", "=", "{", "\"q\"", ":", "\"*:*\"", ",", "\"indent\"", ":", "\"on\"", ",", "\"wt\"", ":", "\"json\"", ",", "\"rows\"", ":", "d_docs_limit", ",", "\"facet\"", ":", "\"off\"", ",", "\"facet.field\"", ":", "[", "]", ",", "\"debug\"", ":", "\"timing\"", "}", "if", "q_text", ":", "params", "[", "\"q\"", "]", "=", "q_text", "if", "d_docs_limit", ">=", "0", ":", "d_docs_page", "-=", "1", "d_docs_page", "=", "d_docs_limit", "*", "d_docs_page", "params", "[", "\"start\"", "]", "=", "d_docs_page", "# query params for filters", "filters", "=", "[", "]", "if", "q_time", ":", "# TODO: when user sends incomplete dates like 2000, its completed: 2000-(TODAY-MONTH)-(TODAY-DAY)T00:00:00Z", "# TODO: \"Invalid Date in Date Math String:'[* TO 2000-12-05T00:00:00Z]'\"", "# Kotlin like: \"{!field f=layer_date tag=layer_date}[* TO 2000-12-05T00:00:00Z]\"", "# then do it simple:", "filters", ".", "append", "(", "\"{0}:{1}\"", ".", "format", "(", "TIME_FILTER_FIELD", ",", "q_time", ")", ")", "if", "q_geo", ":", "filters", ".", "append", "(", "\"{0}:{1}\"", ".", "format", "(", "GEO_FILTER_FIELD", ",", "q_geo", ")", ")", "if", "q_user", ":", "filters", ".", "append", "(", "\"{{!field f={0} tag={0}}}{1}\"", ".", "format", "(", "USER_FIELD", ",", "q_user", ")", ")", "if", "filters", ":", "params", "[", "\"fq\"", "]", "=", "filters", "# query params for ordering", "if", "d_docs_sort", "==", "'score'", "and", "q_text", ":", "params", "[", "\"sort\"", "]", "=", "'score desc'", "elif", "d_docs_sort", "==", "'time'", ":", "params", "[", "\"sort\"", "]", "=", "'{} desc'", ".", "format", "(", "TIME_SORT_FIELD", ")", "elif", "d_docs_sort", "==", "'distance'", ":", "rectangle", "=", "parse_geo_box", "(", "q_geo", ")", "params", "[", "\"sort\"", "]", "=", "'geodist() asc'", "params", "[", 
"\"sfield\"", "]", "=", "GEO_SORT_FIELD", "params", "[", "\"pt\"", "]", "=", "'{0},{1}'", ".", "format", "(", "rectangle", ".", "centroid", ".", "x", ",", "rectangle", ".", "centroid", ".", "y", ")", "# query params for facets", "if", "a_time_limit", ">", "0", ":", "params", "[", "\"facet\"", "]", "=", "'on'", "time_filter", "=", "a_time_filter", "or", "q_time", "or", "None", "# traduce * to actual min/max dates.", "time_filter", "=", "asterisk_to_min_max", "(", "TIME_FILTER_FIELD", ",", "time_filter", ",", "search_engine_endpoint", ")", "# create the range faceting params.", "facet_parms", "=", "request_time_facet", "(", "TIME_FILTER_FIELD", ",", "time_filter", ",", "a_time_gap", ",", "a_time_limit", ")", "params", ".", "update", "(", "facet_parms", ")", "if", "a_hm_limit", ">", "0", ":", "params", "[", "\"facet\"", "]", "=", "'on'", "hm_facet_params", "=", "request_heatmap_facet", "(", "GEO_HEATMAP_FIELD", ",", "a_hm_filter", ",", "a_hm_gridlevel", ",", "a_hm_limit", ")", "params", ".", "update", "(", "hm_facet_params", ")", "if", "a_text_limit", ">", "0", ":", "params", "[", "\"facet\"", "]", "=", "'on'", "params", "[", "\"facet.field\"", "]", ".", "append", "(", "TEXT_FIELD", ")", "params", "[", "\"f.{}.facet.limit\"", ".", "format", "(", "TEXT_FIELD", ")", "]", "=", "a_text_limit", "if", "a_user_limit", ">", "0", ":", "params", "[", "\"facet\"", "]", "=", "'on'", "params", "[", "\"facet.field\"", "]", ".", "append", "(", "\"{{! ex={0}}}{0}\"", ".", "format", "(", "USER_FIELD", ")", ")", "params", "[", "\"f.{}.facet.limit\"", ".", "format", "(", "USER_FIELD", ")", "]", "=", "a_user_limit", "try", ":", "res", "=", "requests", ".", "get", "(", "search_engine_endpoint", ",", "params", "=", "params", ")", "except", "Exception", "as", "e", ":", "return", "500", ",", "{", "\"error\"", ":", "{", "\"msg\"", ":", "str", "(", "e", ")", "}", "}", "print", "'>'", ",", "res", ".", "url", "solr_response", "=", "res", ".", "json", "(", ")", "solr_response", "[", "\"solr_request\"", "]", "=", "res", ".", "url", "if", "original_response", ">", "0", ":", "return", "solr_response", "# create the response dict following the swagger model:", "data", "=", "{", "}", "if", "'error'", "in", "solr_response", ":", "data", "[", "\"error\"", "]", "=", "solr_response", "[", "\"error\"", "]", "return", "400", ",", "data", "response", "=", "solr_response", "[", "\"response\"", "]", "data", "[", "\"a.matchDocs\"", "]", "=", "response", ".", "get", "(", "\"numFound\"", ")", "if", "response", ".", "get", "(", "\"docs\"", ")", ":", "data", "[", "\"d.docs\"", "]", "=", "response", ".", "get", "(", "\"docs\"", ")", "if", "a_time_limit", ">", "0", ":", "date_facet", "=", "solr_response", "[", "\"facet_counts\"", "]", "[", "\"facet_ranges\"", "]", "[", "TIME_FILTER_FIELD", "]", "counts", "=", "[", "]", "value_count", "=", "iter", "(", "date_facet", ".", "get", "(", "\"counts\"", ")", ")", "for", "value", ",", "count", "in", "zip", "(", "value_count", ",", "value_count", ")", ":", "counts", ".", "append", "(", "{", "\"value\"", ":", "value", ",", "\"count\"", ":", "count", "}", ")", "a_time", "=", "{", "\"start\"", ":", "date_facet", ".", "get", "(", "\"start\"", ")", ",", "\"end\"", ":", "date_facet", ".", "get", "(", "\"end\"", ")", ",", "\"gap\"", ":", "date_facet", ".", "get", "(", "\"gap\"", ")", ",", "\"counts\"", ":", "counts", "}", "data", "[", "\"a.time\"", "]", "=", "a_time", "if", "a_hm_limit", ">", "0", ":", "hm_facet_raw", "=", "solr_response", "[", "\"facet_counts\"", "]", "[", "\"facet_heatmaps\"", "]", "[", 
"GEO_HEATMAP_FIELD", "]", "hm_facet", "=", "{", "'gridLevel'", ":", "hm_facet_raw", "[", "1", "]", ",", "'columns'", ":", "hm_facet_raw", "[", "3", "]", ",", "'rows'", ":", "hm_facet_raw", "[", "5", "]", ",", "'minX'", ":", "hm_facet_raw", "[", "7", "]", ",", "'maxX'", ":", "hm_facet_raw", "[", "9", "]", ",", "'minY'", ":", "hm_facet_raw", "[", "11", "]", ",", "'maxY'", ":", "hm_facet_raw", "[", "13", "]", ",", "'counts_ints2D'", ":", "hm_facet_raw", "[", "15", "]", ",", "'projection'", ":", "'EPSG:4326'", "}", "data", "[", "\"a.hm\"", "]", "=", "hm_facet", "if", "a_user_limit", ">", "0", ":", "user_facet", "=", "solr_response", "[", "\"facet_counts\"", "]", "[", "\"facet_fields\"", "]", "[", "USER_FIELD", "]", "counts", "=", "[", "]", "value_count", "=", "iter", "(", "user_facet", ")", "for", "value", ",", "count", "in", "zip", "(", "value_count", ",", "value_count", ")", ":", "counts", ".", "append", "(", "{", "\"value\"", ":", "value", ",", "\"count\"", ":", "count", "}", ")", "data", "[", "\"a.user\"", "]", "=", "counts", "if", "a_text_limit", ">", "0", ":", "text_facet", "=", "solr_response", "[", "\"facet_counts\"", "]", "[", "\"facet_fields\"", "]", "[", "TEXT_FIELD", "]", "counts", "=", "[", "]", "value_count", "=", "iter", "(", "text_facet", ")", "for", "value", ",", "count", "in", "zip", "(", "value_count", ",", "value_count", ")", ":", "counts", ".", "append", "(", "{", "\"value\"", ":", "value", ",", "\"count\"", ":", "count", "}", ")", "data", "[", "\"a.text\"", "]", "=", "counts", "subs", "=", "[", "]", "for", "label", ",", "values", "in", "solr_response", "[", "\"debug\"", "]", "[", "\"timing\"", "]", ".", "iteritems", "(", ")", ":", "if", "type", "(", "values", ")", "is", "not", "dict", ":", "continue", "subs_data", "=", "{", "\"label\"", ":", "label", ",", "\"subs\"", ":", "[", "]", "}", "for", "label", ",", "values", "in", "values", ".", "iteritems", "(", ")", ":", "if", "type", "(", "values", ")", "is", "not", "dict", ":", "subs_data", "[", "\"millis\"", "]", "=", "values", "continue", "subs_data", "[", "\"subs\"", "]", ".", "append", "(", "{", "\"label\"", ":", "label", ",", "\"millis\"", ":", "values", ".", "get", "(", "\"time\"", ")", "}", ")", "subs", ".", "append", "(", "subs_data", ")", "timing", "=", "{", "\"label\"", ":", "\"requests.get.elapsed\"", ",", "\"millis\"", ":", "res", ".", "elapsed", ",", "\"subs\"", ":", "[", "{", "\"label\"", ":", "\"QTime\"", ",", "\"millis\"", ":", "solr_response", "[", "\"responseHeader\"", "]", ".", "get", "(", "\"QTime\"", ")", ",", "\"subs\"", ":", "subs", "}", "]", "}", "data", "[", "\"timing\"", "]", "=", "timing", "data", "[", "\"request_url\"", "]", "=", "res", ".", "url", "return", "data" ]
Search on solr endpoint :param serializer: :return:
[ "Search", "on", "solr", "endpoint", ":", "param", "serializer", ":", ":", "return", ":" ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/search_api/views.py#L364-L573
cga-harvard/Hypermap-Registry
hypermap/search_api/views.py
parse_get_params
def parse_get_params(request):
    """
    parse all url get params that contains dots in a representation of
    serializer field names, for example: d.docs.limit to d_docs_limit.
    that makes compatible an actual API client with django-rest-framework
    serializers.
    :param request:
    :return: QueryDict with parsed get params.
    """
    get = request.GET.copy()
    new_get = request.GET.copy()
    for key in get.iterkeys():
        if key.count(".") > 0:
            new_key = key.replace(".", "_")
            new_get[new_key] = get.get(key)
            del new_get[key]
    return new_get
python
def parse_get_params(request):
    """
    parse all url get params that contains dots in a representation of
    serializer field names, for example: d.docs.limit to d_docs_limit.
    that makes compatible an actual API client with django-rest-framework
    serializers.
    :param request:
    :return: QueryDict with parsed get params.
    """
    get = request.GET.copy()
    new_get = request.GET.copy()
    for key in get.iterkeys():
        if key.count(".") > 0:
            new_key = key.replace(".", "_")
            new_get[new_key] = get.get(key)
            del new_get[key]
    return new_get
[ "def", "parse_get_params", "(", "request", ")", ":", "get", "=", "request", ".", "GET", ".", "copy", "(", ")", "new_get", "=", "request", ".", "GET", ".", "copy", "(", ")", "for", "key", "in", "get", ".", "iterkeys", "(", ")", ":", "if", "key", ".", "count", "(", "\".\"", ")", ">", "0", ":", "new_key", "=", "key", ".", "replace", "(", "\".\"", ",", "\"_\"", ")", "new_get", "[", "new_key", "]", "=", "get", ".", "get", "(", "key", ")", "del", "new_get", "[", "key", "]", "return", "new_get" ]
parse all url get params that contains dots in a representation of serializer field names, for example: d.docs.limit to d_docs_limit. that makes compatible an actual API client with django-rest-framework serializers. :param request: :return: QueryDict with parsed get params.
[ "parse", "all", "url", "get", "params", "that", "contains", "dots", "in", "a", "representation", "of", "serializer", "field", "names", "for", "example", ":", "d", ".", "docs", ".", "limit", "to", "d_docs_limit", ".", "that", "makes", "compatible", "an", "actual", "API", "client", "with", "django", "-", "rest", "-", "framework", "serializers", ".", ":", "param", "request", ":", ":", "return", ":", "QueryDict", "with", "parsed", "get", "params", "." ]
train
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/search_api/views.py#L576-L594
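A minimal, self-contained sketch of the dotted-to-underscored mapping described in the docstring above, assuming a plain dict in place of Django's QueryDict; the helper name and the sample parameters are hypothetical, not taken from the repository.
# Hypothetical sketch: dotted GET params -> serializer-style underscored keys.
# A plain dict stands in for request.GET (a QueryDict in the real view).
def parse_get_params_sketch(get_params):
    new_get = dict(get_params)
    for key in get_params:
        if key.count(".") > 0:
            new_get[key.replace(".", "_")] = get_params[key]
            del new_get[key]
    return new_get

print(parse_get_params_sketch({"q.text": "roads", "d.docs.limit": "10"}))
# expected: {'q_text': 'roads', 'd_docs_limit': '10'}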
konikvranik/pyCEC
pycec/tcp.py
main
def main():
    """For testing purpose"""
    tcp_adapter = TcpAdapter("192.168.1.3", name="HASS", activate_source=False)
    hdmi_network = HDMINetwork(tcp_adapter)
    hdmi_network.start()
    while True:
        for d in hdmi_network.devices:
            _LOGGER.info("Device: %s", d)
        time.sleep(7)
python
def main():
    """For testing purpose"""
    tcp_adapter = TcpAdapter("192.168.1.3", name="HASS", activate_source=False)
    hdmi_network = HDMINetwork(tcp_adapter)
    hdmi_network.start()
    while True:
        for d in hdmi_network.devices:
            _LOGGER.info("Device: %s", d)
        time.sleep(7)
[ "def", "main", "(", ")", ":", "tcp_adapter", "=", "TcpAdapter", "(", "\"192.168.1.3\"", ",", "name", "=", "\"HASS\"", ",", "activate_source", "=", "False", ")", "hdmi_network", "=", "HDMINetwork", "(", "tcp_adapter", ")", "hdmi_network", ".", "start", "(", ")", "while", "True", ":", "for", "d", "in", "hdmi_network", ".", "devices", ":", "_LOGGER", ".", "info", "(", "\"Device: %s\"", ",", "d", ")", "time", ".", "sleep", "(", "7", ")" ]
For testing purpose
[ "For", "testing", "purpose" ]
train
https://github.com/konikvranik/pyCEC/blob/acf42a842d8a912ed68d63d8d6b653e6c405b29b/pycec/tcp.py#L143-L152
diffeo/py-nilsimsa
nilsimsa/deprecated/_deprecated_nilsimsa.py
compare_hexdigests
def compare_hexdigests( digest1, digest2 ):
    """Compute difference in bits between digest1 and digest2
    returns -127 to 128; 128 is the same, -127 is different"""
    # convert to 32-tuple of unsighed two-byte INTs
    digest1 = tuple([int(digest1[i:i+2],16) for i in range(0,63,2)])
    digest2 = tuple([int(digest2[i:i+2],16) for i in range(0,63,2)])
    bits = 0
    for i in range(32):
        bits += POPC[255 & digest1[i] ^ digest2[i]]
    return 128 - bits
python
def compare_hexdigests( digest1, digest2 ):
    """Compute difference in bits between digest1 and digest2
    returns -127 to 128; 128 is the same, -127 is different"""
    # convert to 32-tuple of unsighed two-byte INTs
    digest1 = tuple([int(digest1[i:i+2],16) for i in range(0,63,2)])
    digest2 = tuple([int(digest2[i:i+2],16) for i in range(0,63,2)])
    bits = 0
    for i in range(32):
        bits += POPC[255 & digest1[i] ^ digest2[i]]
    return 128 - bits
[ "def", "compare_hexdigests", "(", "digest1", ",", "digest2", ")", ":", "# convert to 32-tuple of unsighed two-byte INTs", "digest1", "=", "tuple", "(", "[", "int", "(", "digest1", "[", "i", ":", "i", "+", "2", "]", ",", "16", ")", "for", "i", "in", "range", "(", "0", ",", "63", ",", "2", ")", "]", ")", "digest2", "=", "tuple", "(", "[", "int", "(", "digest2", "[", "i", ":", "i", "+", "2", "]", ",", "16", ")", "for", "i", "in", "range", "(", "0", ",", "63", ",", "2", ")", "]", ")", "bits", "=", "0", "for", "i", "in", "range", "(", "32", ")", ":", "bits", "+=", "POPC", "[", "255", "&", "digest1", "[", "i", "]", "^", "digest2", "[", "i", "]", "]", "return", "128", "-", "bits" ]
Compute difference in bits between digest1 and digest2 returns -127 to 128; 128 is the same, -127 is different
[ "Compute", "difference", "in", "bits", "between", "digest1", "and", "digest2", "returns", "-", "127", "to", "128", ";", "128", "is", "the", "same", "-", "127", "is", "different" ]
train
https://github.com/diffeo/py-nilsimsa/blob/c652f4bbfd836f7aebf292dcea676cc925ec315a/nilsimsa/deprecated/_deprecated_nilsimsa.py#L196-L205
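A small usage sketch of the hex-digest comparison documented above; it is not taken from the repository and recomputes the per-byte population count with bin() instead of the module's POPC lookup table. The 64-character digests are made up for the example.
# Hypothetical standalone comparison: 128 means identical digests, lower means more different.
def compare_hexdigests_sketch(digest1, digest2):
    d1 = [int(digest1[i:i + 2], 16) for i in range(0, 63, 2)]
    d2 = [int(digest2[i:i + 2], 16) for i in range(0, 63, 2)]
    bits = sum(bin((255 & a) ^ b).count("1") for a, b in zip(d1, d2))
    return 128 - bits

same = "ab" * 32
flipped = "ab" * 31 + "54"  # last byte differs from 0xab in every bit
print(compare_hexdigests_sketch(same, same))     # 128
print(compare_hexdigests_sketch(same, flipped))  # 120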
diffeo/py-nilsimsa
nilsimsa/deprecated/_deprecated_nilsimsa.py
Nilsimsa.tran3
def tran3(self, a, b, c, n):
    """Get accumulator for a transition n between chars a, b, c."""
    return (((TRAN[(a+n)&255]^TRAN[b]*(n+n+1))+TRAN[(c)^TRAN[n]])&255)
python
def tran3(self, a, b, c, n):
    """Get accumulator for a transition n between chars a, b, c."""
    return (((TRAN[(a+n)&255]^TRAN[b]*(n+n+1))+TRAN[(c)^TRAN[n]])&255)
[ "def", "tran3", "(", "self", ",", "a", ",", "b", ",", "c", ",", "n", ")", ":", "return", "(", "(", "(", "TRAN", "[", "(", "a", "+", "n", ")", "&", "255", "]", "^", "TRAN", "[", "b", "]", "*", "(", "n", "+", "n", "+", "1", ")", ")", "+", "TRAN", "[", "(", "c", ")", "^", "TRAN", "[", "n", "]", "]", ")", "&", "255", ")" ]
Get accumulator for a transition n between chars a, b, c.
[ "Get", "accumulator", "for", "a", "transition", "n", "between", "chars", "a", "b", "c", "." ]
train
https://github.com/diffeo/py-nilsimsa/blob/c652f4bbfd836f7aebf292dcea676cc925ec315a/nilsimsa/deprecated/_deprecated_nilsimsa.py#L117-L119
diffeo/py-nilsimsa
nilsimsa/deprecated/_deprecated_nilsimsa.py
Nilsimsa.update
def update(self, data):
    """Add data to running digest, increasing the accumulators for 0-8
    triplets formed by this char and the previous 0-3 chars."""
    for character in data:
        if PY3:
            ch = character
        else:
            ch = ord(character)
        self.count += 1
        # incr accumulators for triplets
        if self.lastch[1] > -1:
            self.acc[self.tran3(ch, self.lastch[0], self.lastch[1], 0)] +=1
        if self.lastch[2] > -1:
            self.acc[self.tran3(ch, self.lastch[0], self.lastch[2], 1)] +=1
            self.acc[self.tran3(ch, self.lastch[1], self.lastch[2], 2)] +=1
        if self.lastch[3] > -1:
            self.acc[self.tran3(ch, self.lastch[0], self.lastch[3], 3)] +=1
            self.acc[self.tran3(ch, self.lastch[1], self.lastch[3], 4)] +=1
            self.acc[self.tran3(ch, self.lastch[2], self.lastch[3], 5)] +=1
            self.acc[self.tran3(self.lastch[3], self.lastch[0], ch, 6)] +=1
            self.acc[self.tran3(self.lastch[3], self.lastch[2], ch, 7)] +=1
        # adjust last seen chars
        self.lastch = [ch] + self.lastch[:3]
python
def update(self, data):
    """Add data to running digest, increasing the accumulators for 0-8
    triplets formed by this char and the previous 0-3 chars."""
    for character in data:
        if PY3:
            ch = character
        else:
            ch = ord(character)
        self.count += 1
        # incr accumulators for triplets
        if self.lastch[1] > -1:
            self.acc[self.tran3(ch, self.lastch[0], self.lastch[1], 0)] +=1
        if self.lastch[2] > -1:
            self.acc[self.tran3(ch, self.lastch[0], self.lastch[2], 1)] +=1
            self.acc[self.tran3(ch, self.lastch[1], self.lastch[2], 2)] +=1
        if self.lastch[3] > -1:
            self.acc[self.tran3(ch, self.lastch[0], self.lastch[3], 3)] +=1
            self.acc[self.tran3(ch, self.lastch[1], self.lastch[3], 4)] +=1
            self.acc[self.tran3(ch, self.lastch[2], self.lastch[3], 5)] +=1
            self.acc[self.tran3(self.lastch[3], self.lastch[0], ch, 6)] +=1
            self.acc[self.tran3(self.lastch[3], self.lastch[2], ch, 7)] +=1
        # adjust last seen chars
        self.lastch = [ch] + self.lastch[:3]
[ "def", "update", "(", "self", ",", "data", ")", ":", "for", "character", "in", "data", ":", "if", "PY3", ":", "ch", "=", "character", "else", ":", "ch", "=", "ord", "(", "character", ")", "self", ".", "count", "+=", "1", "# incr accumulators for triplets", "if", "self", ".", "lastch", "[", "1", "]", ">", "-", "1", ":", "self", ".", "acc", "[", "self", ".", "tran3", "(", "ch", ",", "self", ".", "lastch", "[", "0", "]", ",", "self", ".", "lastch", "[", "1", "]", ",", "0", ")", "]", "+=", "1", "if", "self", ".", "lastch", "[", "2", "]", ">", "-", "1", ":", "self", ".", "acc", "[", "self", ".", "tran3", "(", "ch", ",", "self", ".", "lastch", "[", "0", "]", ",", "self", ".", "lastch", "[", "2", "]", ",", "1", ")", "]", "+=", "1", "self", ".", "acc", "[", "self", ".", "tran3", "(", "ch", ",", "self", ".", "lastch", "[", "1", "]", ",", "self", ".", "lastch", "[", "2", "]", ",", "2", ")", "]", "+=", "1", "if", "self", ".", "lastch", "[", "3", "]", ">", "-", "1", ":", "self", ".", "acc", "[", "self", ".", "tran3", "(", "ch", ",", "self", ".", "lastch", "[", "0", "]", ",", "self", ".", "lastch", "[", "3", "]", ",", "3", ")", "]", "+=", "1", "self", ".", "acc", "[", "self", ".", "tran3", "(", "ch", ",", "self", ".", "lastch", "[", "1", "]", ",", "self", ".", "lastch", "[", "3", "]", ",", "4", ")", "]", "+=", "1", "self", ".", "acc", "[", "self", ".", "tran3", "(", "ch", ",", "self", ".", "lastch", "[", "2", "]", ",", "self", ".", "lastch", "[", "3", "]", ",", "5", ")", "]", "+=", "1", "self", ".", "acc", "[", "self", ".", "tran3", "(", "self", ".", "lastch", "[", "3", "]", ",", "self", ".", "lastch", "[", "0", "]", ",", "ch", ",", "6", ")", "]", "+=", "1", "self", ".", "acc", "[", "self", ".", "tran3", "(", "self", ".", "lastch", "[", "3", "]", ",", "self", ".", "lastch", "[", "2", "]", ",", "ch", ",", "7", ")", "]", "+=", "1", "# adjust last seen chars", "self", ".", "lastch", "=", "[", "ch", "]", "+", "self", ".", "lastch", "[", ":", "3", "]" ]
Add data to running digest, increasing the accumulators for 0-8 triplets formed by this char and the previous 0-3 chars.
[ "Add", "data", "to", "running", "digest", "increasing", "the", "accumulators", "for", "0", "-", "8", "triplets", "formed", "by", "this", "char", "and", "the", "previous", "0", "-", "3", "chars", "." ]
train
https://github.com/diffeo/py-nilsimsa/blob/c652f4bbfd836f7aebf292dcea676cc925ec315a/nilsimsa/deprecated/_deprecated_nilsimsa.py#L121-L145
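The update docstring above refers to triplets formed from the current character and the previous 0-3 characters; that window lives in self.lastch, newest first, padded with -1 until enough input has been seen. A tiny sketch of just that window bookkeeping, with arbitrary made-up input bytes and none of the accumulator math:
# Hypothetical sketch of the self.lastch sliding window only.
lastch = [-1, -1, -1, -1]
for ch in b"abcde":  # iterating bytes yields ints, like ord(character) on Python 2
    lastch = [ch] + lastch[:3]
    print(lastch)
# last line printed: [101, 100, 99, 98] -> 'e', 'd', 'c', 'b'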
diffeo/py-nilsimsa
nilsimsa/deprecated/_deprecated_nilsimsa.py
Nilsimsa.digest
def digest(self):
    """Get digest of data seen thus far as a list of bytes."""
    total = 0  # number of triplets seen
    if self.count == 3:  # 3 chars = 1 triplet
        total = 1
    elif self.count == 4:  # 4 chars = 4 triplets
        total = 4
    elif self.count > 4:  # otherwise 8 triplets/char less
        total = 8 * self.count - 28  # 28 'missed' during 'ramp-up'
    threshold = total / 256  # threshold for accumulators, using the mean
    code = [0]*32  # start with all zero bits
    for i in range(256):  # for all 256 accumulators
        if self.acc[i] > threshold:  # if it meets the threshold
            code[i >> 3] += 1 << (i&7)  # set corresponding digest bit, equivalent to i/8, 2 ** (i % 8)
    return code[::-1]
python
def digest(self): """Get digest of data seen thus far as a list of bytes.""" total = 0 # number of triplets seen if self.count == 3: # 3 chars = 1 triplet total = 1 elif self.count == 4: # 4 chars = 4 triplets total = 4 elif self.count > 4: # otherwise 8 triplets/char less total = 8 * self.count - 28 # 28 'missed' during 'ramp-up' threshold = total / 256 # threshold for accumulators, using the mean code = [0]*32 # start with all zero bits for i in range(256): # for all 256 accumulators if self.acc[i] > threshold: # if it meets the threshold code[i >> 3] += 1 << (i&7) # set corresponding digest bit, equivalent to i/8, 2 ** (i % 8) return code[::-1]
[ "def", "digest", "(", "self", ")", ":", "total", "=", "0", "# number of triplets seen", "if", "self", ".", "count", "==", "3", ":", "# 3 chars = 1 triplet", "total", "=", "1", "elif", "self", ".", "count", "==", "4", ":", "# 4 chars = 4 triplets", "total", "=", "4", "elif", "self", ".", "count", ">", "4", ":", "# otherwise 8 triplets/char less", "total", "=", "8", "*", "self", ".", "count", "-", "28", "# 28 'missed' during 'ramp-up'", "threshold", "=", "total", "/", "256", "# threshold for accumulators, using the mean", "code", "=", "[", "0", "]", "*", "32", "# start with all zero bits", "for", "i", "in", "range", "(", "256", ")", ":", "# for all 256 accumulators", "if", "self", ".", "acc", "[", "i", "]", ">", "threshold", ":", "# if it meets the threshold", "code", "[", "i", ">>", "3", "]", "+=", "1", "<<", "(", "i", "&", "7", ")", "# set corresponding digest bit, equivalent to i/8, 2 ** (i % 8)", "return", "code", "[", ":", ":", "-", "1", "]" ]
Get digest of data seen thus far as a list of bytes.
[ "Get", "digest", "of", "data", "seen", "thus", "far", "as", "a", "list", "of", "bytes", "." ]
train
https://github.com/diffeo/py-nilsimsa/blob/c652f4bbfd836f7aebf292dcea676cc925ec315a/nilsimsa/deprecated/_deprecated_nilsimsa.py#L147-L164
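A short sketch of reading the digest once data has been fed in; the constructor usage is the same assumption as in the previous sketch.

digest_obj = Nilsimsa()                               # assumed no-argument constructor
digest_obj.update(b"some example input text")
code = digest_obj.digest()                            # list of 32 ints, one per byte of the 256-bit digest
hex_code = "".join("%02x" % byte for byte in code)    # render the digest as a hex string
print(hex_code)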
diffeo/py-nilsimsa
nilsimsa/deprecated/_deprecated_nilsimsa.py
Nilsimsa.from_file
def from_file(self, filename): """Update running digest with content of named file.""" f = open(filename, 'rb') while True: data = f.read(10480) if not data: break self.update(data) f.close()
python
def from_file(self, filename): """Update running digest with content of named file.""" f = open(filename, 'rb') while True: data = f.read(10480) if not data: break self.update(data) f.close()
[ "def", "from_file", "(", "self", ",", "filename", ")", ":", "f", "=", "open", "(", "filename", ",", "'rb'", ")", "while", "True", ":", "data", "=", "f", ".", "read", "(", "10480", ")", "if", "not", "data", ":", "break", "self", ".", "update", "(", "data", ")", "f", ".", "close", "(", ")" ]
Update running digest with content of named file.
[ "Update", "running", "digest", "with", "content", "of", "named", "file", "." ]
train
https://github.com/diffeo/py-nilsimsa/blob/c652f4bbfd836f7aebf292dcea676cc925ec315a/nilsimsa/deprecated/_deprecated_nilsimsa.py#L174-L182
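A hedged sketch of hashing a whole file with from_file(); the file name is hypothetical.

digest_obj = Nilsimsa()                # assumed no-argument constructor
digest_obj.from_file("example.txt")    # hypothetical path; content is read and hashed in 10480-byte chunks
print(digest_obj.digest())             # digest of the whole file contents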
diffeo/py-nilsimsa
nilsimsa/deprecated/_deprecated_nilsimsa.py
Nilsimsa.compare
def compare(self, otherdigest, ishex=False): """Compute difference in bits between own digest and another. returns -127 to 128; 128 is the same, -127 is different""" bits = 0 myd = self.digest() if ishex: # convert to 32-tuple of unsighed two-byte INTs otherdigest = tuple([int(otherdigest[i:i+2],16) for i in range(0,63,2)]) for i in range(32): bits += POPC[255 & myd[i] ^ otherdigest[i]] return 128 - bits
python
def compare(self, otherdigest, ishex=False): """Compute difference in bits between own digest and another. returns -127 to 128; 128 is the same, -127 is different""" bits = 0 myd = self.digest() if ishex: # convert to 32-tuple of unsighed two-byte INTs otherdigest = tuple([int(otherdigest[i:i+2],16) for i in range(0,63,2)]) for i in range(32): bits += POPC[255 & myd[i] ^ otherdigest[i]] return 128 - bits
[ "def", "compare", "(", "self", ",", "otherdigest", ",", "ishex", "=", "False", ")", ":", "bits", "=", "0", "myd", "=", "self", ".", "digest", "(", ")", "if", "ishex", ":", "# convert to 32-tuple of unsighed two-byte INTs", "otherdigest", "=", "tuple", "(", "[", "int", "(", "otherdigest", "[", "i", ":", "i", "+", "2", "]", ",", "16", ")", "for", "i", "in", "range", "(", "0", ",", "63", ",", "2", ")", "]", ")", "for", "i", "in", "range", "(", "32", ")", ":", "bits", "+=", "POPC", "[", "255", "&", "myd", "[", "i", "]", "^", "otherdigest", "[", "i", "]", "]", "return", "128", "-", "bits" ]
Compute difference in bits between own digest and another. returns -127 to 128; 128 is the same, -127 is different
[ "Compute", "difference", "in", "bits", "between", "own", "digest", "and", "another", ".", "returns", "-", "127", "to", "128", ";", "128", "is", "the", "same", "-", "127", "is", "different" ]
train
https://github.com/diffeo/py-nilsimsa/blob/c652f4bbfd836f7aebf292dcea676cc925ec315a/nilsimsa/deprecated/_deprecated_nilsimsa.py#L184-L194
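A sketch of similarity scoring with compare(), assuming the same constructor as above. compare() accepts either a raw 32-int digest or a hex string when ishex=True is passed.

a = Nilsimsa()
a.update(b"The quick brown fox jumps over the lazy dog")
b = Nilsimsa()
b.update(b"The quick brown fox jumps over the lazy cat")
score = a.compare(b.digest())   # 128 means identical, -127 means maximally different
print(score)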
CloudGenix/sdk-python
cloudgenix/__init__.py
jdout
def jdout(api_response): """ JD Output function. Does quick pretty printing of a CloudGenix Response body. This function returns a string instead of directly printing content. **Parameters:** - **api_response:** A CloudGenix-attribute extended `requests.Response` object **Returns:** Pretty-formatted text of the Response body """ try: # attempt to output the cgx_content. should always be a Dict if it exists. output = json.dumps(api_response.cgx_content, indent=4) except (TypeError, ValueError, AttributeError): # cgx_content did not exist, or was not JSON serializable. Try pretty output the base obj. try: output = json.dumps(api_response, indent=4) except (TypeError, ValueError, AttributeError): # Same issue, just raw output the passed data. Let any exceptions happen here. output = api_response return output
python
def jdout(api_response): """ JD Output function. Does quick pretty printing of a CloudGenix Response body. This function returns a string instead of directly printing content. **Parameters:** - **api_response:** A CloudGenix-attribute extended `requests.Response` object **Returns:** Pretty-formatted text of the Response body """ try: # attempt to output the cgx_content. should always be a Dict if it exists. output = json.dumps(api_response.cgx_content, indent=4) except (TypeError, ValueError, AttributeError): # cgx_content did not exist, or was not JSON serializable. Try pretty output the base obj. try: output = json.dumps(api_response, indent=4) except (TypeError, ValueError, AttributeError): # Same issue, just raw output the passed data. Let any exceptions happen here. output = api_response return output
[ "def", "jdout", "(", "api_response", ")", ":", "try", ":", "# attempt to output the cgx_content. should always be a Dict if it exists.", "output", "=", "json", ".", "dumps", "(", "api_response", ".", "cgx_content", ",", "indent", "=", "4", ")", "except", "(", "TypeError", ",", "ValueError", ",", "AttributeError", ")", ":", "# cgx_content did not exist, or was not JSON serializable. Try pretty output the base obj.", "try", ":", "output", "=", "json", ".", "dumps", "(", "api_response", ",", "indent", "=", "4", ")", "except", "(", "TypeError", ",", "ValueError", ",", "AttributeError", ")", ":", "# Same issue, just raw output the passed data. Let any exceptions happen here.", "output", "=", "api_response", "return", "output" ]
JD Output function. Does quick pretty printing of a CloudGenix Response body. This function returns a string instead of directly printing content. **Parameters:** - **api_response:** A CloudGenix-attribute extended `requests.Response` object **Returns:** Pretty-formatted text of the Response body
[ "JD", "Output", "function", ".", "Does", "quick", "pretty", "printing", "of", "a", "CloudGenix", "Response", "body", ".", "This", "function", "returns", "a", "string", "instead", "of", "directly", "printing", "content", "." ]
train
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L159-L180
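A minimal sketch of jdout() in use. The API constructor is referenced elsewhere in these records; the specific endpoint called here is hypothetical and only stands in for any call that returns the CloudGenix-extended Response object.

import cloudgenix

sdk = cloudgenix.API()
resp = sdk.get.profile()         # hypothetical endpoint; any extended Response works here
print(cloudgenix.jdout(resp))    # pretty-printed cgx_content as a JSON string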
CloudGenix/sdk-python
cloudgenix/__init__.py
jdout_detailed
def jdout_detailed(api_response, sensitive=False): """ JD Output Detailed function. Meant for quick DETAILED pretty-printing of CloudGenix Request and Response objects for troubleshooting. This function returns a string instead of directly printing content. **Parameters:** - **api_response:** A CloudGenix-attribute extended `requests.Response` object - **sensitive:** Boolean, if True will print sensitive content (specifically, authentication cookies/headers). **Returns:** Pretty-formatted text of the Request, Request Headers, Request body, Response, Response Headers, and Response Body. """ try: # try to be super verbose. output = "REQUEST: {0} {1}\n".format(api_response.request.method, api_response.request.path_url) output += "REQUEST HEADERS:\n" for key, value in api_response.request.headers.items(): # look for sensitive values if key.lower() in ['cookie'] and not sensitive: # we need to do some work to watch for the AUTH_TOKEN cookie. Split on cookie separator cookie_list = value.split('; ') muted_cookie_list = [] for cookie in cookie_list: # check if cookie starts with a permutation of AUTH_TOKEN/whitespace. if cookie.lower().strip().startswith('auth_token='): # first 11 chars of cookie with whitespace removed + mute string. newcookie = cookie.strip()[:11] + "\"<SENSITIVE - NOT SHOWN BY DEFAULT>\"" muted_cookie_list.append(newcookie) else: muted_cookie_list.append(cookie) # got list of cookies, muted as needed. recombine. muted_value = "; ".join(muted_cookie_list) output += "\t{0}: {1}\n".format(key, muted_value) elif key.lower() in ['x-auth-token'] and not sensitive: output += "\t{0}: {1}\n".format(key, "<SENSITIVE - NOT SHOWN BY DEFAULT>") else: output += "\t{0}: {1}\n".format(key, value) # if body not present, output blank. if not api_response.request.body: output += "REQUEST BODY:\n{0}\n\n".format({}) else: try: # Attempt to load JSON from string to make it look beter. output += "REQUEST BODY:\n{0}\n\n".format(json.dumps(json.loads(api_response.request.body), indent=4)) except (TypeError, ValueError, AttributeError): # if pretty call above didn't work, just toss it to jdout to best effort it. output += "REQUEST BODY:\n{0}\n\n".format(jdout(api_response.request.body)) output += "RESPONSE: {0} {1}\n".format(api_response.status_code, api_response.reason) output += "RESPONSE HEADERS:\n" for key, value in api_response.headers.items(): output += "\t{0}: {1}\n".format(key, value) try: # look for CGX content first. output += "RESPONSE DATA:\n{0}".format(json.dumps(api_response.cgx_content, indent=4)) except (TypeError, ValueError, AttributeError): # look for standard response data. output += "RESPONSE DATA:\n{0}".format(json.dumps(json.loads(api_response.content), indent=4)) except (TypeError, ValueError, AttributeError, UnicodeDecodeError): # cgx_content did not exist, or was not JSON serializable. Try pretty output the base obj. try: output = json.dumps(api_response, indent=4) except (TypeError, ValueError, AttributeError): # Same issue, just raw output the passed data. Let any exceptions happen here. output = api_response return output
python
def jdout_detailed(api_response, sensitive=False): """ JD Output Detailed function. Meant for quick DETAILED pretty-printing of CloudGenix Request and Response objects for troubleshooting. This function returns a string instead of directly printing content. **Parameters:** - **api_response:** A CloudGenix-attribute extended `requests.Response` object - **sensitive:** Boolean, if True will print sensitive content (specifically, authentication cookies/headers). **Returns:** Pretty-formatted text of the Request, Request Headers, Request body, Response, Response Headers, and Response Body. """ try: # try to be super verbose. output = "REQUEST: {0} {1}\n".format(api_response.request.method, api_response.request.path_url) output += "REQUEST HEADERS:\n" for key, value in api_response.request.headers.items(): # look for sensitive values if key.lower() in ['cookie'] and not sensitive: # we need to do some work to watch for the AUTH_TOKEN cookie. Split on cookie separator cookie_list = value.split('; ') muted_cookie_list = [] for cookie in cookie_list: # check if cookie starts with a permutation of AUTH_TOKEN/whitespace. if cookie.lower().strip().startswith('auth_token='): # first 11 chars of cookie with whitespace removed + mute string. newcookie = cookie.strip()[:11] + "\"<SENSITIVE - NOT SHOWN BY DEFAULT>\"" muted_cookie_list.append(newcookie) else: muted_cookie_list.append(cookie) # got list of cookies, muted as needed. recombine. muted_value = "; ".join(muted_cookie_list) output += "\t{0}: {1}\n".format(key, muted_value) elif key.lower() in ['x-auth-token'] and not sensitive: output += "\t{0}: {1}\n".format(key, "<SENSITIVE - NOT SHOWN BY DEFAULT>") else: output += "\t{0}: {1}\n".format(key, value) # if body not present, output blank. if not api_response.request.body: output += "REQUEST BODY:\n{0}\n\n".format({}) else: try: # Attempt to load JSON from string to make it look beter. output += "REQUEST BODY:\n{0}\n\n".format(json.dumps(json.loads(api_response.request.body), indent=4)) except (TypeError, ValueError, AttributeError): # if pretty call above didn't work, just toss it to jdout to best effort it. output += "REQUEST BODY:\n{0}\n\n".format(jdout(api_response.request.body)) output += "RESPONSE: {0} {1}\n".format(api_response.status_code, api_response.reason) output += "RESPONSE HEADERS:\n" for key, value in api_response.headers.items(): output += "\t{0}: {1}\n".format(key, value) try: # look for CGX content first. output += "RESPONSE DATA:\n{0}".format(json.dumps(api_response.cgx_content, indent=4)) except (TypeError, ValueError, AttributeError): # look for standard response data. output += "RESPONSE DATA:\n{0}".format(json.dumps(json.loads(api_response.content), indent=4)) except (TypeError, ValueError, AttributeError, UnicodeDecodeError): # cgx_content did not exist, or was not JSON serializable. Try pretty output the base obj. try: output = json.dumps(api_response, indent=4) except (TypeError, ValueError, AttributeError): # Same issue, just raw output the passed data. Let any exceptions happen here. output = api_response return output
[ "def", "jdout_detailed", "(", "api_response", ",", "sensitive", "=", "False", ")", ":", "try", ":", "# try to be super verbose.", "output", "=", "\"REQUEST: {0} {1}\\n\"", ".", "format", "(", "api_response", ".", "request", ".", "method", ",", "api_response", ".", "request", ".", "path_url", ")", "output", "+=", "\"REQUEST HEADERS:\\n\"", "for", "key", ",", "value", "in", "api_response", ".", "request", ".", "headers", ".", "items", "(", ")", ":", "# look for sensitive values", "if", "key", ".", "lower", "(", ")", "in", "[", "'cookie'", "]", "and", "not", "sensitive", ":", "# we need to do some work to watch for the AUTH_TOKEN cookie. Split on cookie separator", "cookie_list", "=", "value", ".", "split", "(", "'; '", ")", "muted_cookie_list", "=", "[", "]", "for", "cookie", "in", "cookie_list", ":", "# check if cookie starts with a permutation of AUTH_TOKEN/whitespace.", "if", "cookie", ".", "lower", "(", ")", ".", "strip", "(", ")", ".", "startswith", "(", "'auth_token='", ")", ":", "# first 11 chars of cookie with whitespace removed + mute string.", "newcookie", "=", "cookie", ".", "strip", "(", ")", "[", ":", "11", "]", "+", "\"\\\"<SENSITIVE - NOT SHOWN BY DEFAULT>\\\"\"", "muted_cookie_list", ".", "append", "(", "newcookie", ")", "else", ":", "muted_cookie_list", ".", "append", "(", "cookie", ")", "# got list of cookies, muted as needed. recombine.", "muted_value", "=", "\"; \"", ".", "join", "(", "muted_cookie_list", ")", "output", "+=", "\"\\t{0}: {1}\\n\"", ".", "format", "(", "key", ",", "muted_value", ")", "elif", "key", ".", "lower", "(", ")", "in", "[", "'x-auth-token'", "]", "and", "not", "sensitive", ":", "output", "+=", "\"\\t{0}: {1}\\n\"", ".", "format", "(", "key", ",", "\"<SENSITIVE - NOT SHOWN BY DEFAULT>\"", ")", "else", ":", "output", "+=", "\"\\t{0}: {1}\\n\"", ".", "format", "(", "key", ",", "value", ")", "# if body not present, output blank.", "if", "not", "api_response", ".", "request", ".", "body", ":", "output", "+=", "\"REQUEST BODY:\\n{0}\\n\\n\"", ".", "format", "(", "{", "}", ")", "else", ":", "try", ":", "# Attempt to load JSON from string to make it look beter.", "output", "+=", "\"REQUEST BODY:\\n{0}\\n\\n\"", ".", "format", "(", "json", ".", "dumps", "(", "json", ".", "loads", "(", "api_response", ".", "request", ".", "body", ")", ",", "indent", "=", "4", ")", ")", "except", "(", "TypeError", ",", "ValueError", ",", "AttributeError", ")", ":", "# if pretty call above didn't work, just toss it to jdout to best effort it.", "output", "+=", "\"REQUEST BODY:\\n{0}\\n\\n\"", ".", "format", "(", "jdout", "(", "api_response", ".", "request", ".", "body", ")", ")", "output", "+=", "\"RESPONSE: {0} {1}\\n\"", ".", "format", "(", "api_response", ".", "status_code", ",", "api_response", ".", "reason", ")", "output", "+=", "\"RESPONSE HEADERS:\\n\"", "for", "key", ",", "value", "in", "api_response", ".", "headers", ".", "items", "(", ")", ":", "output", "+=", "\"\\t{0}: {1}\\n\"", ".", "format", "(", "key", ",", "value", ")", "try", ":", "# look for CGX content first.", "output", "+=", "\"RESPONSE DATA:\\n{0}\"", ".", "format", "(", "json", ".", "dumps", "(", "api_response", ".", "cgx_content", ",", "indent", "=", "4", ")", ")", "except", "(", "TypeError", ",", "ValueError", ",", "AttributeError", ")", ":", "# look for standard response data.", "output", "+=", "\"RESPONSE DATA:\\n{0}\"", ".", "format", "(", "json", ".", "dumps", "(", "json", ".", "loads", "(", "api_response", ".", "content", ")", ",", "indent", "=", "4", ")", ")", "except", "(", "TypeError", ",", 
"ValueError", ",", "AttributeError", ",", "UnicodeDecodeError", ")", ":", "# cgx_content did not exist, or was not JSON serializable. Try pretty output the base obj.", "try", ":", "output", "=", "json", ".", "dumps", "(", "api_response", ",", "indent", "=", "4", ")", "except", "(", "TypeError", ",", "ValueError", ",", "AttributeError", ")", ":", "# Same issue, just raw output the passed data. Let any exceptions happen here.", "output", "=", "api_response", "return", "output" ]
JD Output Detailed function. Meant for quick DETAILED pretty-printing of CloudGenix Request and Response objects for troubleshooting. This function returns a string instead of directly printing content. **Parameters:** - **api_response:** A CloudGenix-attribute extended `requests.Response` object - **sensitive:** Boolean, if True will print sensitive content (specifically, authentication cookies/headers). **Returns:** Pretty-formatted text of the Request, Request Headers, Request body, Response, Response Headers, and Response Body.
[ "JD", "Output", "Detailed", "function", ".", "Meant", "for", "quick", "DETAILED", "pretty", "-", "printing", "of", "CloudGenix", "Request", "and", "Response", "objects", "for", "troubleshooting", ".", "This", "function", "returns", "a", "string", "instead", "of", "directly", "printing", "content", "." ]
train
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L201-L266
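Continuing the sketch above, jdout_detailed() adds the request/response headers and bodies; sensitive authentication values stay masked unless sensitive=True is passed.

print(cloudgenix.jdout_detailed(resp))                    # auth cookie / x-auth-token values masked
print(cloudgenix.jdout_detailed(resp, sensitive=True))    # include the sensitive auth values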
CloudGenix/sdk-python
cloudgenix/__init__.py
API.notify_for_new_version
def notify_for_new_version(self): """ Check for a new version of the SDK on API constructor instantiation. If new version found, print Notification to STDERR. On failure of this check, fail silently. **Returns:** No item returned, directly prints notification to `sys.stderr`. """ # broad exception clause, if this fails for any reason just return. try: recommend_update = False update_check_resp = requests.get(self.update_info_url, timeout=3) web_version = update_check_resp.json()["info"]["version"] api_logger.debug("RETRIEVED_VERSION: %s", web_version) available_version = SDK_BUILD_REGEX.search(web_version).groupdict() current_version = SDK_BUILD_REGEX.search(self.version).groupdict() available_major = available_version.get('major') available_minor = available_version.get('minor') available_patch = available_version.get('patch') available_build = available_version.get('build') current_major = current_version.get('major') current_minor = current_version.get('minor') current_patch = current_version.get('patch') current_build = current_version.get('build') api_logger.debug("AVAILABLE_VERSION: %s", available_version) api_logger.debug("CURRENT_VERSION: %s", current_version) # check for major/minor version differences, do not alert for build differences. if available_major > current_major: recommend_update = True elif available_major >= current_major and available_minor > current_minor: recommend_update = True elif available_major >= current_major and available_minor >= current_minor and \ available_patch > current_patch: recommend_update = True api_logger.debug("NEED_UPDATE: %s", recommend_update) # notify. if recommend_update: sys.stderr.write("WARNING: CloudGenix Python SDK upgrade available. SDKs are typically deprecated 6 " "months after release of a new version.\n" "\tLatest Version: {0}\n" "\tCurrent Version: {1}\n" "\tFor more info, see 'https://github.com/cloudgenix/sdk-python'. Additionally, this " "message can be suppressed by instantiating the API with API(update_check=False).\n\n" "".format(web_version, self.version)) return except Exception: # just return and continue. return
python
def notify_for_new_version(self): """ Check for a new version of the SDK on API constructor instantiation. If new version found, print Notification to STDERR. On failure of this check, fail silently. **Returns:** No item returned, directly prints notification to `sys.stderr`. """ # broad exception clause, if this fails for any reason just return. try: recommend_update = False update_check_resp = requests.get(self.update_info_url, timeout=3) web_version = update_check_resp.json()["info"]["version"] api_logger.debug("RETRIEVED_VERSION: %s", web_version) available_version = SDK_BUILD_REGEX.search(web_version).groupdict() current_version = SDK_BUILD_REGEX.search(self.version).groupdict() available_major = available_version.get('major') available_minor = available_version.get('minor') available_patch = available_version.get('patch') available_build = available_version.get('build') current_major = current_version.get('major') current_minor = current_version.get('minor') current_patch = current_version.get('patch') current_build = current_version.get('build') api_logger.debug("AVAILABLE_VERSION: %s", available_version) api_logger.debug("CURRENT_VERSION: %s", current_version) # check for major/minor version differences, do not alert for build differences. if available_major > current_major: recommend_update = True elif available_major >= current_major and available_minor > current_minor: recommend_update = True elif available_major >= current_major and available_minor >= current_minor and \ available_patch > current_patch: recommend_update = True api_logger.debug("NEED_UPDATE: %s", recommend_update) # notify. if recommend_update: sys.stderr.write("WARNING: CloudGenix Python SDK upgrade available. SDKs are typically deprecated 6 " "months after release of a new version.\n" "\tLatest Version: {0}\n" "\tCurrent Version: {1}\n" "\tFor more info, see 'https://github.com/cloudgenix/sdk-python'. Additionally, this " "message can be suppressed by instantiating the API with API(update_check=False).\n\n" "".format(web_version, self.version)) return except Exception: # just return and continue. return
[ "def", "notify_for_new_version", "(", "self", ")", ":", "# broad exception clause, if this fails for any reason just return.", "try", ":", "recommend_update", "=", "False", "update_check_resp", "=", "requests", ".", "get", "(", "self", ".", "update_info_url", ",", "timeout", "=", "3", ")", "web_version", "=", "update_check_resp", ".", "json", "(", ")", "[", "\"info\"", "]", "[", "\"version\"", "]", "api_logger", ".", "debug", "(", "\"RETRIEVED_VERSION: %s\"", ",", "web_version", ")", "available_version", "=", "SDK_BUILD_REGEX", ".", "search", "(", "web_version", ")", ".", "groupdict", "(", ")", "current_version", "=", "SDK_BUILD_REGEX", ".", "search", "(", "self", ".", "version", ")", ".", "groupdict", "(", ")", "available_major", "=", "available_version", ".", "get", "(", "'major'", ")", "available_minor", "=", "available_version", ".", "get", "(", "'minor'", ")", "available_patch", "=", "available_version", ".", "get", "(", "'patch'", ")", "available_build", "=", "available_version", ".", "get", "(", "'build'", ")", "current_major", "=", "current_version", ".", "get", "(", "'major'", ")", "current_minor", "=", "current_version", ".", "get", "(", "'minor'", ")", "current_patch", "=", "current_version", ".", "get", "(", "'patch'", ")", "current_build", "=", "current_version", ".", "get", "(", "'build'", ")", "api_logger", ".", "debug", "(", "\"AVAILABLE_VERSION: %s\"", ",", "available_version", ")", "api_logger", ".", "debug", "(", "\"CURRENT_VERSION: %s\"", ",", "current_version", ")", "# check for major/minor version differences, do not alert for build differences.", "if", "available_major", ">", "current_major", ":", "recommend_update", "=", "True", "elif", "available_major", ">=", "current_major", "and", "available_minor", ">", "current_minor", ":", "recommend_update", "=", "True", "elif", "available_major", ">=", "current_major", "and", "available_minor", ">=", "current_minor", "and", "available_patch", ">", "current_patch", ":", "recommend_update", "=", "True", "api_logger", ".", "debug", "(", "\"NEED_UPDATE: %s\"", ",", "recommend_update", ")", "# notify.", "if", "recommend_update", ":", "sys", ".", "stderr", ".", "write", "(", "\"WARNING: CloudGenix Python SDK upgrade available. SDKs are typically deprecated 6 \"", "\"months after release of a new version.\\n\"", "\"\\tLatest Version: {0}\\n\"", "\"\\tCurrent Version: {1}\\n\"", "\"\\tFor more info, see 'https://github.com/cloudgenix/sdk-python'. Additionally, this \"", "\"message can be suppressed by instantiating the API with API(update_check=False).\\n\\n\"", "\"\"", ".", "format", "(", "web_version", ",", "self", ".", "version", ")", ")", "return", "except", "Exception", ":", "# just return and continue.", "return" ]
Check for a new version of the SDK on API constructor instantiation. If new version found, print Notification to STDERR. On failure of this check, fail silently. **Returns:** No item returned, directly prints notification to `sys.stderr`.
[ "Check", "for", "a", "new", "version", "of", "the", "SDK", "on", "API", "constructor", "instantiation", ".", "If", "new", "version", "found", "print", "Notification", "to", "STDERR", "." ]
train
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L446-L503
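As the warning text inside the function itself notes, the version check can be suppressed at construction time; it can also be invoked manually and fails silently on any error.

sdk = cloudgenix.API(update_check=False)   # construct without the automatic version check
sdk.notify_for_new_version()               # run the check on demand; writes to stderr only if a newer SDK exists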
CloudGenix/sdk-python
cloudgenix/__init__.py
API.ssl_verify
def ssl_verify(self, ssl_verify): """ Modify ssl verification settings **Parameters:** - ssl_verify: - True: Verify using builtin BYTE_CA_BUNDLE. - False: No SSL Verification. - Str: Full path to a x509 PEM CA File or bundle. **Returns:** Mutates API object in place, no return. """ self.verify = ssl_verify # if verify true/false, set ca_verify_file appropriately if isinstance(self.verify, bool): if self.verify: # True if os.name == 'nt': # Windows does not allow tmpfile access w/out close. Close file then delete it when done. self._ca_verify_file_handle = temp_ca_bundle(delete=False) self._ca_verify_file_handle.write(BYTE_CA_BUNDLE) self._ca_verify_file_handle.flush() self.ca_verify_filename = self._ca_verify_file_handle.name self._ca_verify_file_handle.close() # Other (POSIX/Unix/Linux/OSX) else: self._ca_verify_file_handle = temp_ca_bundle() self._ca_verify_file_handle.write(BYTE_CA_BUNDLE) self._ca_verify_file_handle.flush() self.ca_verify_filename = self._ca_verify_file_handle.name # register cleanup function for temp file. atexit.register(self._cleanup_ca_temp_file) else: # False # disable warnings for SSL certs. urllib3.disable_warnings() self.ca_verify_filename = False else: # Not True/False, assume path to file/dir for Requests self.ca_verify_filename = self.verify return
python
def ssl_verify(self, ssl_verify): """ Modify ssl verification settings **Parameters:** - ssl_verify: - True: Verify using builtin BYTE_CA_BUNDLE. - False: No SSL Verification. - Str: Full path to a x509 PEM CA File or bundle. **Returns:** Mutates API object in place, no return. """ self.verify = ssl_verify # if verify true/false, set ca_verify_file appropriately if isinstance(self.verify, bool): if self.verify: # True if os.name == 'nt': # Windows does not allow tmpfile access w/out close. Close file then delete it when done. self._ca_verify_file_handle = temp_ca_bundle(delete=False) self._ca_verify_file_handle.write(BYTE_CA_BUNDLE) self._ca_verify_file_handle.flush() self.ca_verify_filename = self._ca_verify_file_handle.name self._ca_verify_file_handle.close() # Other (POSIX/Unix/Linux/OSX) else: self._ca_verify_file_handle = temp_ca_bundle() self._ca_verify_file_handle.write(BYTE_CA_BUNDLE) self._ca_verify_file_handle.flush() self.ca_verify_filename = self._ca_verify_file_handle.name # register cleanup function for temp file. atexit.register(self._cleanup_ca_temp_file) else: # False # disable warnings for SSL certs. urllib3.disable_warnings() self.ca_verify_filename = False else: # Not True/False, assume path to file/dir for Requests self.ca_verify_filename = self.verify return
[ "def", "ssl_verify", "(", "self", ",", "ssl_verify", ")", ":", "self", ".", "verify", "=", "ssl_verify", "# if verify true/false, set ca_verify_file appropriately", "if", "isinstance", "(", "self", ".", "verify", ",", "bool", ")", ":", "if", "self", ".", "verify", ":", "# True", "if", "os", ".", "name", "==", "'nt'", ":", "# Windows does not allow tmpfile access w/out close. Close file then delete it when done.", "self", ".", "_ca_verify_file_handle", "=", "temp_ca_bundle", "(", "delete", "=", "False", ")", "self", ".", "_ca_verify_file_handle", ".", "write", "(", "BYTE_CA_BUNDLE", ")", "self", ".", "_ca_verify_file_handle", ".", "flush", "(", ")", "self", ".", "ca_verify_filename", "=", "self", ".", "_ca_verify_file_handle", ".", "name", "self", ".", "_ca_verify_file_handle", ".", "close", "(", ")", "# Other (POSIX/Unix/Linux/OSX)", "else", ":", "self", ".", "_ca_verify_file_handle", "=", "temp_ca_bundle", "(", ")", "self", ".", "_ca_verify_file_handle", ".", "write", "(", "BYTE_CA_BUNDLE", ")", "self", ".", "_ca_verify_file_handle", ".", "flush", "(", ")", "self", ".", "ca_verify_filename", "=", "self", ".", "_ca_verify_file_handle", ".", "name", "# register cleanup function for temp file.", "atexit", ".", "register", "(", "self", ".", "_cleanup_ca_temp_file", ")", "else", ":", "# False", "# disable warnings for SSL certs.", "urllib3", ".", "disable_warnings", "(", ")", "self", ".", "ca_verify_filename", "=", "False", "else", ":", "# Not True/False, assume path to file/dir for Requests", "self", ".", "ca_verify_filename", "=", "self", ".", "verify", "return" ]
Modify ssl verification settings **Parameters:** - ssl_verify: - True: Verify using builtin BYTE_CA_BUNDLE. - False: No SSL Verification. - Str: Full path to a x509 PEM CA File or bundle. **Returns:** Mutates API object in place, no return.
[ "Modify", "ssl", "verification", "settings" ]
train
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L505-L546
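A sketch of the three accepted ssl_verify() argument forms, using the sdk object from the earlier sketch; the PEM path is hypothetical.

sdk.ssl_verify(True)                          # verify against the builtin BYTE_CA_BUNDLE via a temp file
sdk.ssl_verify(False)                         # disable verification; urllib3 warnings are silenced
sdk.ssl_verify("/etc/ssl/certs/corp-ca.pem")  # hypothetical path to a custom x509 PEM CA file or bundle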
CloudGenix/sdk-python
cloudgenix/__init__.py
API.modify_rest_retry
def modify_rest_retry(self, total=8, connect=None, read=None, redirect=None, status=None, method_whitelist=urllib3.util.retry.Retry.DEFAULT_METHOD_WHITELIST, status_forcelist=None, backoff_factor=0.705883, raise_on_redirect=True, raise_on_status=True, respect_retry_after_header=True, adapter_url="https://"): """ Modify retry parameters for the SDK's rest call object. Parameters are directly from and passed directly to `urllib3.util.retry.Retry`, and get applied directly to the underlying `requests.Session` object. Default retry with total=8 and backoff_factor=0.705883: - Try 1, 0 delay (0 total seconds) - Try 2, 0 delay (0 total seconds) - Try 3, 0.705883 delay (0.705883 total seconds) - Try 4, 1.411766 delay (2.117649 total seconds) - Try 5, 2.823532 delay (4.941181 total seconds) - Try 6, 5.647064 delay (10.588245 total seconds) - Try 7, 11.294128 delay (21.882373 total seconds) - Try 8, 22.588256 delay (44.470629 total seconds) - Try 9, 45.176512 delay (89.647141 total seconds) - Try 10, 90.353024 delay (180.000165 total seconds) **Parameters:** - **total:** int, Total number of retries to allow. Takes precedence over other counts. - **connect:** int, How many connection-related errors to retry on. - **read:** int, How many times to retry on read errors. - **redirect:** int, How many redirects to perform. loops. - **status:** int, How many times to retry on bad status codes. - **method_whitelist:** iterable, Set of uppercased HTTP method verbs that we should retry on. - **status_forcelist:** iterable, A set of integer HTTP status codes that we should force a retry on. - **backoff_factor:** float, A backoff factor to apply between attempts after the second try. - **raise_on_redirect:** bool, True = raise a MaxRetryError, False = return latest 3xx response. - **raise_on_status:** bool, Similar logic to ``raise_on_redirect`` but for status responses. - **respect_retry_after_header:** bool, Whether to respect Retry-After header on status codes. - **adapter_url:** string, URL match for these retry values (default `https://`) **Returns:** No return, mutates the session directly """ # Cloudgenix responses with 502/504 are usually recoverable. Use them if no list specified. if status_forcelist is None: status_forcelist = (413, 429, 502, 503, 504) retry = urllib3.util.retry.Retry(total=total, connect=connect, read=read, redirect=redirect, status=status, method_whitelist=method_whitelist, status_forcelist=status_forcelist, backoff_factor=backoff_factor, raise_on_redirect=raise_on_redirect, raise_on_status=raise_on_status, respect_retry_after_header=respect_retry_after_header) adapter = requests.adapters.HTTPAdapter(max_retries=retry) self._session.mount(adapter_url, adapter) return
python
def modify_rest_retry(self, total=8, connect=None, read=None, redirect=None, status=None, method_whitelist=urllib3.util.retry.Retry.DEFAULT_METHOD_WHITELIST, status_forcelist=None, backoff_factor=0.705883, raise_on_redirect=True, raise_on_status=True, respect_retry_after_header=True, adapter_url="https://"): """ Modify retry parameters for the SDK's rest call object. Parameters are directly from and passed directly to `urllib3.util.retry.Retry`, and get applied directly to the underlying `requests.Session` object. Default retry with total=8 and backoff_factor=0.705883: - Try 1, 0 delay (0 total seconds) - Try 2, 0 delay (0 total seconds) - Try 3, 0.705883 delay (0.705883 total seconds) - Try 4, 1.411766 delay (2.117649 total seconds) - Try 5, 2.823532 delay (4.941181 total seconds) - Try 6, 5.647064 delay (10.588245 total seconds) - Try 7, 11.294128 delay (21.882373 total seconds) - Try 8, 22.588256 delay (44.470629 total seconds) - Try 9, 45.176512 delay (89.647141 total seconds) - Try 10, 90.353024 delay (180.000165 total seconds) **Parameters:** - **total:** int, Total number of retries to allow. Takes precedence over other counts. - **connect:** int, How many connection-related errors to retry on. - **read:** int, How many times to retry on read errors. - **redirect:** int, How many redirects to perform. loops. - **status:** int, How many times to retry on bad status codes. - **method_whitelist:** iterable, Set of uppercased HTTP method verbs that we should retry on. - **status_forcelist:** iterable, A set of integer HTTP status codes that we should force a retry on. - **backoff_factor:** float, A backoff factor to apply between attempts after the second try. - **raise_on_redirect:** bool, True = raise a MaxRetryError, False = return latest 3xx response. - **raise_on_status:** bool, Similar logic to ``raise_on_redirect`` but for status responses. - **respect_retry_after_header:** bool, Whether to respect Retry-After header on status codes. - **adapter_url:** string, URL match for these retry values (default `https://`) **Returns:** No return, mutates the session directly """ # Cloudgenix responses with 502/504 are usually recoverable. Use them if no list specified. if status_forcelist is None: status_forcelist = (413, 429, 502, 503, 504) retry = urllib3.util.retry.Retry(total=total, connect=connect, read=read, redirect=redirect, status=status, method_whitelist=method_whitelist, status_forcelist=status_forcelist, backoff_factor=backoff_factor, raise_on_redirect=raise_on_redirect, raise_on_status=raise_on_status, respect_retry_after_header=respect_retry_after_header) adapter = requests.adapters.HTTPAdapter(max_retries=retry) self._session.mount(adapter_url, adapter) return
[ "def", "modify_rest_retry", "(", "self", ",", "total", "=", "8", ",", "connect", "=", "None", ",", "read", "=", "None", ",", "redirect", "=", "None", ",", "status", "=", "None", ",", "method_whitelist", "=", "urllib3", ".", "util", ".", "retry", ".", "Retry", ".", "DEFAULT_METHOD_WHITELIST", ",", "status_forcelist", "=", "None", ",", "backoff_factor", "=", "0.705883", ",", "raise_on_redirect", "=", "True", ",", "raise_on_status", "=", "True", ",", "respect_retry_after_header", "=", "True", ",", "adapter_url", "=", "\"https://\"", ")", ":", "# Cloudgenix responses with 502/504 are usually recoverable. Use them if no list specified.", "if", "status_forcelist", "is", "None", ":", "status_forcelist", "=", "(", "413", ",", "429", ",", "502", ",", "503", ",", "504", ")", "retry", "=", "urllib3", ".", "util", ".", "retry", ".", "Retry", "(", "total", "=", "total", ",", "connect", "=", "connect", ",", "read", "=", "read", ",", "redirect", "=", "redirect", ",", "status", "=", "status", ",", "method_whitelist", "=", "method_whitelist", ",", "status_forcelist", "=", "status_forcelist", ",", "backoff_factor", "=", "backoff_factor", ",", "raise_on_redirect", "=", "raise_on_redirect", ",", "raise_on_status", "=", "raise_on_status", ",", "respect_retry_after_header", "=", "respect_retry_after_header", ")", "adapter", "=", "requests", ".", "adapters", ".", "HTTPAdapter", "(", "max_retries", "=", "retry", ")", "self", ".", "_session", ".", "mount", "(", "adapter_url", ",", "adapter", ")", "return" ]
Modify retry parameters for the SDK's rest call object. Parameters are directly from and passed directly to `urllib3.util.retry.Retry`, and get applied directly to the underlying `requests.Session` object. Default retry with total=8 and backoff_factor=0.705883: - Try 1, 0 delay (0 total seconds) - Try 2, 0 delay (0 total seconds) - Try 3, 0.705883 delay (0.705883 total seconds) - Try 4, 1.411766 delay (2.117649 total seconds) - Try 5, 2.823532 delay (4.941181 total seconds) - Try 6, 5.647064 delay (10.588245 total seconds) - Try 7, 11.294128 delay (21.882373 total seconds) - Try 8, 22.588256 delay (44.470629 total seconds) - Try 9, 45.176512 delay (89.647141 total seconds) - Try 10, 90.353024 delay (180.000165 total seconds) **Parameters:** - **total:** int, Total number of retries to allow. Takes precedence over other counts. - **connect:** int, How many connection-related errors to retry on. - **read:** int, How many times to retry on read errors. - **redirect:** int, How many redirects to perform. loops. - **status:** int, How many times to retry on bad status codes. - **method_whitelist:** iterable, Set of uppercased HTTP method verbs that we should retry on. - **status_forcelist:** iterable, A set of integer HTTP status codes that we should force a retry on. - **backoff_factor:** float, A backoff factor to apply between attempts after the second try. - **raise_on_redirect:** bool, True = raise a MaxRetryError, False = return latest 3xx response. - **raise_on_status:** bool, Similar logic to ``raise_on_redirect`` but for status responses. - **respect_retry_after_header:** bool, Whether to respect Retry-After header on status codes. - **adapter_url:** string, URL match for these retry values (default `https://`) **Returns:** No return, mutates the session directly
[ "Modify", "retry", "parameters", "for", "the", "SDK", "s", "rest", "call", "object", "." ]
train
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L548-L604
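A sketch of tightening the default retry policy; all keyword names come from the signature above.

sdk.modify_rest_retry(total=3,                               # allow at most 3 retries instead of 8
                      backoff_factor=1.0,                    # longer waits between the later attempts
                      status_forcelist=(429, 502, 503, 504)) # retry only these status codes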
CloudGenix/sdk-python
cloudgenix/__init__.py
API.view_rest_retry
def view_rest_retry(self, url=None): """ View current rest retry settings in the `requests.Session()` object **Parameters:** - **url:** URL to use to determine retry methods for. Defaults to 'https://' **Returns:** Dict, Key header, value is header value. """ if url is None: url = "https://" return vars(self._session.get_adapter(url).max_retries)
python
def view_rest_retry(self, url=None): """ View current rest retry settings in the `requests.Session()` object **Parameters:** - **url:** URL to use to determine retry methods for. Defaults to 'https://' **Returns:** Dict, Key header, value is header value. """ if url is None: url = "https://" return vars(self._session.get_adapter(url).max_retries)
[ "def", "view_rest_retry", "(", "self", ",", "url", "=", "None", ")", ":", "if", "url", "is", "None", ":", "url", "=", "\"https://\"", "return", "vars", "(", "self", ".", "_session", ".", "get_adapter", "(", "url", ")", ".", "max_retries", ")" ]
View current rest retry settings in the `requests.Session()` object **Parameters:** - **url:** URL to use to determine retry methods for. Defaults to 'https://' **Returns:** Dict, Key header, value is header value.
[ "View", "current", "rest", "retry", "settings", "in", "the", "requests", ".", "Session", "()", "object" ]
train
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L606-L618
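Inspecting the currently mounted retry policy:

print(sdk.view_rest_retry())   # dict of Retry attributes for the default "https://" adapter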
CloudGenix/sdk-python
cloudgenix/__init__.py
API.view_cookies
def view_cookies(self): """ View current cookies in the `requests.Session()` object **Returns:** List of Dicts, one cookie per Dict. """ return_list = [] for cookie in self._session.cookies: return_list.append(vars(cookie)) return return_list
python
def view_cookies(self): """ View current cookies in the `requests.Session()` object **Returns:** List of Dicts, one cookie per Dict. """ return_list = [] for cookie in self._session.cookies: return_list.append(vars(cookie)) return return_list
[ "def", "view_cookies", "(", "self", ")", ":", "return_list", "=", "[", "]", "for", "cookie", "in", "self", ".", "_session", ".", "cookies", ":", "return_list", ".", "append", "(", "vars", "(", "cookie", ")", ")", "return", "return_list" ]
View current cookies in the `requests.Session()` object **Returns:** List of Dicts, one cookie per Dict.
[ "View", "current", "cookies", "in", "the", "requests", ".", "Session", "()", "object" ]
train
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L662-L672
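A sketch of listing session cookies; each entry is the vars() dict of a requests cookie, so standard attributes such as name and domain are available as keys.

for cookie in sdk.view_cookies():
    print(cookie.get("name"), cookie.get("domain"))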
CloudGenix/sdk-python
cloudgenix/__init__.py
API.set_debug
def set_debug(self, debuglevel): """ Change the debug level of the API **Returns:** No item returned. """ if isinstance(debuglevel, int): self._debuglevel = debuglevel if self._debuglevel == 1: logging.basicConfig(level=logging.INFO, format="%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s") api_logger.setLevel(logging.INFO) elif self._debuglevel == 2: logging.basicConfig(level=logging.DEBUG, format="%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s") requests.cookies.cookielib.debug = True api_logger.setLevel(logging.DEBUG) elif self._debuglevel >= 3: logging.basicConfig(level=logging.DEBUG, format="%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s") requests.cookies.cookielib.debug = True api_logger.setLevel(logging.DEBUG) urllib3_logger = logging.getLogger("requests.packages.urllib3") urllib3_logger.setLevel(logging.DEBUG) urllib3_logger.propagate = True else: # Remove all handlers for handler in logging.root.handlers[:]: logging.root.removeHandler(handler) # set logging level to default requests.cookies.cookielib.debug = False api_logger.setLevel(logging.WARNING) return
python
def set_debug(self, debuglevel): """ Change the debug level of the API **Returns:** No item returned. """ if isinstance(debuglevel, int): self._debuglevel = debuglevel if self._debuglevel == 1: logging.basicConfig(level=logging.INFO, format="%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s") api_logger.setLevel(logging.INFO) elif self._debuglevel == 2: logging.basicConfig(level=logging.DEBUG, format="%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s") requests.cookies.cookielib.debug = True api_logger.setLevel(logging.DEBUG) elif self._debuglevel >= 3: logging.basicConfig(level=logging.DEBUG, format="%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s") requests.cookies.cookielib.debug = True api_logger.setLevel(logging.DEBUG) urllib3_logger = logging.getLogger("requests.packages.urllib3") urllib3_logger.setLevel(logging.DEBUG) urllib3_logger.propagate = True else: # Remove all handlers for handler in logging.root.handlers[:]: logging.root.removeHandler(handler) # set logging level to default requests.cookies.cookielib.debug = False api_logger.setLevel(logging.WARNING) return
[ "def", "set_debug", "(", "self", ",", "debuglevel", ")", ":", "if", "isinstance", "(", "debuglevel", ",", "int", ")", ":", "self", ".", "_debuglevel", "=", "debuglevel", "if", "self", ".", "_debuglevel", "==", "1", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ",", "format", "=", "\"%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s\"", ")", "api_logger", ".", "setLevel", "(", "logging", ".", "INFO", ")", "elif", "self", ".", "_debuglevel", "==", "2", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "DEBUG", ",", "format", "=", "\"%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s\"", ")", "requests", ".", "cookies", ".", "cookielib", ".", "debug", "=", "True", "api_logger", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "elif", "self", ".", "_debuglevel", ">=", "3", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "DEBUG", ",", "format", "=", "\"%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s\"", ")", "requests", ".", "cookies", ".", "cookielib", ".", "debug", "=", "True", "api_logger", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "urllib3_logger", "=", "logging", ".", "getLogger", "(", "\"requests.packages.urllib3\"", ")", "urllib3_logger", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "urllib3_logger", ".", "propagate", "=", "True", "else", ":", "# Remove all handlers", "for", "handler", "in", "logging", ".", "root", ".", "handlers", "[", ":", "]", ":", "logging", ".", "root", ".", "removeHandler", "(", "handler", ")", "# set logging level to default", "requests", ".", "cookies", ".", "cookielib", ".", "debug", "=", "False", "api_logger", ".", "setLevel", "(", "logging", ".", "WARNING", ")", "return" ]
Change the debug level of the API **Returns:** No item returned.
[ "Change", "the", "debug", "level", "of", "the", "API" ]
train
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L674-L708
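A sketch of toggling SDK logging; the level meanings follow the branches in the function above.

sdk.set_debug(1)   # INFO-level SDK logging
sdk.set_debug(2)   # DEBUG-level SDK logging plus cookie debugging
sdk.set_debug(3)   # DEBUG plus urllib3 logger output
sdk.set_debug(0)   # any other value removes handlers and returns to WARNING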
CloudGenix/sdk-python
cloudgenix/__init__.py
API._subclass_container
def _subclass_container(self): """ Call subclasses via function to allow passing parent namespace to subclasses. **Returns:** dict with subclass references. """ _parent_class = self class GetWrapper(Get): def __init__(self): self._parent_class = _parent_class class PostWrapper(Post): def __init__(self): self._parent_class = _parent_class class PutWrapper(Put): def __init__(self): self._parent_class = _parent_class class PatchWrapper(Patch): def __init__(self): self._parent_class = _parent_class class DeleteWrapper(Delete): def __init__(self): self._parent_class = _parent_class class InteractiveWrapper(Interactive): def __init__(self): self._parent_class = _parent_class return {"get": GetWrapper, "post": PostWrapper, "put": PutWrapper, "patch": PatchWrapper, "delete": DeleteWrapper, "interactive": InteractiveWrapper}
python
def _subclass_container(self): """ Call subclasses via function to allow passing parent namespace to subclasses. **Returns:** dict with subclass references. """ _parent_class = self class GetWrapper(Get): def __init__(self): self._parent_class = _parent_class class PostWrapper(Post): def __init__(self): self._parent_class = _parent_class class PutWrapper(Put): def __init__(self): self._parent_class = _parent_class class PatchWrapper(Patch): def __init__(self): self._parent_class = _parent_class class DeleteWrapper(Delete): def __init__(self): self._parent_class = _parent_class class InteractiveWrapper(Interactive): def __init__(self): self._parent_class = _parent_class return {"get": GetWrapper, "post": PostWrapper, "put": PutWrapper, "patch": PatchWrapper, "delete": DeleteWrapper, "interactive": InteractiveWrapper}
[ "def", "_subclass_container", "(", "self", ")", ":", "_parent_class", "=", "self", "class", "GetWrapper", "(", "Get", ")", ":", "def", "__init__", "(", "self", ")", ":", "self", ".", "_parent_class", "=", "_parent_class", "class", "PostWrapper", "(", "Post", ")", ":", "def", "__init__", "(", "self", ")", ":", "self", ".", "_parent_class", "=", "_parent_class", "class", "PutWrapper", "(", "Put", ")", ":", "def", "__init__", "(", "self", ")", ":", "self", ".", "_parent_class", "=", "_parent_class", "class", "PatchWrapper", "(", "Patch", ")", ":", "def", "__init__", "(", "self", ")", ":", "self", ".", "_parent_class", "=", "_parent_class", "class", "DeleteWrapper", "(", "Delete", ")", ":", "def", "__init__", "(", "self", ")", ":", "self", ".", "_parent_class", "=", "_parent_class", "class", "InteractiveWrapper", "(", "Interactive", ")", ":", "def", "__init__", "(", "self", ")", ":", "self", ".", "_parent_class", "=", "_parent_class", "return", "{", "\"get\"", ":", "GetWrapper", ",", "\"post\"", ":", "PostWrapper", ",", "\"put\"", ":", "PutWrapper", ",", "\"patch\"", ":", "PatchWrapper", ",", "\"delete\"", ":", "DeleteWrapper", ",", "\"interactive\"", ":", "InteractiveWrapper", "}" ]
Call subclasses via function to allow passing parent namespace to subclasses. **Returns:** dict with subclass references.
[ "Call", "subclasses", "via", "function", "to", "allow", "passing", "parent", "namespace", "to", "subclasses", "." ]
train
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L710-L753
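_subclass_container() is an internal helper, but a small sketch shows what the returned container provides: each wrapper class closes over the parent API instance.

containers = sdk._subclass_container()   # {"get": GetWrapper, "post": PostWrapper, ...}
get_api = containers["get"]()            # instantiating a wrapper binds it to the parent API object
print(get_api._parent_class is sdk)      # True - the wrapper can reach the parent namespace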
CloudGenix/sdk-python
cloudgenix/__init__.py
API.rest_call
def rest_call(self, url, method, data=None, sensitive=False, timeout=None, content_json=True, retry=None, max_retry=None, retry_sleep=None): """ Generic REST call worker function **Parameters:** - **url:** URL for the REST call - **method:** METHOD for the REST call - **data:** Optional DATA for the call (for POST/PUT/etc.) - **sensitive:** Flag if content request/response should be hidden from logging functions - **timeout:** Requests Timeout - **content_json:** Bool on whether the Content-Type header should be set to application/json - **retry:** DEPRECATED - please use `cloudgenix.API.modify_rest_retry` instead. - **max_retry:** DEPRECATED - please use `cloudgenix.API.modify_rest_retry` instead. - **retry_sleep:** DEPRECATED - please use `cloudgenix.API.modify_rest_retry` instead. **Returns:** Requests.Response object, extended with: - **cgx_status**: Bool, True if a successful CloudGenix response, False if error. - **cgx_content**: Content of the response, guaranteed to be in Dict format. Empty/invalid responses will be converted to a Dict response. """ # pull retry related items from Constructor if not specified. if timeout is None: timeout = self.rest_call_timeout if retry is not None: # Someone using deprecated retry code. Notify. sys.stderr.write("WARNING: 'retry' option of rest_call() has been deprecated. " "Please use 'API.modify_rest_retry()' instead.") if max_retry is not None: # Someone using deprecated retry code. Notify. sys.stderr.write("WARNING: 'max_retry' option of rest_call() has been deprecated. " "Please use 'API.modify_rest_retry()' instead.") if retry_sleep is not None: # Someone using deprecated retry code. Notify. sys.stderr.write("WARNING: 'max_retry' option of rest_call() has been deprecated. " "Please use 'API.modify_rest_retry()' instead.") # Get logging level, use this to bypass logging functions with possible large content if not set. logger_level = api_logger.getEffectiveLevel() # populate headers and cookies from session. if content_json and method.lower() not in ['get', 'delete']: headers = { 'Content-Type': 'application/json' } else: headers = {} # add session headers headers.update(self._session.headers) cookie = self._session.cookies.get_dict() # make sure data is populated if present. if isinstance(data, (list, dict)): data = json.dumps(data) api_logger.debug('REST_CALL URL = %s', url) # make request try: if not sensitive: api_logger.debug('\n\tREQUEST: %s %s\n\tHEADERS: %s\n\tCOOKIES: %s\n\tDATA: %s\n', method.upper(), url, headers, cookie, data) # Actual request response = self._session.request(method, url, data=data, verify=self.ca_verify_filename, stream=True, timeout=timeout, headers=headers, allow_redirects=False) # Request complete - lets parse. 
# if it's a non-CGX-good response, return with cgx_status = False if response.status_code not in [requests.codes.ok, requests.codes.no_content, requests.codes.found, requests.codes.moved]: # Simple JSON debug if not sensitive: try: api_logger.debug('RESPONSE HEADERS: %s\n', json.dumps( json.loads(text_type(response.headers)), indent=4)) except ValueError: api_logger.debug('RESPONSE HEADERS: %s\n', text_type(response.headers)) try: api_logger.debug('RESPONSE: %s\n', json.dumps(response.json(), indent=4)) except ValueError: api_logger.debug('RESPONSE: %s\n', text_type(response.text)) else: api_logger.debug('RESPONSE NOT LOGGED (sensitive content)') api_logger.debug("Error, non-200 response received: %s", response.status_code) # CGX extend requests.Response for return response.cgx_status = False response.cgx_content = self._catch_nonjson_streamresponse(response.text) return response else: # Simple JSON debug if not sensitive and (logger_level <= logging.DEBUG and logger_level != logging.NOTSET): try: api_logger.debug('RESPONSE HEADERS: %s\n', json.dumps( json.loads(text_type(response.headers)), indent=4)) api_logger.debug('RESPONSE: %s\n', json.dumps(response.json(), indent=4)) except ValueError: api_logger.debug('RESPONSE HEADERS: %s\n', text_type(response.headers)) api_logger.debug('RESPONSE: %s\n', text_type(response.text)) elif sensitive: api_logger.debug('RESPONSE NOT LOGGED (sensitive content)') # CGX extend requests.Response for return response.cgx_status = True response.cgx_content = self._catch_nonjson_streamresponse(response.text) return response except (requests.exceptions.Timeout, requests.exceptions.ConnectionError, urllib3.exceptions.MaxRetryError)\ as e: api_logger.info("Error, %s.", text_type(e)) # make a requests.Response object for return since we didn't get one. response = requests.Response # CGX extend requests.Response for return response.cgx_status = False response.cgx_content = { '_error': [ { 'message': 'REST Request Exception: {}'.format(e), 'data': {}, } ] } return response
python
def rest_call(self, url, method, data=None, sensitive=False, timeout=None, content_json=True, retry=None, max_retry=None, retry_sleep=None): """ Generic REST call worker function **Parameters:** - **url:** URL for the REST call - **method:** METHOD for the REST call - **data:** Optional DATA for the call (for POST/PUT/etc.) - **sensitive:** Flag if content request/response should be hidden from logging functions - **timeout:** Requests Timeout - **content_json:** Bool on whether the Content-Type header should be set to application/json - **retry:** DEPRECATED - please use `cloudgenix.API.modify_rest_retry` instead. - **max_retry:** DEPRECATED - please use `cloudgenix.API.modify_rest_retry` instead. - **retry_sleep:** DEPRECATED - please use `cloudgenix.API.modify_rest_retry` instead. **Returns:** Requests.Response object, extended with: - **cgx_status**: Bool, True if a successful CloudGenix response, False if error. - **cgx_content**: Content of the response, guaranteed to be in Dict format. Empty/invalid responses will be converted to a Dict response. """ # pull retry related items from Constructor if not specified. if timeout is None: timeout = self.rest_call_timeout if retry is not None: # Someone using deprecated retry code. Notify. sys.stderr.write("WARNING: 'retry' option of rest_call() has been deprecated. " "Please use 'API.modify_rest_retry()' instead.") if max_retry is not None: # Someone using deprecated retry code. Notify. sys.stderr.write("WARNING: 'max_retry' option of rest_call() has been deprecated. " "Please use 'API.modify_rest_retry()' instead.") if retry_sleep is not None: # Someone using deprecated retry code. Notify. sys.stderr.write("WARNING: 'max_retry' option of rest_call() has been deprecated. " "Please use 'API.modify_rest_retry()' instead.") # Get logging level, use this to bypass logging functions with possible large content if not set. logger_level = api_logger.getEffectiveLevel() # populate headers and cookies from session. if content_json and method.lower() not in ['get', 'delete']: headers = { 'Content-Type': 'application/json' } else: headers = {} # add session headers headers.update(self._session.headers) cookie = self._session.cookies.get_dict() # make sure data is populated if present. if isinstance(data, (list, dict)): data = json.dumps(data) api_logger.debug('REST_CALL URL = %s', url) # make request try: if not sensitive: api_logger.debug('\n\tREQUEST: %s %s\n\tHEADERS: %s\n\tCOOKIES: %s\n\tDATA: %s\n', method.upper(), url, headers, cookie, data) # Actual request response = self._session.request(method, url, data=data, verify=self.ca_verify_filename, stream=True, timeout=timeout, headers=headers, allow_redirects=False) # Request complete - lets parse. 
# if it's a non-CGX-good response, return with cgx_status = False if response.status_code not in [requests.codes.ok, requests.codes.no_content, requests.codes.found, requests.codes.moved]: # Simple JSON debug if not sensitive: try: api_logger.debug('RESPONSE HEADERS: %s\n', json.dumps( json.loads(text_type(response.headers)), indent=4)) except ValueError: api_logger.debug('RESPONSE HEADERS: %s\n', text_type(response.headers)) try: api_logger.debug('RESPONSE: %s\n', json.dumps(response.json(), indent=4)) except ValueError: api_logger.debug('RESPONSE: %s\n', text_type(response.text)) else: api_logger.debug('RESPONSE NOT LOGGED (sensitive content)') api_logger.debug("Error, non-200 response received: %s", response.status_code) # CGX extend requests.Response for return response.cgx_status = False response.cgx_content = self._catch_nonjson_streamresponse(response.text) return response else: # Simple JSON debug if not sensitive and (logger_level <= logging.DEBUG and logger_level != logging.NOTSET): try: api_logger.debug('RESPONSE HEADERS: %s\n', json.dumps( json.loads(text_type(response.headers)), indent=4)) api_logger.debug('RESPONSE: %s\n', json.dumps(response.json(), indent=4)) except ValueError: api_logger.debug('RESPONSE HEADERS: %s\n', text_type(response.headers)) api_logger.debug('RESPONSE: %s\n', text_type(response.text)) elif sensitive: api_logger.debug('RESPONSE NOT LOGGED (sensitive content)') # CGX extend requests.Response for return response.cgx_status = True response.cgx_content = self._catch_nonjson_streamresponse(response.text) return response except (requests.exceptions.Timeout, requests.exceptions.ConnectionError, urllib3.exceptions.MaxRetryError)\ as e: api_logger.info("Error, %s.", text_type(e)) # make a requests.Response object for return since we didn't get one. response = requests.Response # CGX extend requests.Response for return response.cgx_status = False response.cgx_content = { '_error': [ { 'message': 'REST Request Exception: {}'.format(e), 'data': {}, } ] } return response
[ "def", "rest_call", "(", "self", ",", "url", ",", "method", ",", "data", "=", "None", ",", "sensitive", "=", "False", ",", "timeout", "=", "None", ",", "content_json", "=", "True", ",", "retry", "=", "None", ",", "max_retry", "=", "None", ",", "retry_sleep", "=", "None", ")", ":", "# pull retry related items from Constructor if not specified.", "if", "timeout", "is", "None", ":", "timeout", "=", "self", ".", "rest_call_timeout", "if", "retry", "is", "not", "None", ":", "# Someone using deprecated retry code. Notify.", "sys", ".", "stderr", ".", "write", "(", "\"WARNING: 'retry' option of rest_call() has been deprecated. \"", "\"Please use 'API.modify_rest_retry()' instead.\"", ")", "if", "max_retry", "is", "not", "None", ":", "# Someone using deprecated retry code. Notify.", "sys", ".", "stderr", ".", "write", "(", "\"WARNING: 'max_retry' option of rest_call() has been deprecated. \"", "\"Please use 'API.modify_rest_retry()' instead.\"", ")", "if", "retry_sleep", "is", "not", "None", ":", "# Someone using deprecated retry code. Notify.", "sys", ".", "stderr", ".", "write", "(", "\"WARNING: 'max_retry' option of rest_call() has been deprecated. \"", "\"Please use 'API.modify_rest_retry()' instead.\"", ")", "# Get logging level, use this to bypass logging functions with possible large content if not set.", "logger_level", "=", "api_logger", ".", "getEffectiveLevel", "(", ")", "# populate headers and cookies from session.", "if", "content_json", "and", "method", ".", "lower", "(", ")", "not", "in", "[", "'get'", ",", "'delete'", "]", ":", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", "}", "else", ":", "headers", "=", "{", "}", "# add session headers", "headers", ".", "update", "(", "self", ".", "_session", ".", "headers", ")", "cookie", "=", "self", ".", "_session", ".", "cookies", ".", "get_dict", "(", ")", "# make sure data is populated if present.", "if", "isinstance", "(", "data", ",", "(", "list", ",", "dict", ")", ")", ":", "data", "=", "json", ".", "dumps", "(", "data", ")", "api_logger", ".", "debug", "(", "'REST_CALL URL = %s'", ",", "url", ")", "# make request", "try", ":", "if", "not", "sensitive", ":", "api_logger", ".", "debug", "(", "'\\n\\tREQUEST: %s %s\\n\\tHEADERS: %s\\n\\tCOOKIES: %s\\n\\tDATA: %s\\n'", ",", "method", ".", "upper", "(", ")", ",", "url", ",", "headers", ",", "cookie", ",", "data", ")", "# Actual request", "response", "=", "self", ".", "_session", ".", "request", "(", "method", ",", "url", ",", "data", "=", "data", ",", "verify", "=", "self", ".", "ca_verify_filename", ",", "stream", "=", "True", ",", "timeout", "=", "timeout", ",", "headers", "=", "headers", ",", "allow_redirects", "=", "False", ")", "# Request complete - lets parse.", "# if it's a non-CGX-good response, return with cgx_status = False", "if", "response", ".", "status_code", "not", "in", "[", "requests", ".", "codes", ".", "ok", ",", "requests", ".", "codes", ".", "no_content", ",", "requests", ".", "codes", ".", "found", ",", "requests", ".", "codes", ".", "moved", "]", ":", "# Simple JSON debug", "if", "not", "sensitive", ":", "try", ":", "api_logger", ".", "debug", "(", "'RESPONSE HEADERS: %s\\n'", ",", "json", ".", "dumps", "(", "json", ".", "loads", "(", "text_type", "(", "response", ".", "headers", ")", ")", ",", "indent", "=", "4", ")", ")", "except", "ValueError", ":", "api_logger", ".", "debug", "(", "'RESPONSE HEADERS: %s\\n'", ",", "text_type", "(", "response", ".", "headers", ")", ")", "try", ":", "api_logger", ".", "debug", "(", "'RESPONSE: %s\\n'", ",", 
"json", ".", "dumps", "(", "response", ".", "json", "(", ")", ",", "indent", "=", "4", ")", ")", "except", "ValueError", ":", "api_logger", ".", "debug", "(", "'RESPONSE: %s\\n'", ",", "text_type", "(", "response", ".", "text", ")", ")", "else", ":", "api_logger", ".", "debug", "(", "'RESPONSE NOT LOGGED (sensitive content)'", ")", "api_logger", ".", "debug", "(", "\"Error, non-200 response received: %s\"", ",", "response", ".", "status_code", ")", "# CGX extend requests.Response for return", "response", ".", "cgx_status", "=", "False", "response", ".", "cgx_content", "=", "self", ".", "_catch_nonjson_streamresponse", "(", "response", ".", "text", ")", "return", "response", "else", ":", "# Simple JSON debug", "if", "not", "sensitive", "and", "(", "logger_level", "<=", "logging", ".", "DEBUG", "and", "logger_level", "!=", "logging", ".", "NOTSET", ")", ":", "try", ":", "api_logger", ".", "debug", "(", "'RESPONSE HEADERS: %s\\n'", ",", "json", ".", "dumps", "(", "json", ".", "loads", "(", "text_type", "(", "response", ".", "headers", ")", ")", ",", "indent", "=", "4", ")", ")", "api_logger", ".", "debug", "(", "'RESPONSE: %s\\n'", ",", "json", ".", "dumps", "(", "response", ".", "json", "(", ")", ",", "indent", "=", "4", ")", ")", "except", "ValueError", ":", "api_logger", ".", "debug", "(", "'RESPONSE HEADERS: %s\\n'", ",", "text_type", "(", "response", ".", "headers", ")", ")", "api_logger", ".", "debug", "(", "'RESPONSE: %s\\n'", ",", "text_type", "(", "response", ".", "text", ")", ")", "elif", "sensitive", ":", "api_logger", ".", "debug", "(", "'RESPONSE NOT LOGGED (sensitive content)'", ")", "# CGX extend requests.Response for return", "response", ".", "cgx_status", "=", "True", "response", ".", "cgx_content", "=", "self", ".", "_catch_nonjson_streamresponse", "(", "response", ".", "text", ")", "return", "response", "except", "(", "requests", ".", "exceptions", ".", "Timeout", ",", "requests", ".", "exceptions", ".", "ConnectionError", ",", "urllib3", ".", "exceptions", ".", "MaxRetryError", ")", "as", "e", ":", "api_logger", ".", "info", "(", "\"Error, %s.\"", ",", "text_type", "(", "e", ")", ")", "# make a requests.Response object for return since we didn't get one.", "response", "=", "requests", ".", "Response", "# CGX extend requests.Response for return", "response", ".", "cgx_status", "=", "False", "response", ".", "cgx_content", "=", "{", "'_error'", ":", "[", "{", "'message'", ":", "'REST Request Exception: {}'", ".", "format", "(", "e", ")", ",", "'data'", ":", "{", "}", ",", "}", "]", "}", "return", "response" ]
Generic REST call worker function **Parameters:** - **url:** URL for the REST call - **method:** METHOD for the REST call - **data:** Optional DATA for the call (for POST/PUT/etc.) - **sensitive:** Flag if content request/response should be hidden from logging functions - **timeout:** Requests Timeout - **content_json:** Bool on whether the Content-Type header should be set to application/json - **retry:** DEPRECATED - please use `cloudgenix.API.modify_rest_retry` instead. - **max_retry:** DEPRECATED - please use `cloudgenix.API.modify_rest_retry` instead. - **retry_sleep:** DEPRECATED - please use `cloudgenix.API.modify_rest_retry` instead. **Returns:** Requests.Response object, extended with: - **cgx_status**: Bool, True if a successful CloudGenix response, False if error. - **cgx_content**: Content of the response, guaranteed to be in Dict format. Empty/invalid responses will be converted to a Dict response.
[ "Generic", "REST", "call", "worker", "function" ]
train
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L755-L890
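A brief usage sketch of the rest_call() worker documented in this record. The API() constructor is part of the same package, but the login step is omitted and the URL path below is a placeholder, not an endpoint taken from this record.

# Sketch only: assumes a cloudgenix.API() session has already been created and logged in.
import cloudgenix

sdk = cloudgenix.API()
# ... authentication omitted ...

# The URL below is a placeholder path, not one documented by this record.
resp = sdk.rest_call('https://api.cloudgenix.com/v2.0/api/placeholder', 'get')

if resp.cgx_status:
    # cgx_content is guaranteed to be a dict, even for empty or non-JSON bodies.
    print(resp.cgx_content)
else:
    # error details are normalized under the '_error' key.
    print(resp.cgx_content.get('_error'))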
CloudGenix/sdk-python
cloudgenix/__init__.py
API._cleanup_ca_temp_file
def _cleanup_ca_temp_file(self):
        """
        Function to clean up ca temp file for requests.

        **Returns:** Removes TEMP ca file, no return
        """
        if os.name == 'nt':
            if isinstance(self.ca_verify_filename, (binary_type, text_type)):
                # windows requires file to be closed for access. Have to manually remove
                os.unlink(self.ca_verify_filename)
        else:
            # other OS's allow close and delete of file.
            self._ca_verify_file_handle.close()
python
def _cleanup_ca_temp_file(self):
        """
        Function to clean up ca temp file for requests.

        **Returns:** Removes TEMP ca file, no return
        """
        if os.name == 'nt':
            if isinstance(self.ca_verify_filename, (binary_type, text_type)):
                # windows requires file to be closed for access. Have to manually remove
                os.unlink(self.ca_verify_filename)
        else:
            # other OS's allow close and delete of file.
            self._ca_verify_file_handle.close()
[ "def", "_cleanup_ca_temp_file", "(", "self", ")", ":", "if", "os", ".", "name", "==", "'nt'", ":", "if", "isinstance", "(", "self", ".", "ca_verify_filename", ",", "(", "binary_type", ",", "text_type", ")", ")", ":", "# windows requires file to be closed for access. Have to manually remove", "os", ".", "unlink", "(", "self", ".", "ca_verify_filename", ")", "else", ":", "# other OS's allow close and delete of file.", "self", ".", "_ca_verify_file_handle", ".", "close", "(", ")" ]
Function to clean up ca temp file for requests. **Returns:** Removes TEMP ca file, no return
[ "Function", "to", "clean", "up", "ca", "temp", "file", "for", "requests", "." ]
train
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L892-L904
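For context, a standalone sketch of the Windows temp-file behaviour the cleanup above works around. That the CA bundle is written through tempfile.NamedTemporaryFile is an assumption for illustration, not something stated in this record.

import os
import tempfile

# On Windows an open NamedTemporaryFile generally cannot be re-opened by other code,
# so it is created with delete=False and removed by name later; on POSIX, closing a
# delete=True handle removes the file automatically.
handle = tempfile.NamedTemporaryFile(delete=(os.name != 'nt'))
handle.write(b'-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n')
handle.flush()

ca_verify_filename = handle.name  # what would be handed to requests' `verify=`

# cleanup, mirroring the branch logic of _cleanup_ca_temp_file()
if os.name == 'nt':
    handle.close()
    os.unlink(ca_verify_filename)
else:
    handle.close()  # delete=True handles removal on close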
CloudGenix/sdk-python
cloudgenix/__init__.py
API.parse_auth_token
def parse_auth_token(self, auth_token):
        """
        Break auth_token up into it's constituent values.

        **Parameters:**

          - **auth_token:** Auth_token string

        **Returns:** dict with Auth Token constituents
        """
        # remove the random security key value from the front of the auth_token
        auth_token_cleaned = auth_token.split('-', 1)[1]
        # URL Decode the Auth Token
        auth_token_decoded = self.url_decode(auth_token_cleaned)
        # Create a new dict to hold the response.
        auth_dict = {}
        # Parse the token
        for key_value in auth_token_decoded.split("&"):
            key_value_list = key_value.split("=")
            # check for valid token parts
            if len(key_value_list) == 2 and type(key_value_list[0]) in [text_type, binary_type]:
                auth_dict[key_value_list[0]] = key_value_list[1]
        # Return the dict of key/values in the token.
        return auth_dict
python
def parse_auth_token(self, auth_token):
        """
        Break auth_token up into it's constituent values.

        **Parameters:**

          - **auth_token:** Auth_token string

        **Returns:** dict with Auth Token constituents
        """
        # remove the random security key value from the front of the auth_token
        auth_token_cleaned = auth_token.split('-', 1)[1]
        # URL Decode the Auth Token
        auth_token_decoded = self.url_decode(auth_token_cleaned)
        # Create a new dict to hold the response.
        auth_dict = {}
        # Parse the token
        for key_value in auth_token_decoded.split("&"):
            key_value_list = key_value.split("=")
            # check for valid token parts
            if len(key_value_list) == 2 and type(key_value_list[0]) in [text_type, binary_type]:
                auth_dict[key_value_list[0]] = key_value_list[1]
        # Return the dict of key/values in the token.
        return auth_dict
[ "def", "parse_auth_token", "(", "self", ",", "auth_token", ")", ":", "# remove the random security key value from the front of the auth_token", "auth_token_cleaned", "=", "auth_token", ".", "split", "(", "'-'", ",", "1", ")", "[", "1", "]", "# URL Decode the Auth Token", "auth_token_decoded", "=", "self", ".", "url_decode", "(", "auth_token_cleaned", ")", "# Create a new dict to hold the response.", "auth_dict", "=", "{", "}", "# Parse the token", "for", "key_value", "in", "auth_token_decoded", ".", "split", "(", "\"&\"", ")", ":", "key_value_list", "=", "key_value", ".", "split", "(", "\"=\"", ")", "# check for valid token parts", "if", "len", "(", "key_value_list", ")", "==", "2", "and", "type", "(", "key_value_list", "[", "0", "]", ")", "in", "[", "text_type", ",", "binary_type", "]", ":", "auth_dict", "[", "key_value_list", "[", "0", "]", "]", "=", "key_value_list", "[", "1", "]", "# Return the dict of key/values in the token.", "return", "auth_dict" ]
Break auth_token up into it's constituent values. **Parameters:** - **auth_token:** Auth_token string **Returns:** dict with Auth Token constituents
[ "Break", "auth_token", "up", "into", "it", "s", "constituent", "values", "." ]
train
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L906-L931
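A standalone illustration of the parsing performed above. The token value is entirely made up, and urllib.parse.unquote stands in for the SDK's own regex-based url_decode():

from urllib.parse import unquote

fake_auth_token = 'RANDOMKEY-tenant%3D12345%26region%3Dplaceholder%26token%3Dabcdef'

# 1. drop the random security key before the first '-'
cleaned = fake_auth_token.split('-', 1)[1]
# 2. URL-decode the remainder
decoded = unquote(cleaned)
# 3. split into key/value pairs
auth_dict = dict(part.split('=') for part in decoded.split('&') if len(part.split('=')) == 2)

print(auth_dict)   # {'tenant': '12345', 'region': 'placeholder', 'token': 'abcdef'}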
CloudGenix/sdk-python
cloudgenix/__init__.py
API.update_region_to_controller
def update_region_to_controller(self, region): """ Update the controller string with dynamic region info. Controller string should end up as `<name[-env]>.<region>.cloudgenix.com` **Parameters:** - **region:** region string. **Returns:** No return value, mutates the controller in the class namespace """ # default region position in a list region_position = 1 # Check for a global "ignore region" flag if self.ignore_region: # bypass api_logger.debug("IGNORE_REGION set, not updating controller region.") return api_logger.debug("Updating Controller Region") api_logger.debug("CONTROLLER = %s", self.controller) api_logger.debug("CONTROLLER_ORIG = %s", self.controller_orig) api_logger.debug("CONTROLLER_REGION = %s", self.controller_region) # Check if this is an initial region use or an update region use if self.controller_orig: controller_base = self.controller_orig else: controller_base = self.controller self.controller_orig = self.controller # splice controller string controller_full_part_list = controller_base.split('.') for idx, part in enumerate(controller_full_part_list): # is the region already in the controller string? if region == part: # yes, controller already has apropriate region api_logger.debug("REGION %s ALREADY IN CONTROLLER AT INDEX = %s", region, idx) # update region if it is not already set. if self.controller_region != region: self.controller_region = region api_logger.debug("UPDATED_CONTROLLER_REGION = %s", self.controller_region) return controller_part_count = len(controller_full_part_list) # handle short domain case if controller_part_count > 1: # insert region controller_full_part_list[region_position] = region self.controller = ".".join(controller_full_part_list) else: # short domain, just add region self.controller = ".".join(controller_full_part_list) + '.' + region # update SDK vars with region info self.controller_orig = controller_base self.controller_region = region api_logger.debug("UPDATED_CONTROLLER = %s", self.controller) api_logger.debug("UPDATED_CONTROLLER_ORIG = %s", self.controller_orig) api_logger.debug("UPDATED_CONTROLLER_REGION = %s", self.controller_region) return
python
def update_region_to_controller(self, region): """ Update the controller string with dynamic region info. Controller string should end up as `<name[-env]>.<region>.cloudgenix.com` **Parameters:** - **region:** region string. **Returns:** No return value, mutates the controller in the class namespace """ # default region position in a list region_position = 1 # Check for a global "ignore region" flag if self.ignore_region: # bypass api_logger.debug("IGNORE_REGION set, not updating controller region.") return api_logger.debug("Updating Controller Region") api_logger.debug("CONTROLLER = %s", self.controller) api_logger.debug("CONTROLLER_ORIG = %s", self.controller_orig) api_logger.debug("CONTROLLER_REGION = %s", self.controller_region) # Check if this is an initial region use or an update region use if self.controller_orig: controller_base = self.controller_orig else: controller_base = self.controller self.controller_orig = self.controller # splice controller string controller_full_part_list = controller_base.split('.') for idx, part in enumerate(controller_full_part_list): # is the region already in the controller string? if region == part: # yes, controller already has apropriate region api_logger.debug("REGION %s ALREADY IN CONTROLLER AT INDEX = %s", region, idx) # update region if it is not already set. if self.controller_region != region: self.controller_region = region api_logger.debug("UPDATED_CONTROLLER_REGION = %s", self.controller_region) return controller_part_count = len(controller_full_part_list) # handle short domain case if controller_part_count > 1: # insert region controller_full_part_list[region_position] = region self.controller = ".".join(controller_full_part_list) else: # short domain, just add region self.controller = ".".join(controller_full_part_list) + '.' + region # update SDK vars with region info self.controller_orig = controller_base self.controller_region = region api_logger.debug("UPDATED_CONTROLLER = %s", self.controller) api_logger.debug("UPDATED_CONTROLLER_ORIG = %s", self.controller_orig) api_logger.debug("UPDATED_CONTROLLER_REGION = %s", self.controller_region) return
[ "def", "update_region_to_controller", "(", "self", ",", "region", ")", ":", "# default region position in a list", "region_position", "=", "1", "# Check for a global \"ignore region\" flag", "if", "self", ".", "ignore_region", ":", "# bypass", "api_logger", ".", "debug", "(", "\"IGNORE_REGION set, not updating controller region.\"", ")", "return", "api_logger", ".", "debug", "(", "\"Updating Controller Region\"", ")", "api_logger", ".", "debug", "(", "\"CONTROLLER = %s\"", ",", "self", ".", "controller", ")", "api_logger", ".", "debug", "(", "\"CONTROLLER_ORIG = %s\"", ",", "self", ".", "controller_orig", ")", "api_logger", ".", "debug", "(", "\"CONTROLLER_REGION = %s\"", ",", "self", ".", "controller_region", ")", "# Check if this is an initial region use or an update region use", "if", "self", ".", "controller_orig", ":", "controller_base", "=", "self", ".", "controller_orig", "else", ":", "controller_base", "=", "self", ".", "controller", "self", ".", "controller_orig", "=", "self", ".", "controller", "# splice controller string", "controller_full_part_list", "=", "controller_base", ".", "split", "(", "'.'", ")", "for", "idx", ",", "part", "in", "enumerate", "(", "controller_full_part_list", ")", ":", "# is the region already in the controller string?", "if", "region", "==", "part", ":", "# yes, controller already has apropriate region", "api_logger", ".", "debug", "(", "\"REGION %s ALREADY IN CONTROLLER AT INDEX = %s\"", ",", "region", ",", "idx", ")", "# update region if it is not already set.", "if", "self", ".", "controller_region", "!=", "region", ":", "self", ".", "controller_region", "=", "region", "api_logger", ".", "debug", "(", "\"UPDATED_CONTROLLER_REGION = %s\"", ",", "self", ".", "controller_region", ")", "return", "controller_part_count", "=", "len", "(", "controller_full_part_list", ")", "# handle short domain case", "if", "controller_part_count", ">", "1", ":", "# insert region", "controller_full_part_list", "[", "region_position", "]", "=", "region", "self", ".", "controller", "=", "\".\"", ".", "join", "(", "controller_full_part_list", ")", "else", ":", "# short domain, just add region", "self", ".", "controller", "=", "\".\"", ".", "join", "(", "controller_full_part_list", ")", "+", "'.'", "+", "region", "# update SDK vars with region info", "self", ".", "controller_orig", "=", "controller_base", "self", ".", "controller_region", "=", "region", "api_logger", ".", "debug", "(", "\"UPDATED_CONTROLLER = %s\"", ",", "self", ".", "controller", ")", "api_logger", ".", "debug", "(", "\"UPDATED_CONTROLLER_ORIG = %s\"", ",", "self", ".", "controller_orig", ")", "api_logger", ".", "debug", "(", "\"UPDATED_CONTROLLER_REGION = %s\"", ",", "self", ".", "controller_region", ")", "return" ]
Update the controller string with dynamic region info. Controller string should end up as `<name[-env]>.<region>.cloudgenix.com` **Parameters:** - **region:** region string. **Returns:** No return value, mutates the controller in the class namespace
[ "Update", "the", "controller", "string", "with", "dynamic", "region", "info", ".", "Controller", "string", "should", "end", "up", "as", "<name", "[", "-", "env", "]", ">", ".", "<region", ">", ".", "cloudgenix", ".", "com" ]
train
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L933-L997
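A minimal sketch of the hostname splicing described above, omitting the short-domain branch and the SDK's bookkeeping. Both region strings are placeholders, not values taken from this record:

controller = 'api.oldregion.cloudgenix.com'
region = 'newregion'

parts = controller.split('.')
if region not in parts:
    # swap the element at the region position (index 1) for the new region
    parts[1] = region
controller = '.'.join(parts)

print(controller)   # api.newregion.cloudgenix.com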
CloudGenix/sdk-python
cloudgenix/__init__.py
API.parse_region
def parse_region(self, login_response):
        """
        Return region from a successful login response.

        **Parameters:**

          - **login_response:** requests.Response from a successful login.

        **Returns:** region name.
        """
        auth_token = login_response.cgx_content['x_auth_token']
        auth_token_dict = self.parse_auth_token(auth_token)
        auth_region = auth_token_dict.get('region')
        return auth_region
python
def parse_region(self, login_response):
        """
        Return region from a successful login response.

        **Parameters:**

          - **login_response:** requests.Response from a successful login.

        **Returns:** region name.
        """
        auth_token = login_response.cgx_content['x_auth_token']
        auth_token_dict = self.parse_auth_token(auth_token)
        auth_region = auth_token_dict.get('region')
        return auth_region
[ "def", "parse_region", "(", "self", ",", "login_response", ")", ":", "auth_token", "=", "login_response", ".", "cgx_content", "[", "'x_auth_token'", "]", "auth_token_dict", "=", "self", ".", "parse_auth_token", "(", "auth_token", ")", "auth_region", "=", "auth_token_dict", ".", "get", "(", "'region'", ")", "return", "auth_region" ]
Return region from a successful login response. **Parameters:** - **login_response:** requests.Response from a successful login. **Returns:** region name.
[ "Return", "region", "from", "a", "successful", "login", "response", "." ]
train
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L999-L1012
CloudGenix/sdk-python
cloudgenix/__init__.py
API.reparse_login_cookie_after_region_update
def reparse_login_cookie_after_region_update(self, login_response):
        """
        Sometimes, login cookie gets sent with region info instead of api.cloudgenix.com. This function
        re-parses the original login request and applies cookies to the session if they now match the new region.

        **Parameters:**

          - **login_response:** requests.Response from a non-region login.

        **Returns:** updates API() object directly, no return.
        """
        login_url = login_response.request.url
        api_logger.debug("ORIGINAL REQUEST URL = %s", login_url)
        # replace old controller with new controller.
        login_url_new = login_url.replace(self.controller_orig, self.controller)
        api_logger.debug("UPDATED REQUEST URL = %s", login_url_new)
        # reset login url with new region
        login_response.request.url = login_url_new
        # prep cookie jar parsing
        req = requests.cookies.MockRequest(login_response.request)
        res = requests.cookies.MockResponse(login_response.raw._original_response.msg)
        # extract cookies to session cookie jar.
        self._session.cookies.extract_cookies(res, req)
        return
python
def reparse_login_cookie_after_region_update(self, login_response):
        """
        Sometimes, login cookie gets sent with region info instead of api.cloudgenix.com. This function
        re-parses the original login request and applies cookies to the session if they now match the new region.

        **Parameters:**

          - **login_response:** requests.Response from a non-region login.

        **Returns:** updates API() object directly, no return.
        """
        login_url = login_response.request.url
        api_logger.debug("ORIGINAL REQUEST URL = %s", login_url)
        # replace old controller with new controller.
        login_url_new = login_url.replace(self.controller_orig, self.controller)
        api_logger.debug("UPDATED REQUEST URL = %s", login_url_new)
        # reset login url with new region
        login_response.request.url = login_url_new
        # prep cookie jar parsing
        req = requests.cookies.MockRequest(login_response.request)
        res = requests.cookies.MockResponse(login_response.raw._original_response.msg)
        # extract cookies to session cookie jar.
        self._session.cookies.extract_cookies(res, req)
        return
[ "def", "reparse_login_cookie_after_region_update", "(", "self", ",", "login_response", ")", ":", "login_url", "=", "login_response", ".", "request", ".", "url", "api_logger", ".", "debug", "(", "\"ORIGINAL REQUEST URL = %s\"", ",", "login_url", ")", "# replace old controller with new controller.", "login_url_new", "=", "login_url", ".", "replace", "(", "self", ".", "controller_orig", ",", "self", ".", "controller", ")", "api_logger", ".", "debug", "(", "\"UPDATED REQUEST URL = %s\"", ",", "login_url_new", ")", "# reset login url with new region", "login_response", ".", "request", ".", "url", "=", "login_url_new", "# prep cookie jar parsing", "req", "=", "requests", ".", "cookies", ".", "MockRequest", "(", "login_response", ".", "request", ")", "res", "=", "requests", ".", "cookies", ".", "MockResponse", "(", "login_response", ".", "raw", ".", "_original_response", ".", "msg", ")", "# extract cookies to session cookie jar.", "self", ".", "_session", ".", "cookies", ".", "extract_cookies", "(", "res", ",", "req", ")", "return" ]
Sometimes, login cookie gets sent with region info instead of api.cloudgenix.com. This function re-parses the original login request and applies cookies to the session if they now match the new region. **Parameters:** - **login_response:** requests.Response from a non-region login. **Returns:** updates API() object directly, no return.
[ "Sometimes", "login", "cookie", "gets", "sent", "with", "region", "info", "instead", "of", "api", ".", "cloudgenix", ".", "com", ".", "This", "function", "re", "-", "parses", "the", "original", "login", "request", "and", "applies", "cookies", "to", "the", "session", "if", "they", "now", "match", "the", "new", "region", "." ]
train
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L1014-L1038
CloudGenix/sdk-python
cloudgenix/__init__.py
API._catch_nonjson_streamresponse
def _catch_nonjson_streamresponse(rawresponse):
        """
        Validate a streamed response is JSON. Return a Python dictionary either way.

        **Parameters:**

          - **rawresponse:** Streamed Response from Requests.

        **Returns:** Dictionary
        """
        # attempt to load response for return.
        try:
            response = json.loads(rawresponse)
        except (ValueError, TypeError):
            if rawresponse:
                response = {
                    '_error': [
                        {
                            'message': 'Response not in JSON format.',
                            'data': rawresponse,
                        }
                    ]
                }
            else:
                # in case of null response, return empty dict.
                response = {}

        return response
python
def _catch_nonjson_streamresponse(rawresponse):
        """
        Validate a streamed response is JSON. Return a Python dictionary either way.

        **Parameters:**

          - **rawresponse:** Streamed Response from Requests.

        **Returns:** Dictionary
        """
        # attempt to load response for return.
        try:
            response = json.loads(rawresponse)
        except (ValueError, TypeError):
            if rawresponse:
                response = {
                    '_error': [
                        {
                            'message': 'Response not in JSON format.',
                            'data': rawresponse,
                        }
                    ]
                }
            else:
                # in case of null response, return empty dict.
                response = {}

        return response
[ "def", "_catch_nonjson_streamresponse", "(", "rawresponse", ")", ":", "# attempt to load response for return.", "try", ":", "response", "=", "json", ".", "loads", "(", "rawresponse", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "if", "rawresponse", ":", "response", "=", "{", "'_error'", ":", "[", "{", "'message'", ":", "'Response not in JSON format.'", ",", "'data'", ":", "rawresponse", ",", "}", "]", "}", "else", ":", "# in case of null response, return empty dict.", "response", "=", "{", "}", "return", "response" ]
Validate a streamed response is JSON. Return a Python dictionary either way. **Parameters:** - **rawresponse:** Streamed Response from Requests. **Returns:** Dictionary
[ "Validate", "a", "streamed", "response", "is", "JSON", ".", "Return", "a", "Python", "dictionary", "either", "way", "." ]
train
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L1041-L1069
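A standalone demonstration of the normalization rule above: JSON text becomes a dict, non-JSON text is wrapped under '_error', and an empty body becomes an empty dict:

import json

def to_dict(rawresponse):
    # mirror of the try/except structure shown in the record above
    try:
        return json.loads(rawresponse)
    except (ValueError, TypeError):
        if rawresponse:
            return {'_error': [{'message': 'Response not in JSON format.', 'data': rawresponse}]}
        return {}

print(to_dict('{"status": "ok"}'))              # {'status': 'ok'}
print(to_dict('<html>502 Bad Gateway</html>'))  # wrapped under '_error'
print(to_dict(''))                              # {}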
CloudGenix/sdk-python
cloudgenix/__init__.py
API.url_decode
def url_decode(url):
        """
        URL Decode function using REGEX

        **Parameters:**

          - **url:** URLENCODED text string

        **Returns:** Non URLENCODED string
        """
        return re.compile('%([0-9a-fA-F]{2})', re.M).sub(lambda m: chr(int(m.group(1), 16)), url)
python
def url_decode(url):
        """
        URL Decode function using REGEX

        **Parameters:**

          - **url:** URLENCODED text string

        **Returns:** Non URLENCODED string
        """
        return re.compile('%([0-9a-fA-F]{2})', re.M).sub(lambda m: chr(int(m.group(1), 16)), url)
[ "def", "url_decode", "(", "url", ")", ":", "return", "re", ".", "compile", "(", "'%([0-9a-fA-F]{2})'", ",", "re", ".", "M", ")", ".", "sub", "(", "lambda", "m", ":", "chr", "(", "int", "(", "m", ".", "group", "(", "1", ")", ",", "16", ")", ")", ",", "url", ")" ]
URL Decode function using REGEX **Parameters:** - **url:** URLENCODED text string **Returns:** Non URLENCODED string
[ "URL", "Decode", "function", "using", "REGEX" ]
train
https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L1072-L1082
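A quick standalone check that the regex-based decoder above matches urllib.parse.unquote for simple single-byte percent escapes (multi-byte UTF-8 escapes are where the two approaches would differ):

import re
from urllib.parse import unquote

def url_decode(url):
    # same expression as the record above, as a module-level function
    return re.compile('%([0-9a-fA-F]{2})', re.M).sub(lambda m: chr(int(m.group(1), 16)), url)

sample = 'region%3Dplaceholder%26token%3Dabc%2F123'
print(url_decode(sample))                      # region=placeholder&token=abc/123
print(url_decode(sample) == unquote(sample))   # True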
deplicate/deplicate
duplicate/utils/fs/nt.py
blksize
def blksize(path):
    """
    Get optimal file system buffer size (in bytes) for I/O calls.
    """
    diskfreespace = win32file.GetDiskFreeSpace
    dirname = os.path.dirname(fullpath(path))

    try:
        cluster_sectors, sector_size = diskfreespace(dirname)[:2]
        size = cluster_sectors * sector_size

    except win32file.error as e:
        if e.winerror != winerror.ERROR_NOT_READY:
            raise
        sleep(3)
        size = blksize(dirname)

    return size
python
def blksize(path):
    """
    Get optimal file system buffer size (in bytes) for I/O calls.
    """
    diskfreespace = win32file.GetDiskFreeSpace
    dirname = os.path.dirname(fullpath(path))

    try:
        cluster_sectors, sector_size = diskfreespace(dirname)[:2]
        size = cluster_sectors * sector_size

    except win32file.error as e:
        if e.winerror != winerror.ERROR_NOT_READY:
            raise
        sleep(3)
        size = blksize(dirname)

    return size
[ "def", "blksize", "(", "path", ")", ":", "diskfreespace", "=", "win32file", ".", "GetDiskFreeSpace", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "fullpath", "(", "path", ")", ")", "try", ":", "cluster_sectors", ",", "sector_size", "=", "diskfreespace", "(", "dirname", ")", "[", ":", "2", "]", "size", "=", "cluster_sectors", "*", "sector_size", "except", "win32file", ".", "error", "as", "e", ":", "if", "e", ".", "winerror", "!=", "winerror", ".", "ERROR_NOT_READY", ":", "raise", "sleep", "(", "3", ")", "size", "=", "blksize", "(", "dirname", ")", "return", "size" ]
Get optimal file system buffer size (in bytes) for I/O calls.
[ "Get", "optimal", "file", "system", "buffer", "size", "(", "in", "bytes", ")", "for", "I", "/", "O", "calls", "." ]
train
https://github.com/deplicate/deplicate/blob/9975502571d1d024a990f5cb304d01b63c0d7717/duplicate/utils/fs/nt.py#L28-L44
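The size computed above is the product of the first two fields the code slices out of the GetDiskFreeSpace result, sectors per cluster and bytes per sector. A tiny arithmetic sketch with assumed, typical NTFS values:

# Assumed values for illustration only; real values come from win32file.GetDiskFreeSpace.
cluster_sectors = 8      # sectors per cluster
sector_size = 512        # bytes per sector

blocksize = cluster_sectors * sector_size
print(blocksize)         # 4096 bytes, i.e. the common 4 KiB NTFS cluster size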
greyli/flask-avatars
flask_avatars/__init__.py
_Avatars.gravatar
def gravatar(hash, size=100, rating='g', default='identicon', include_extension=False, force_default=False):
        """Pass email hash, return Gravatar URL.

        You can get email hash like this::

            import hashlib
            avatar_hash = hashlib.md5(email.lower().encode('utf-8')).hexdigest()

        Visit https://en.gravatar.com/site/implement/images/ for more information.

        :param hash: The email hash used to generate avatar URL.
        :param size: The size of the avatar, default to 100 pixel.
        :param rating: The rating of the avatar, default to ``g``
        :param default: The type of default avatar, default to ``identicon``.
        :param include_extension: Append a '.jpg' extension at the end of URL, default to ``False``.
        :param force_default: Force to use default avatar, default to ``False``.
        """
        if include_extension:
            hash += '.jpg'
        default = default or current_app.config['AVATARS_GRAVATAR_DEFAULT']
        query_string = urlencode({'s': int(size), 'r': rating, 'd': default})
        if force_default:
            query_string += '&q=y'
        return 'https://gravatar.com/avatar/' + hash + '?' + query_string
python
def gravatar(hash, size=100, rating='g', default='identicon', include_extension=False, force_default=False):
        """Pass email hash, return Gravatar URL.

        You can get email hash like this::

            import hashlib
            avatar_hash = hashlib.md5(email.lower().encode('utf-8')).hexdigest()

        Visit https://en.gravatar.com/site/implement/images/ for more information.

        :param hash: The email hash used to generate avatar URL.
        :param size: The size of the avatar, default to 100 pixel.
        :param rating: The rating of the avatar, default to ``g``
        :param default: The type of default avatar, default to ``identicon``.
        :param include_extension: Append a '.jpg' extension at the end of URL, default to ``False``.
        :param force_default: Force to use default avatar, default to ``False``.
        """
        if include_extension:
            hash += '.jpg'
        default = default or current_app.config['AVATARS_GRAVATAR_DEFAULT']
        query_string = urlencode({'s': int(size), 'r': rating, 'd': default})
        if force_default:
            query_string += '&q=y'
        return 'https://gravatar.com/avatar/' + hash + '?' + query_string
[ "def", "gravatar", "(", "hash", ",", "size", "=", "100", ",", "rating", "=", "'g'", ",", "default", "=", "'identicon'", ",", "include_extension", "=", "False", ",", "force_default", "=", "False", ")", ":", "if", "include_extension", ":", "hash", "+=", "'.jpg'", "default", "=", "default", "or", "current_app", ".", "config", "[", "'AVATARS_GRAVATAR_DEFAULT'", "]", "query_string", "=", "urlencode", "(", "{", "'s'", ":", "int", "(", "size", ")", ",", "'r'", ":", "rating", ",", "'d'", ":", "default", "}", ")", "if", "force_default", ":", "query_string", "+=", "'&q=y'", "return", "'https://gravatar.com/avatar/'", "+", "hash", "+", "'?'", "+", "query_string" ]
Pass email hash, return Gravatar URL. You can get email hash like this:: import hashlib avatar_hash = hashlib.md5(email.lower().encode('utf-8')).hexdigest() Visit https://en.gravatar.com/site/implement/images/ for more information. :param hash: The email hash used to generate avatar URL. :param size: The size of the avatar, default to 100 pixel. :param rating: The rating of the avatar, default to ``g`` :param default: The type of default avatar, default to ``identicon``. :param include_extension: Append a '.jpg' extension at the end of URL, default to ``False``. :param force_default: Force to use default avatar, default to ``False``.
[ "Pass", "email", "hash", "return", "Gravatar", "URL", ".", "You", "can", "get", "email", "hash", "like", "this", "::" ]
train
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L27-L50
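A standalone sketch that mirrors the URL construction above, using a throwaway address; the hash step is the one already shown in the record's docstring:

import hashlib
from urllib.parse import urlencode

email = 'someone@example.com'   # placeholder address
avatar_hash = hashlib.md5(email.lower().encode('utf-8')).hexdigest()

query_string = urlencode({'s': 100, 'r': 'g', 'd': 'identicon'})
url = 'https://gravatar.com/avatar/' + avatar_hash + '?' + query_string
print(url)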
greyli/flask-avatars
flask_avatars/__init__.py
_Avatars.social_media
def social_media(username, platform='twitter', size='medium'):
        """Return avatar URL at social media.

        Visit https://avatars.io for more information.

        :param username: The username of the social media.
        :param platform: One of facebook, instagram, twitter, gravatar.
        :param size: The size of avatar, one of small, medium and large.
        """
        return 'https://avatars.io/{platform}/{username}/{size}'.format(
            platform=platform, username=username, size=size)
python
def social_media(username, platform='twitter', size='medium'):
        """Return avatar URL at social media.

        Visit https://avatars.io for more information.

        :param username: The username of the social media.
        :param platform: One of facebook, instagram, twitter, gravatar.
        :param size: The size of avatar, one of small, medium and large.
        """
        return 'https://avatars.io/{platform}/{username}/{size}'.format(
            platform=platform, username=username, size=size)
[ "def", "social_media", "(", "username", ",", "platform", "=", "'twitter'", ",", "size", "=", "'medium'", ")", ":", "return", "'https://avatars.io/{platform}/{username}/{size}'", ".", "format", "(", "platform", "=", "platform", ",", "username", "=", "username", ",", "size", "=", "size", ")" ]
Return avatar URL at social media. Visit https://avatars.io for more information. :param username: The username of the social media. :param platform: One of facebook, instagram, twitter, gravatar. :param size: The size of avatar, one of small, medium and large.
[ "Return", "avatar", "URL", "at", "social", "media", ".", "Visit", "https", ":", "//", "avatars", ".", "io", "for", "more", "information", "." ]
train
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L63-L72
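The helper above is a plain format string; with a placeholder username it yields:

username, platform, size = 'example_user', 'twitter', 'medium'
print('https://avatars.io/{platform}/{username}/{size}'.format(
    platform=platform, username=username, size=size))
# -> https://avatars.io/twitter/example_user/medium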
greyli/flask-avatars
flask_avatars/__init__.py
_Avatars.jcrop_css
def jcrop_css(css_url=None):
        """Load jcrop css file.

        :param css_url: The custom CSS URL.
        """
        if css_url is None:
            if current_app.config['AVATARS_SERVE_LOCAL']:
                css_url = url_for('avatars.static', filename='jcrop/css/jquery.Jcrop.min.css')
            else:
                css_url = 'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/css/jquery.Jcrop.min.css'
        return Markup('<link rel="stylesheet" href="%s">' % css_url)
python
def jcrop_css(css_url=None):
        """Load jcrop css file.

        :param css_url: The custom CSS URL.
        """
        if css_url is None:
            if current_app.config['AVATARS_SERVE_LOCAL']:
                css_url = url_for('avatars.static', filename='jcrop/css/jquery.Jcrop.min.css')
            else:
                css_url = 'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/css/jquery.Jcrop.min.css'
        return Markup('<link rel="stylesheet" href="%s">' % css_url)
[ "def", "jcrop_css", "(", "css_url", "=", "None", ")", ":", "if", "css_url", "is", "None", ":", "if", "current_app", ".", "config", "[", "'AVATARS_SERVE_LOCAL'", "]", ":", "css_url", "=", "url_for", "(", "'avatars.static'", ",", "filename", "=", "'jcrop/css/jquery.Jcrop.min.css'", ")", "else", ":", "css_url", "=", "'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/css/jquery.Jcrop.min.css'", "return", "Markup", "(", "'<link rel=\"stylesheet\" href=\"%s\">'", "%", "css_url", ")" ]
Load jcrop css file. :param css_url: The custom CSS URL.
[ "Load", "jcrop", "css", "file", "." ]
train
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L84-L94
greyli/flask-avatars
flask_avatars/__init__.py
_Avatars.jcrop_js
def jcrop_js(js_url=None, with_jquery=True):
        """Load jcrop Javascript file.

        :param js_url: The custom JavaScript URL.
        :param with_jquery: Include jQuery or not, default to ``True``.
        """
        serve_local = current_app.config['AVATARS_SERVE_LOCAL']
        if js_url is None:
            if serve_local:
                js_url = url_for('avatars.static', filename='jcrop/js/jquery.Jcrop.min.js')
            else:
                js_url = 'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.Jcrop.min.js'

        if with_jquery:
            if serve_local:
                jquery = '<script src="%s"></script>' % url_for('avatars.static', filename='jcrop/js/jquery.min.js')
            else:
                jquery = '<script src="https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.min.js"></script>'
        else:
            jquery = ''
        return Markup('''%s\n<script src="%s"></script>
        ''' % (jquery, js_url))
python
def jcrop_js(js_url=None, with_jquery=True):
        """Load jcrop Javascript file.

        :param js_url: The custom JavaScript URL.
        :param with_jquery: Include jQuery or not, default to ``True``.
        """
        serve_local = current_app.config['AVATARS_SERVE_LOCAL']
        if js_url is None:
            if serve_local:
                js_url = url_for('avatars.static', filename='jcrop/js/jquery.Jcrop.min.js')
            else:
                js_url = 'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.Jcrop.min.js'

        if with_jquery:
            if serve_local:
                jquery = '<script src="%s"></script>' % url_for('avatars.static', filename='jcrop/js/jquery.min.js')
            else:
                jquery = '<script src="https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.min.js"></script>'
        else:
            jquery = ''
        return Markup('''%s\n<script src="%s"></script>
        ''' % (jquery, js_url))
[ "def", "jcrop_js", "(", "js_url", "=", "None", ",", "with_jquery", "=", "True", ")", ":", "serve_local", "=", "current_app", ".", "config", "[", "'AVATARS_SERVE_LOCAL'", "]", "if", "js_url", "is", "None", ":", "if", "serve_local", ":", "js_url", "=", "url_for", "(", "'avatars.static'", ",", "filename", "=", "'jcrop/js/jquery.Jcrop.min.js'", ")", "else", ":", "js_url", "=", "'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.Jcrop.min.js'", "if", "with_jquery", ":", "if", "serve_local", ":", "jquery", "=", "'<script src=\"%s\"></script>'", "%", "url_for", "(", "'avatars.static'", ",", "filename", "=", "'jcrop/js/jquery.min.js'", ")", "else", ":", "jquery", "=", "'<script src=\"https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.min.js\"></script>'", "else", ":", "jquery", "=", "''", "return", "Markup", "(", "'''%s\\n<script src=\"%s\"></script>\n '''", "%", "(", "jquery", ",", "js_url", ")", ")" ]
Load jcrop Javascript file. :param js_url: The custom JavaScript URL. :param with_jquery: Include jQuery or not, default to ``True``.
[ "Load", "jcrop", "Javascript", "file", "." ]
train
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L97-L119
greyli/flask-avatars
flask_avatars/__init__.py
_Avatars.crop_box
def crop_box(endpoint=None, filename=None):
        """Create a crop box.

        :param endpoint: The endpoint of view function that serve avatar image file.
        :param filename: The filename of the image that need to be crop.
        """
        crop_size = current_app.config['AVATARS_CROP_BASE_WIDTH']
        if endpoint is None or filename is None:
            url = url_for('avatars.static', filename='default/default_l.jpg')
        else:
            url = url_for(endpoint, filename=filename)
        return Markup('<img src="%s" id="crop-box" style="max-width: %dpx; display: block;">' % (url, crop_size))
python
def crop_box(endpoint=None, filename=None):
        """Create a crop box.

        :param endpoint: The endpoint of view function that serve avatar image file.
        :param filename: The filename of the image that need to be crop.
        """
        crop_size = current_app.config['AVATARS_CROP_BASE_WIDTH']
        if endpoint is None or filename is None:
            url = url_for('avatars.static', filename='default/default_l.jpg')
        else:
            url = url_for(endpoint, filename=filename)
        return Markup('<img src="%s" id="crop-box" style="max-width: %dpx; display: block;">' % (url, crop_size))
[ "def", "crop_box", "(", "endpoint", "=", "None", ",", "filename", "=", "None", ")", ":", "crop_size", "=", "current_app", ".", "config", "[", "'AVATARS_CROP_BASE_WIDTH'", "]", "if", "endpoint", "is", "None", "or", "filename", "is", "None", ":", "url", "=", "url_for", "(", "'avatars.static'", ",", "filename", "=", "'default/default_l.jpg'", ")", "else", ":", "url", "=", "url_for", "(", "endpoint", ",", "filename", "=", "filename", ")", "return", "Markup", "(", "'<img src=\"%s\" id=\"crop-box\" style=\"max-width: %dpx; display: block;\">'", "%", "(", "url", ",", "crop_size", ")", ")" ]
Create a crop box. :param endpoint: The endpoint of view function that serve avatar image file. :param filename: The filename of the image that need to be crop.
[ "Create", "a", "crop", "box", "." ]
train
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L122-L134
greyli/flask-avatars
flask_avatars/__init__.py
_Avatars.preview_box
def preview_box(endpoint=None, filename=None):
        """Create a preview box.

        :param endpoint: The endpoint of view function that serve avatar image file.
        :param filename: The filename of the image that need to be crop.
        """
        preview_size = current_app.config['AVATARS_CROP_PREVIEW_SIZE'] or current_app.config['AVATARS_SIZE_TUPLE'][2]
        if endpoint is None or filename is None:
            url = url_for('avatars.static', filename='default/default_l.jpg')
        else:
            url = url_for(endpoint, filename=filename)
        return Markup('''
    <div id="preview-box">
        <div class="preview-box" style="width: %dpx; height: %dpx; overflow: hidden;">
            <img src="%s" class="jcrop-preview" alt="Preview"/>
        </div>
    </div>''' % (preview_size, preview_size, url))
python
def preview_box(endpoint=None, filename=None):
        """Create a preview box.

        :param endpoint: The endpoint of view function that serve avatar image file.
        :param filename: The filename of the image that need to be crop.
        """
        preview_size = current_app.config['AVATARS_CROP_PREVIEW_SIZE'] or current_app.config['AVATARS_SIZE_TUPLE'][2]
        if endpoint is None or filename is None:
            url = url_for('avatars.static', filename='default/default_l.jpg')
        else:
            url = url_for(endpoint, filename=filename)
        return Markup('''
    <div id="preview-box">
        <div class="preview-box" style="width: %dpx; height: %dpx; overflow: hidden;">
            <img src="%s" class="jcrop-preview" alt="Preview"/>
        </div>
    </div>''' % (preview_size, preview_size, url))
[ "def", "preview_box", "(", "endpoint", "=", "None", ",", "filename", "=", "None", ")", ":", "preview_size", "=", "current_app", ".", "config", "[", "'AVATARS_CROP_PREVIEW_SIZE'", "]", "or", "current_app", ".", "config", "[", "'AVATARS_SIZE_TUPLE'", "]", "[", "2", "]", "if", "endpoint", "is", "None", "or", "filename", "is", "None", ":", "url", "=", "url_for", "(", "'avatars.static'", ",", "filename", "=", "'default/default_l.jpg'", ")", "else", ":", "url", "=", "url_for", "(", "endpoint", ",", "filename", "=", "filename", ")", "return", "Markup", "(", "'''\n <div id=\"preview-box\">\n <div class=\"preview-box\" style=\"width: %dpx; height: %dpx; overflow: hidden;\">\n <img src=\"%s\" class=\"jcrop-preview\" alt=\"Preview\"/>\n </div>\n </div>'''", "%", "(", "preview_size", ",", "preview_size", ",", "url", ")", ")" ]
Create a preview box. :param endpoint: The endpoint of view function that serve avatar image file. :param filename: The filename of the image that need to be crop.
[ "Create", "a", "preview", "box", "." ]
train
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L137-L154
greyli/flask-avatars
flask_avatars/__init__.py
_Avatars.init_jcrop
def init_jcrop(min_size=None): """Initialize jcrop. :param min_size: The minimal size of crop area. """ init_x = current_app.config['AVATARS_CROP_INIT_POS'][0] init_y = current_app.config['AVATARS_CROP_INIT_POS'][1] init_size = current_app.config['AVATARS_CROP_INIT_SIZE'] or current_app.config['AVATARS_SIZE_TUPLE'][2] if current_app.config['AVATARS_CROP_MIN_SIZE']: min_size = min_size or current_app.config['AVATARS_SIZE_TUPLE'][2] min_size_js = 'jcrop_api.setOptions({minSize: [%d, %d]});' % (min_size, min_size) else: min_size_js = '' return Markup(''' <script type="text/javascript"> jQuery(function ($) { // Create variables (in this scope) to hold the API and image size var jcrop_api, boundx, boundy, // Grab some information about the preview pane $preview = $('#preview-box'), $pcnt = $('#preview-box .preview-box'), $pimg = $('#preview-box .preview-box img'), xsize = $pcnt.width(), ysize = $pcnt.height(); $('#crop-box').Jcrop({ onChange: updatePreview, onSelect: updateCoords, setSelect: [%s, %s, %s, %s], aspectRatio: 1 }, function () { // Use the API to get the real image size var bounds = this.getBounds(); boundx = bounds[0]; boundy = bounds[1]; // Store the API in the jcrop_api variable jcrop_api = this; %s jcrop_api.focus(); // Move the preview into the jcrop container for css positioning $preview.appendTo(jcrop_api.ui.holder); }); function updatePreview(c) { if (parseInt(c.w) > 0) { var rx = xsize / c.w; var ry = ysize / c.h; $pimg.css({ width: Math.round(rx * boundx) + 'px', height: Math.round(ry * boundy) + 'px', marginLeft: '-' + Math.round(rx * c.x) + 'px', marginTop: '-' + Math.round(ry * c.y) + 'px' }); } } }); function updateCoords(c) { $('#x').val(c.x); $('#y').val(c.y); $('#w').val(c.w); $('#h').val(c.h); } </script> ''' % (init_x, init_y, init_size, init_size, min_size_js))
python
def init_jcrop(min_size=None): """Initialize jcrop. :param min_size: The minimal size of crop area. """ init_x = current_app.config['AVATARS_CROP_INIT_POS'][0] init_y = current_app.config['AVATARS_CROP_INIT_POS'][1] init_size = current_app.config['AVATARS_CROP_INIT_SIZE'] or current_app.config['AVATARS_SIZE_TUPLE'][2] if current_app.config['AVATARS_CROP_MIN_SIZE']: min_size = min_size or current_app.config['AVATARS_SIZE_TUPLE'][2] min_size_js = 'jcrop_api.setOptions({minSize: [%d, %d]});' % (min_size, min_size) else: min_size_js = '' return Markup(''' <script type="text/javascript"> jQuery(function ($) { // Create variables (in this scope) to hold the API and image size var jcrop_api, boundx, boundy, // Grab some information about the preview pane $preview = $('#preview-box'), $pcnt = $('#preview-box .preview-box'), $pimg = $('#preview-box .preview-box img'), xsize = $pcnt.width(), ysize = $pcnt.height(); $('#crop-box').Jcrop({ onChange: updatePreview, onSelect: updateCoords, setSelect: [%s, %s, %s, %s], aspectRatio: 1 }, function () { // Use the API to get the real image size var bounds = this.getBounds(); boundx = bounds[0]; boundy = bounds[1]; // Store the API in the jcrop_api variable jcrop_api = this; %s jcrop_api.focus(); // Move the preview into the jcrop container for css positioning $preview.appendTo(jcrop_api.ui.holder); }); function updatePreview(c) { if (parseInt(c.w) > 0) { var rx = xsize / c.w; var ry = ysize / c.h; $pimg.css({ width: Math.round(rx * boundx) + 'px', height: Math.round(ry * boundy) + 'px', marginLeft: '-' + Math.round(rx * c.x) + 'px', marginTop: '-' + Math.round(ry * c.y) + 'px' }); } } }); function updateCoords(c) { $('#x').val(c.x); $('#y').val(c.y); $('#w').val(c.w); $('#h').val(c.h); } </script> ''' % (init_x, init_y, init_size, init_size, min_size_js))
[ "def", "init_jcrop", "(", "min_size", "=", "None", ")", ":", "init_x", "=", "current_app", ".", "config", "[", "'AVATARS_CROP_INIT_POS'", "]", "[", "0", "]", "init_y", "=", "current_app", ".", "config", "[", "'AVATARS_CROP_INIT_POS'", "]", "[", "1", "]", "init_size", "=", "current_app", ".", "config", "[", "'AVATARS_CROP_INIT_SIZE'", "]", "or", "current_app", ".", "config", "[", "'AVATARS_SIZE_TUPLE'", "]", "[", "2", "]", "if", "current_app", ".", "config", "[", "'AVATARS_CROP_MIN_SIZE'", "]", ":", "min_size", "=", "min_size", "or", "current_app", ".", "config", "[", "'AVATARS_SIZE_TUPLE'", "]", "[", "2", "]", "min_size_js", "=", "'jcrop_api.setOptions({minSize: [%d, %d]});'", "%", "(", "min_size", ",", "min_size", ")", "else", ":", "min_size_js", "=", "''", "return", "Markup", "(", "'''\n<script type=\"text/javascript\">\n jQuery(function ($) {\n // Create variables (in this scope) to hold the API and image size\n var jcrop_api,\n boundx,\n boundy,\n\n // Grab some information about the preview pane\n $preview = $('#preview-box'),\n $pcnt = $('#preview-box .preview-box'),\n $pimg = $('#preview-box .preview-box img'),\n\n xsize = $pcnt.width(),\n ysize = $pcnt.height();\n\n $('#crop-box').Jcrop({\n onChange: updatePreview,\n onSelect: updateCoords,\n setSelect: [%s, %s, %s, %s],\n aspectRatio: 1\n }, function () {\n // Use the API to get the real image size\n var bounds = this.getBounds();\n boundx = bounds[0];\n boundy = bounds[1];\n // Store the API in the jcrop_api variable\n jcrop_api = this;\n %s\n jcrop_api.focus();\n // Move the preview into the jcrop container for css positioning\n $preview.appendTo(jcrop_api.ui.holder);\n });\n\n function updatePreview(c) {\n if (parseInt(c.w) > 0) {\n var rx = xsize / c.w;\n var ry = ysize / c.h;\n $pimg.css({\n width: Math.round(rx * boundx) + 'px',\n height: Math.round(ry * boundy) + 'px',\n marginLeft: '-' + Math.round(rx * c.x) + 'px',\n marginTop: '-' + Math.round(ry * c.y) + 'px'\n });\n }\n }\n });\n\n function updateCoords(c) {\n $('#x').val(c.x);\n $('#y').val(c.y);\n $('#w').val(c.w);\n $('#h').val(c.h);\n }\n </script>\n '''", "%", "(", "init_x", ",", "init_y", ",", "init_size", ",", "init_size", ",", "min_size_js", ")", ")" ]
Initialize jcrop. :param min_size: The minimal size of crop area.
[ "Initialize", "jcrop", "." ]
train
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L157-L226
greyli/flask-avatars
flask_avatars/__init__.py
Avatars.resize_avatar
def resize_avatar(self, img, base_width):
        """Resize an avatar.

        :param img: The image that needs to be resize.
        :param base_width: The width of output image.
        """
        w_percent = (base_width / float(img.size[0]))
        h_size = int((float(img.size[1]) * float(w_percent)))
        img = img.resize((base_width, h_size), PIL.Image.ANTIALIAS)
        return img
python
def resize_avatar(self, img, base_width):
        """Resize an avatar.

        :param img: The image that needs to be resize.
        :param base_width: The width of output image.
        """
        w_percent = (base_width / float(img.size[0]))
        h_size = int((float(img.size[1]) * float(w_percent)))
        img = img.resize((base_width, h_size), PIL.Image.ANTIALIAS)
        return img
[ "def", "resize_avatar", "(", "self", ",", "img", ",", "base_width", ")", ":", "w_percent", "=", "(", "base_width", "/", "float", "(", "img", ".", "size", "[", "0", "]", ")", ")", "h_size", "=", "int", "(", "(", "float", "(", "img", ".", "size", "[", "1", "]", ")", "*", "float", "(", "w_percent", ")", ")", ")", "img", "=", "img", ".", "resize", "(", "(", "base_width", ",", "h_size", ")", ",", "PIL", ".", "Image", ".", "ANTIALIAS", ")", "return", "img" ]
Resize an avatar. :param img: The image that needs to be resize. :param base_width: The width of output image.
[ "Resize", "an", "avatar", "." ]
train
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L278-L287
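A standalone sketch of the proportional scaling above (requires Pillow). Note that Image.ANTIALIAS is deprecated and removed in recent Pillow releases; Image.LANCZOS, used here, is the equivalent filter:

from PIL import Image

img = Image.new('RGB', (800, 600))   # stand-in for an uploaded avatar
base_width = 200

w_percent = base_width / float(img.size[0])      # 0.25
h_size = int(float(img.size[1]) * w_percent)     # 600 * 0.25 = 150
resized = img.resize((base_width, h_size), Image.LANCZOS)

print(resized.size)   # (200, 150), width fixed, aspect ratio preserved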
greyli/flask-avatars
flask_avatars/__init__.py
Avatars.save_avatar
def save_avatar(self, image):
        """Save an avatar as raw image, return new filename.

        :param image: The image that needs to be saved.
        """
        path = current_app.config['AVATARS_SAVE_PATH']
        filename = uuid4().hex + '_raw.png'
        image.save(os.path.join(path, filename))
        return filename
python
def save_avatar(self, image):
        """Save an avatar as raw image, return new filename.

        :param image: The image that needs to be saved.
        """
        path = current_app.config['AVATARS_SAVE_PATH']
        filename = uuid4().hex + '_raw.png'
        image.save(os.path.join(path, filename))
        return filename
[ "def", "save_avatar", "(", "self", ",", "image", ")", ":", "path", "=", "current_app", ".", "config", "[", "'AVATARS_SAVE_PATH'", "]", "filename", "=", "uuid4", "(", ")", ".", "hex", "+", "'_raw.png'", "image", ".", "save", "(", "os", ".", "path", ".", "join", "(", "path", ",", "filename", ")", ")", "return", "filename" ]
Save an avatar as raw image, return new filename. :param image: The image that needs to be saved.
[ "Save", "an", "avatar", "as", "raw", "image", "return", "new", "filename", "." ]
train
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L289-L297
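A standalone sketch of the filename scheme used above: a random uuid4 hex plus a '_raw.png' suffix, written into whatever directory AVATARS_SAVE_PATH points at (a temporary directory stands in here; requires Pillow):

import os
import tempfile
from uuid import uuid4

from PIL import Image

save_path = tempfile.mkdtemp()       # stand-in for AVATARS_SAVE_PATH
image = Image.new('RGB', (64, 64))   # stand-in for an uploaded image

filename = uuid4().hex + '_raw.png'
image.save(os.path.join(save_path, filename))
print(filename)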
greyli/flask-avatars
flask_avatars/__init__.py
Avatars.crop_avatar
def crop_avatar(self, filename, x, y, w, h): """Crop avatar with given size, return a list of file name: [filename_s, filename_m, filename_l]. :param filename: The raw image's filename. :param x: The x-pos to start crop. :param y: The y-pos to start crop. :param w: The crop width. :param h: The crop height. """ x = int(x) y = int(y) w = int(w) h = int(h) sizes = current_app.config['AVATARS_SIZE_TUPLE'] if not filename: path = os.path.join(self.root_path, 'static/default/default_l.jpg') else: path = os.path.join(current_app.config['AVATARS_SAVE_PATH'], filename) print(path) raw_img = Image.open(path) base_width = current_app.config['AVATARS_CROP_BASE_WIDTH'] if raw_img.size[0] >= base_width: raw_img = self.resize_avatar(raw_img, base_width=base_width) cropped_img = raw_img.crop((x, y, x + w, y + h)) filename = uuid4().hex avatar_s = self.resize_avatar(cropped_img, base_width=sizes[0]) avatar_m = self.resize_avatar(cropped_img, base_width=sizes[1]) avatar_l = self.resize_avatar(cropped_img, base_width=sizes[2]) filename_s = filename + '_s.png' filename_m = filename + '_m.png' filename_l = filename + '_l.png' path_s = os.path.join(current_app.config['AVATARS_SAVE_PATH'], filename_s) path_m = os.path.join(current_app.config['AVATARS_SAVE_PATH'], filename_m) path_l = os.path.join(current_app.config['AVATARS_SAVE_PATH'], filename_l) avatar_s.save(path_s, optimize=True, quality=85) avatar_m.save(path_m, optimize=True, quality=85) avatar_l.save(path_l, optimize=True, quality=85) return [filename_s, filename_m, filename_l]
python
def crop_avatar(self, filename, x, y, w, h): """Crop avatar with given size, return a list of file name: [filename_s, filename_m, filename_l]. :param filename: The raw image's filename. :param x: The x-pos to start crop. :param y: The y-pos to start crop. :param w: The crop width. :param h: The crop height. """ x = int(x) y = int(y) w = int(w) h = int(h) sizes = current_app.config['AVATARS_SIZE_TUPLE'] if not filename: path = os.path.join(self.root_path, 'static/default/default_l.jpg') else: path = os.path.join(current_app.config['AVATARS_SAVE_PATH'], filename) print(path) raw_img = Image.open(path) base_width = current_app.config['AVATARS_CROP_BASE_WIDTH'] if raw_img.size[0] >= base_width: raw_img = self.resize_avatar(raw_img, base_width=base_width) cropped_img = raw_img.crop((x, y, x + w, y + h)) filename = uuid4().hex avatar_s = self.resize_avatar(cropped_img, base_width=sizes[0]) avatar_m = self.resize_avatar(cropped_img, base_width=sizes[1]) avatar_l = self.resize_avatar(cropped_img, base_width=sizes[2]) filename_s = filename + '_s.png' filename_m = filename + '_m.png' filename_l = filename + '_l.png' path_s = os.path.join(current_app.config['AVATARS_SAVE_PATH'], filename_s) path_m = os.path.join(current_app.config['AVATARS_SAVE_PATH'], filename_m) path_l = os.path.join(current_app.config['AVATARS_SAVE_PATH'], filename_l) avatar_s.save(path_s, optimize=True, quality=85) avatar_m.save(path_m, optimize=True, quality=85) avatar_l.save(path_l, optimize=True, quality=85) return [filename_s, filename_m, filename_l]
[ "def", "crop_avatar", "(", "self", ",", "filename", ",", "x", ",", "y", ",", "w", ",", "h", ")", ":", "x", "=", "int", "(", "x", ")", "y", "=", "int", "(", "y", ")", "w", "=", "int", "(", "w", ")", "h", "=", "int", "(", "h", ")", "sizes", "=", "current_app", ".", "config", "[", "'AVATARS_SIZE_TUPLE'", "]", "if", "not", "filename", ":", "path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "root_path", ",", "'static/default/default_l.jpg'", ")", "else", ":", "path", "=", "os", ".", "path", ".", "join", "(", "current_app", ".", "config", "[", "'AVATARS_SAVE_PATH'", "]", ",", "filename", ")", "print", "(", "path", ")", "raw_img", "=", "Image", ".", "open", "(", "path", ")", "base_width", "=", "current_app", ".", "config", "[", "'AVATARS_CROP_BASE_WIDTH'", "]", "if", "raw_img", ".", "size", "[", "0", "]", ">=", "base_width", ":", "raw_img", "=", "self", ".", "resize_avatar", "(", "raw_img", ",", "base_width", "=", "base_width", ")", "cropped_img", "=", "raw_img", ".", "crop", "(", "(", "x", ",", "y", ",", "x", "+", "w", ",", "y", "+", "h", ")", ")", "filename", "=", "uuid4", "(", ")", ".", "hex", "avatar_s", "=", "self", ".", "resize_avatar", "(", "cropped_img", ",", "base_width", "=", "sizes", "[", "0", "]", ")", "avatar_m", "=", "self", ".", "resize_avatar", "(", "cropped_img", ",", "base_width", "=", "sizes", "[", "1", "]", ")", "avatar_l", "=", "self", ".", "resize_avatar", "(", "cropped_img", ",", "base_width", "=", "sizes", "[", "2", "]", ")", "filename_s", "=", "filename", "+", "'_s.png'", "filename_m", "=", "filename", "+", "'_m.png'", "filename_l", "=", "filename", "+", "'_l.png'", "path_s", "=", "os", ".", "path", ".", "join", "(", "current_app", ".", "config", "[", "'AVATARS_SAVE_PATH'", "]", ",", "filename_s", ")", "path_m", "=", "os", ".", "path", ".", "join", "(", "current_app", ".", "config", "[", "'AVATARS_SAVE_PATH'", "]", ",", "filename_m", ")", "path_l", "=", "os", ".", "path", ".", "join", "(", "current_app", ".", "config", "[", "'AVATARS_SAVE_PATH'", "]", ",", "filename_l", ")", "avatar_s", ".", "save", "(", "path_s", ",", "optimize", "=", "True", ",", "quality", "=", "85", ")", "avatar_m", ".", "save", "(", "path_m", ",", "optimize", "=", "True", ",", "quality", "=", "85", ")", "avatar_l", ".", "save", "(", "path_l", ",", "optimize", "=", "True", ",", "quality", "=", "85", ")", "return", "[", "filename_s", ",", "filename_m", ",", "filename_l", "]" ]
Crop avatar with given size, return a list of file name: [filename_s, filename_m, filename_l]. :param filename: The raw image's filename. :param x: The x-pos to start crop. :param y: The y-pos to start crop. :param w: The crop width. :param h: The crop height.
[ "Crop", "avatar", "with", "given", "size", "return", "a", "list", "of", "file", "name", ":", "[", "filename_s", "filename_m", "filename_l", "]", "." ]
train
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L299-L349
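Read together, `save_avatar` and `crop_avatar` form a two-step upload flow: persist the raw upload under a UUID name, then crop it into the three sizes listed in `AVATARS_SIZE_TUPLE`. A minimal usage sketch under assumed configuration follows; the app, the folder name and the numeric values are hypothetical, and on Pillow 10+ the internal `resize_avatar` call would still hit the `ANTIALIAS` caveat noted earlier:

```python
import os
from flask import Flask
from flask_avatars import Avatars
from PIL import Image

app = Flask(__name__)
# Keys read by save_avatar/crop_avatar in the records above; values are illustrative.
app.config['AVATARS_SAVE_PATH'] = os.path.join(app.root_path, 'avatars_uploads')
app.config['AVATARS_SIZE_TUPLE'] = (30, 100, 200)
app.config['AVATARS_CROP_BASE_WIDTH'] = 500
avatars = Avatars(app)

with app.app_context():
    os.makedirs(app.config['AVATARS_SAVE_PATH'], exist_ok=True)
    raw = Image.new('RGB', (400, 400), 'white')          # stand-in for an uploaded photo
    raw_name = avatars.save_avatar(raw)                  # '<uuid>_raw.png'
    names = avatars.crop_avatar(raw_name, x=0, y=0, w=300, h=300)
    print(names)                                         # ['<uuid>_s.png', '<uuid>_m.png', '<uuid>_l.png']
```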
greyli/flask-avatars
flask_avatars/identicon.py
Identicon.get_image
def get_image(self, string, width, height, pad=0): """ Byte representation of a PNG image """ hex_digest_byte_list = self._string_to_byte_list(string) matrix = self._create_matrix(hex_digest_byte_list) return self._create_image(matrix, width, height, pad)
python
def get_image(self, string, width, height, pad=0): """ Byte representation of a PNG image """ hex_digest_byte_list = self._string_to_byte_list(string) matrix = self._create_matrix(hex_digest_byte_list) return self._create_image(matrix, width, height, pad)
[ "def", "get_image", "(", "self", ",", "string", ",", "width", ",", "height", ",", "pad", "=", "0", ")", ":", "hex_digest_byte_list", "=", "self", ".", "_string_to_byte_list", "(", "string", ")", "matrix", "=", "self", ".", "_create_matrix", "(", "hex_digest_byte_list", ")", "return", "self", ".", "_create_image", "(", "matrix", ",", "width", ",", "height", ",", "pad", ")" ]
Byte representation of a PNG image
[ "Byte", "representation", "of", "a", "PNG", "image" ]
train
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/identicon.py#L72-L78
greyli/flask-avatars
flask_avatars/identicon.py
Identicon._get_pastel_colour
def _get_pastel_colour(self, lighten=127): """ Create a pastel colour hex colour string """ def r(): return random.randint(0, 128) + lighten return r(), r(), r()
python
def _get_pastel_colour(self, lighten=127): """ Create a pastel colour hex colour string """ def r(): return random.randint(0, 128) + lighten return r(), r(), r()
[ "def", "_get_pastel_colour", "(", "self", ",", "lighten", "=", "127", ")", ":", "def", "r", "(", ")", ":", "return", "random", ".", "randint", "(", "0", ",", "128", ")", "+", "lighten", "return", "r", "(", ")", ",", "r", "(", ")", ",", "r", "(", ")" ]
Create a pastel colour hex colour string
[ "Create", "a", "pastel", "colour", "hex", "colour", "string" ]
train
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/identicon.py#L87-L93
greyli/flask-avatars
flask_avatars/identicon.py
Identicon._luminance
def _luminance(self, rgb): """ Determine the liminanace of an RGB colour """ a = [] for v in rgb: v = v / float(255) if v < 0.03928: result = v / 12.92 else: result = math.pow(((v + 0.055) / 1.055), 2.4) a.append(result) return a[0] * 0.2126 + a[1] * 0.7152 + a[2] * 0.0722
python
def _luminance(self, rgb): """ Determine the liminanace of an RGB colour """ a = [] for v in rgb: v = v / float(255) if v < 0.03928: result = v / 12.92 else: result = math.pow(((v + 0.055) / 1.055), 2.4) a.append(result) return a[0] * 0.2126 + a[1] * 0.7152 + a[2] * 0.0722
[ "def", "_luminance", "(", "self", ",", "rgb", ")", ":", "a", "=", "[", "]", "for", "v", "in", "rgb", ":", "v", "=", "v", "/", "float", "(", "255", ")", "if", "v", "<", "0.03928", ":", "result", "=", "v", "/", "12.92", "else", ":", "result", "=", "math", ".", "pow", "(", "(", "(", "v", "+", "0.055", ")", "/", "1.055", ")", ",", "2.4", ")", "a", ".", "append", "(", "result", ")", "return", "a", "[", "0", "]", "*", "0.2126", "+", "a", "[", "1", "]", "*", "0.7152", "+", "a", "[", "2", "]", "*", "0.0722" ]
Determine the luminance of an RGB colour
[ "Determine", "the", "luminance", "of", "an", "RGB", "colour" ]
train
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/identicon.py#L95-L108
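The `_luminance` helper in the record above is the standard relative-luminance computation used for contrast decisions: each channel is scaled to [0, 1], linearised as v / 12.92 when v < 0.03928 and ((v + 0.055) / 1.055)^2.4 otherwise, and the results are combined as L = 0.2126·R + 0.7152·G + 0.0722·B. As a quick check, pure white (255, 255, 255) linearises every channel to 1.0, giving L = 0.2126 + 0.7152 + 0.0722 = 1.0, while pure black gives L = 0.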
greyli/flask-avatars
flask_avatars/identicon.py
Identicon._string_to_byte_list
def _string_to_byte_list(self, data): """ Creates a hex digest of the input string given to create the image, if it's not already hexadecimal Returns: Length 16 list of rgb value range integers (each representing a byte of the hex digest) """ bytes_length = 16 m = self.digest() m.update(str.encode(data)) hex_digest = m.hexdigest() return list(int(hex_digest[num * 2:num * 2 + 2], bytes_length) for num in range(bytes_length))
python
def _string_to_byte_list(self, data): """ Creates a hex digest of the input string given to create the image, if it's not already hexadecimal Returns: Length 16 list of rgb value range integers (each representing a byte of the hex digest) """ bytes_length = 16 m = self.digest() m.update(str.encode(data)) hex_digest = m.hexdigest() return list(int(hex_digest[num * 2:num * 2 + 2], bytes_length) for num in range(bytes_length))
[ "def", "_string_to_byte_list", "(", "self", ",", "data", ")", ":", "bytes_length", "=", "16", "m", "=", "self", ".", "digest", "(", ")", "m", ".", "update", "(", "str", ".", "encode", "(", "data", ")", ")", "hex_digest", "=", "m", ".", "hexdigest", "(", ")", "return", "list", "(", "int", "(", "hex_digest", "[", "num", "*", "2", ":", "num", "*", "2", "+", "2", "]", ",", "bytes_length", ")", "for", "num", "in", "range", "(", "bytes_length", ")", ")" ]
Creates a hex digest of the input string given to create the image, if it's not already hexadecimal Returns: Length 16 list of rgb value range integers (each representing a byte of the hex digest)
[ "Creates", "a", "hex", "digest", "of", "the", "input", "string", "given", "to", "create", "the", "image", "if", "it", "s", "not", "already", "hexadecimal" ]
train
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/identicon.py#L110-L126
greyli/flask-avatars
flask_avatars/identicon.py
Identicon._bit_is_one
def _bit_is_one(self, n, hash_bytes): """ Check if the n (index) of hash_bytes is 1 or 0. """ scale = 16 # hexadecimal if not hash_bytes[int(n / (scale / 2))] >> int( (scale / 2) - ((n % (scale / 2)) + 1)) & 1 == 1: return False return True
python
def _bit_is_one(self, n, hash_bytes): """ Check if the n (index) of hash_bytes is 1 or 0. """ scale = 16 # hexadecimal if not hash_bytes[int(n / (scale / 2))] >> int( (scale / 2) - ((n % (scale / 2)) + 1)) & 1 == 1: return False return True
[ "def", "_bit_is_one", "(", "self", ",", "n", ",", "hash_bytes", ")", ":", "scale", "=", "16", "# hexadecimal", "if", "not", "hash_bytes", "[", "int", "(", "n", "/", "(", "scale", "/", "2", ")", ")", "]", ">>", "int", "(", "(", "scale", "/", "2", ")", "-", "(", "(", "n", "%", "(", "scale", "/", "2", ")", ")", "+", "1", ")", ")", "&", "1", "==", "1", ":", "return", "False", "return", "True" ]
Check if the n (index) of hash_bytes is 1 or 0.
[ "Check", "if", "the", "n", "(", "index", ")", "of", "hash_bytes", "is", "1", "or", "0", "." ]
train
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/identicon.py#L128-L138
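`_bit_is_one` treats the 16 digest bytes as a string of 128 bits, most significant bit first within each byte: with scale / 2 = 8, index n selects byte n // 8 and bit position 7 - (n % 8) inside it. A standalone sketch of the same indexing, using only the standard library (the function name here is illustrative):

```python
def bit_is_one(n, hash_bytes):
    # Same arithmetic as Identicon._bit_is_one: pick byte n // 8 and test
    # the bit counted from that byte's most significant end.
    return (hash_bytes[n // 8] >> (7 - n % 8)) & 1 == 1

# 0b10100000: bits 0 and 2 of the first byte are set, bit 1 is not.
assert bit_is_one(0, [0b10100000, 0])
assert not bit_is_one(1, [0b10100000, 0])
assert bit_is_one(2, [0b10100000, 0])
```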
greyli/flask-avatars
flask_avatars/identicon.py
Identicon._create_image
def _create_image(self, matrix, width, height, pad): """ Generates a PNG byte list """ image = Image.new("RGB", (width + (pad * 2), height + (pad * 2)), self.bg_colour) image_draw = ImageDraw.Draw(image) # Calculate the block width and height. block_width = float(width) / self.cols block_height = float(height) / self.rows # Loop through blocks in matrix, draw rectangles. for row, cols in enumerate(matrix): for col, cell in enumerate(cols): if cell: image_draw.rectangle(( pad + col * block_width, # x1 pad + row * block_height, # y1 pad + (col + 1) * block_width - 1, # x2 pad + (row + 1) * block_height - 1 # y2 ), fill=self.fg_colour) stream = BytesIO() image.save(stream, format="png", optimize=True) # return the image byte data return stream.getvalue()
python
def _create_image(self, matrix, width, height, pad): """ Generates a PNG byte list """ image = Image.new("RGB", (width + (pad * 2), height + (pad * 2)), self.bg_colour) image_draw = ImageDraw.Draw(image) # Calculate the block width and height. block_width = float(width) / self.cols block_height = float(height) / self.rows # Loop through blocks in matrix, draw rectangles. for row, cols in enumerate(matrix): for col, cell in enumerate(cols): if cell: image_draw.rectangle(( pad + col * block_width, # x1 pad + row * block_height, # y1 pad + (col + 1) * block_width - 1, # x2 pad + (row + 1) * block_height - 1 # y2 ), fill=self.fg_colour) stream = BytesIO() image.save(stream, format="png", optimize=True) # return the image byte data return stream.getvalue()
[ "def", "_create_image", "(", "self", ",", "matrix", ",", "width", ",", "height", ",", "pad", ")", ":", "image", "=", "Image", ".", "new", "(", "\"RGB\"", ",", "(", "width", "+", "(", "pad", "*", "2", ")", ",", "height", "+", "(", "pad", "*", "2", ")", ")", ",", "self", ".", "bg_colour", ")", "image_draw", "=", "ImageDraw", ".", "Draw", "(", "image", ")", "# Calculate the block width and height.", "block_width", "=", "float", "(", "width", ")", "/", "self", ".", "cols", "block_height", "=", "float", "(", "height", ")", "/", "self", ".", "rows", "# Loop through blocks in matrix, draw rectangles.", "for", "row", ",", "cols", "in", "enumerate", "(", "matrix", ")", ":", "for", "col", ",", "cell", "in", "enumerate", "(", "cols", ")", ":", "if", "cell", ":", "image_draw", ".", "rectangle", "(", "(", "pad", "+", "col", "*", "block_width", ",", "# x1", "pad", "+", "row", "*", "block_height", ",", "# y1", "pad", "+", "(", "col", "+", "1", ")", "*", "block_width", "-", "1", ",", "# x2", "pad", "+", "(", "row", "+", "1", ")", "*", "block_height", "-", "1", "# y2", ")", ",", "fill", "=", "self", ".", "fg_colour", ")", "stream", "=", "BytesIO", "(", ")", "image", ".", "save", "(", "stream", ",", "format", "=", "\"png\"", ",", "optimize", "=", "True", ")", "# return the image byte data", "return", "stream", ".", "getvalue", "(", ")" ]
Generates a PNG byte list
[ "Generates", "a", "PNG", "byte", "list" ]
train
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/identicon.py#L140-L167
greyli/flask-avatars
flask_avatars/identicon.py
Identicon._create_matrix
def _create_matrix(self, byte_list): """ This matrix decides which blocks should be filled fg/bg colour True for fg_colour False for bg_colour hash_bytes - array of hash bytes values. RGB range values in each slot Returns: List representation of the matrix [[True, True, True, True], [False, True, True, False], [True, True, True, True], [False, False, False, False]] """ # Number of rows * cols halfed and rounded # in order to fill opposite side cells = int(self.rows * self.cols / 2 + self.cols % 2) matrix = [[False] * self.cols for num in range(self.rows)] for cell_number in range(cells): # If the bit with index corresponding to this cell is 1 # mark that cell as fg_colour # Skip byte 1, that's used in determining fg_colour if self._bit_is_one(cell_number, byte_list[1:]): # Find cell coordinates in matrix. x_row = cell_number % self.rows y_col = int(cell_number / self.cols) # Set coord True and its opposite side matrix[x_row][self.cols - y_col - 1] = True matrix[x_row][y_col] = True return matrix
python
def _create_matrix(self, byte_list): """ This matrix decides which blocks should be filled fg/bg colour True for fg_colour False for bg_colour hash_bytes - array of hash bytes values. RGB range values in each slot Returns: List representation of the matrix [[True, True, True, True], [False, True, True, False], [True, True, True, True], [False, False, False, False]] """ # Number of rows * cols halfed and rounded # in order to fill opposite side cells = int(self.rows * self.cols / 2 + self.cols % 2) matrix = [[False] * self.cols for num in range(self.rows)] for cell_number in range(cells): # If the bit with index corresponding to this cell is 1 # mark that cell as fg_colour # Skip byte 1, that's used in determining fg_colour if self._bit_is_one(cell_number, byte_list[1:]): # Find cell coordinates in matrix. x_row = cell_number % self.rows y_col = int(cell_number / self.cols) # Set coord True and its opposite side matrix[x_row][self.cols - y_col - 1] = True matrix[x_row][y_col] = True return matrix
[ "def", "_create_matrix", "(", "self", ",", "byte_list", ")", ":", "# Number of rows * cols halfed and rounded", "# in order to fill opposite side", "cells", "=", "int", "(", "self", ".", "rows", "*", "self", ".", "cols", "/", "2", "+", "self", ".", "cols", "%", "2", ")", "matrix", "=", "[", "[", "False", "]", "*", "self", ".", "cols", "for", "num", "in", "range", "(", "self", ".", "rows", ")", "]", "for", "cell_number", "in", "range", "(", "cells", ")", ":", "# If the bit with index corresponding to this cell is 1", "# mark that cell as fg_colour", "# Skip byte 1, that's used in determining fg_colour", "if", "self", ".", "_bit_is_one", "(", "cell_number", ",", "byte_list", "[", "1", ":", "]", ")", ":", "# Find cell coordinates in matrix.", "x_row", "=", "cell_number", "%", "self", ".", "rows", "y_col", "=", "int", "(", "cell_number", "/", "self", ".", "cols", ")", "# Set coord True and its opposite side", "matrix", "[", "x_row", "]", "[", "self", ".", "cols", "-", "y_col", "-", "1", "]", "=", "True", "matrix", "[", "x_row", "]", "[", "y_col", "]", "=", "True", "return", "matrix" ]
This matrix decides which blocks should be filled fg/bg colour True for fg_colour False for bg_colour hash_bytes - array of hash bytes values. RGB range values in each slot Returns: List representation of the matrix [[True, True, True, True], [False, True, True, False], [True, True, True, True], [False, False, False, False]]
[ "This", "matrix", "decides", "which", "blocks", "should", "be", "filled", "fg", "/", "bg", "colour", "True", "for", "fg_colour", "False", "for", "bg_colour" ]
train
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/identicon.py#L169-L203
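The `_create_matrix` docstring describes the mirroring that gives identicons their left-right symmetry: only half of the cells are decided from the hash bits, every decided cell also switches on its horizontal mirror image, and byte 0 is held back for the foreground colour. A compact standalone sketch of that construction for a 4x4 grid, assuming only the standard library (the grid size and the md5 choice are illustrative, not flask-avatars defaults):

```python
import hashlib

ROWS = COLS = 4  # illustrative grid size

def identicon_matrix(text):
    # 16 integers from an md5 digest, as _string_to_byte_list produces.
    byte_list = list(hashlib.md5(text.encode()).digest())
    bits = byte_list[1:]                      # byte 0 is reserved for the colour
    matrix = [[False] * COLS for _ in range(ROWS)]
    # Decide half the cells (rounded up) and mirror each one, as _create_matrix does.
    for n in range(ROWS * COLS // 2 + COLS % 2):
        if (bits[n // 8] >> (7 - n % 8)) & 1:
            row, col = n % ROWS, n // COLS
            matrix[row][col] = True
            matrix[row][COLS - col - 1] = True
    return matrix

for row in identicon_matrix('flask'):
    print(''.join('#' if cell else '.' for cell in row))
```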
greyli/flask-avatars
flask_avatars/identicon.py
Identicon.generate
def generate(self, text): """Generate and save avatars, return a list of file name: [filename_s, filename_m, filename_l]. :param text: The text used to generate image. """ sizes = current_app.config['AVATARS_SIZE_TUPLE'] path = current_app.config['AVATARS_SAVE_PATH'] suffix = {sizes[0]: 's', sizes[1]: 'm', sizes[2]: 'l'} for size in sizes: image_byte_array = self.get_image( string=str(text), width=int(size), height=int(size), pad=int(size * 0.1)) self.save(image_byte_array, save_location=os.path.join(path, '%s_%s.png' % (text, suffix[size]))) return [text + '_s.png', text + '_m.png', text + '_l.png']
python
def generate(self, text): """Generate and save avatars, return a list of file name: [filename_s, filename_m, filename_l]. :param text: The text used to generate image. """ sizes = current_app.config['AVATARS_SIZE_TUPLE'] path = current_app.config['AVATARS_SAVE_PATH'] suffix = {sizes[0]: 's', sizes[1]: 'm', sizes[2]: 'l'} for size in sizes: image_byte_array = self.get_image( string=str(text), width=int(size), height=int(size), pad=int(size * 0.1)) self.save(image_byte_array, save_location=os.path.join(path, '%s_%s.png' % (text, suffix[size]))) return [text + '_s.png', text + '_m.png', text + '_l.png']
[ "def", "generate", "(", "self", ",", "text", ")", ":", "sizes", "=", "current_app", ".", "config", "[", "'AVATARS_SIZE_TUPLE'", "]", "path", "=", "current_app", ".", "config", "[", "'AVATARS_SAVE_PATH'", "]", "suffix", "=", "{", "sizes", "[", "0", "]", ":", "'s'", ",", "sizes", "[", "1", "]", ":", "'m'", ",", "sizes", "[", "2", "]", ":", "'l'", "}", "for", "size", "in", "sizes", ":", "image_byte_array", "=", "self", ".", "get_image", "(", "string", "=", "str", "(", "text", ")", ",", "width", "=", "int", "(", "size", ")", ",", "height", "=", "int", "(", "size", ")", ",", "pad", "=", "int", "(", "size", "*", "0.1", ")", ")", "self", ".", "save", "(", "image_byte_array", ",", "save_location", "=", "os", ".", "path", ".", "join", "(", "path", ",", "'%s_%s.png'", "%", "(", "text", ",", "suffix", "[", "size", "]", ")", ")", ")", "return", "[", "text", "+", "'_s.png'", ",", "text", "+", "'_m.png'", ",", "text", "+", "'_l.png'", "]" ]
Generate and save avatars, return a list of file name: [filename_s, filename_m, filename_l]. :param text: The text used to generate image.
[ "Generate", "and", "save", "avatars", "return", "a", "list", "of", "file", "name", ":", "[", "filename_s", "filename_m", "filename_l", "]", "." ]
train
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/identicon.py#L205-L221
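Unlike `crop_avatar`, `Identicon.generate` needs no uploaded file: it renders three sized PNGs straight from a text seed into `AVATARS_SAVE_PATH`. A minimal usage sketch under assumed configuration (the app setup and folder name are hypothetical; `Identicon` and the config keys come from the records above):

```python
import os
from flask import Flask
from flask_avatars import Avatars, Identicon

app = Flask(__name__)
app.config['AVATARS_SAVE_PATH'] = os.path.join(app.root_path, 'avatars_uploads')  # hypothetical folder
app.config['AVATARS_SIZE_TUPLE'] = (30, 100, 200)                                 # illustrative sizes
avatars = Avatars(app)

with app.app_context():
    os.makedirs(app.config['AVATARS_SAVE_PATH'], exist_ok=True)
    filenames = Identicon().generate(text='some-user-id')
    print(filenames)  # ['some-user-id_s.png', 'some-user-id_m.png', 'some-user-id_l.png']
```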
rbuffat/pyepw
pyepw/epw.py
Location.read
def read(self, vals): """Read values. Args: vals (list): list of strings representing values """ i = 0 if len(vals[i]) == 0: self.city = None else: self.city = vals[i] i += 1 if len(vals[i]) == 0: self.state_province_region = None else: self.state_province_region = vals[i] i += 1 if len(vals[i]) == 0: self.country = None else: self.country = vals[i] i += 1 if len(vals[i]) == 0: self.source = None else: self.source = vals[i] i += 1 if len(vals[i]) == 0: self.wmo = None else: self.wmo = vals[i] i += 1 if len(vals[i]) == 0: self.latitude = None else: self.latitude = vals[i] i += 1 if len(vals[i]) == 0: self.longitude = None else: self.longitude = vals[i] i += 1 if len(vals[i]) == 0: self.timezone = None else: self.timezone = vals[i] i += 1 if len(vals[i]) == 0: self.elevation = None else: self.elevation = vals[i] i += 1
python
def read(self, vals): """Read values. Args: vals (list): list of strings representing values """ i = 0 if len(vals[i]) == 0: self.city = None else: self.city = vals[i] i += 1 if len(vals[i]) == 0: self.state_province_region = None else: self.state_province_region = vals[i] i += 1 if len(vals[i]) == 0: self.country = None else: self.country = vals[i] i += 1 if len(vals[i]) == 0: self.source = None else: self.source = vals[i] i += 1 if len(vals[i]) == 0: self.wmo = None else: self.wmo = vals[i] i += 1 if len(vals[i]) == 0: self.latitude = None else: self.latitude = vals[i] i += 1 if len(vals[i]) == 0: self.longitude = None else: self.longitude = vals[i] i += 1 if len(vals[i]) == 0: self.timezone = None else: self.timezone = vals[i] i += 1 if len(vals[i]) == 0: self.elevation = None else: self.elevation = vals[i] i += 1
[ "def", "read", "(", "self", ",", "vals", ")", ":", "i", "=", "0", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "city", "=", "None", "else", ":", "self", ".", "city", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "state_province_region", "=", "None", "else", ":", "self", ".", "state_province_region", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "country", "=", "None", "else", ":", "self", ".", "country", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "source", "=", "None", "else", ":", "self", ".", "source", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "wmo", "=", "None", "else", ":", "self", ".", "wmo", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "latitude", "=", "None", "else", ":", "self", ".", "latitude", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "longitude", "=", "None", "else", ":", "self", ".", "longitude", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "timezone", "=", "None", "else", ":", "self", ".", "timezone", "=", "vals", "[", "i", "]", "i", "+=", "1", "if", "len", "(", "vals", "[", "i", "]", ")", "==", "0", ":", "self", ".", "elevation", "=", "None", "else", ":", "self", ".", "elevation", "=", "vals", "[", "i", "]", "i", "+=", "1" ]
Read values. Args: vals (list): list of strings representing values
[ "Read", "values", "." ]
train
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L35-L87
rbuffat/pyepw
pyepw/epw.py
Location.city
def city(self, value=None): """Corresponds to IDD Field `city` Args: value (str): value for IDD Field `city` if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value """ if value is not None: try: value = str(value) except ValueError: raise ValueError('value {} need to be of type str ' 'for field `city`'.format(value)) if ',' in value: raise ValueError('value should not contain a comma ' 'for field `city`') self._city = value
python
def city(self, value=None): """Corresponds to IDD Field `city` Args: value (str): value for IDD Field `city` if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value """ if value is not None: try: value = str(value) except ValueError: raise ValueError('value {} need to be of type str ' 'for field `city`'.format(value)) if ',' in value: raise ValueError('value should not contain a comma ' 'for field `city`') self._city = value
[ "def", "city", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "str", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type str '", "'for field `city`'", ".", "format", "(", "value", ")", ")", "if", "','", "in", "value", ":", "raise", "ValueError", "(", "'value should not contain a comma '", "'for field `city`'", ")", "self", ".", "_city", "=", "value" ]
Corresponds to IDD Field `city` Args: value (str): value for IDD Field `city` if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
[ "Corresponds", "to", "IDD", "Field", "city" ]
train
https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L100-L122
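The two pyepw records that close this section show the pattern `epw.py` repeats for every header object: `Location.read` assigns the comma-separated EPW header fields positionally, and each property setter (such as `city`) coerces the value to the expected type and rejects anything that would corrupt the comma-delimited header line. A small sketch of that validation pattern in isolation, assuming nothing about pyepw beyond what the records show (the class here is illustrative):

```python
class HeaderField:
    """Minimal stand-in for the pyepw property pattern shown above."""

    def __init__(self):
        self._city = None

    @property
    def city(self):
        return self._city

    @city.setter
    def city(self, value=None):
        if value is not None:
            value = str(value)
            # A comma would shift every later field in the EPW header line.
            if ',' in value:
                raise ValueError('value should not contain a comma for field `city`')
        self._city = value


loc = HeaderField()
loc.city = 'Berlin'            # accepted
try:
    loc.city = 'Berlin, DE'    # rejected: comma would break the header
except ValueError as exc:
    print(exc)
```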