_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q45100
BasePageResource.get_prefix
train
def get_prefix(self):
    """
    Each resource defined in config for pages as dict. This method returns
    key from config where located current resource.

    Scans ``self.pages_config`` and returns the key whose configured
    resource (or that resource's ``model`` attribute) is exactly the type
    of ``self.node``; returns None implicitly when nothing matches.
    """
    for key, value in self.pages_config.items():
        # Normalise a scalar config entry to a 1-tuple so it can be iterated.
        if not hasattr(value, '__iter__'):
            value = (value, )
        for item in value:
            # Deliberate exact-type comparison (not isinstance): subclasses
            # must not match -- TODO confirm this is intended.
            if type(self.node) == item\
                    or type(self.node) == getattr(item, 'model', None):
                return key
python
{ "resource": "" }
q45101
bytes2zip
train
def bytes2zip(bytes):
    """
    RETURN COMPRESSED BYTES

    Gzip-compress *bytes*.  Two modes:
    * file-like input (has ``read``): treated as an iterable of chunks,
      compressed into a temporary file and returned wrapped in FileString
      (a streaming result);
    * otherwise: compressed in memory and the raw gzip bytes returned.

    NOTE(review): the parameter shadows the ``bytes`` builtin.
    """
    if hasattr(bytes, "read"):
        buff = TemporaryFile()
        archive = gzip.GzipFile(fileobj=buff, mode='w')
        for b in bytes:
            archive.write(b)
        archive.close()
        buff.seek(0)
        # Local import -- presumably to avoid a circular dependency.
        from pyLibrary.env.big_data import FileString, safe_size
        return FileString(buff)

    buff = BytesIO()
    archive = gzip.GzipFile(fileobj=buff, mode='w')
    archive.write(bytes)
    archive.close()
    return buff.getvalue()
python
{ "resource": "" }
q45102
ini2value
train
def ini2value(ini_content):
    """
    INI FILE CONTENT TO Data

    Parse *ini_content* (a string of INI text) into a nested mapping of
    {section: {option: value}} and return it through ``wrap``.
    """
    from mo_future import ConfigParser, StringIO

    buff = StringIO(ini_content)
    config = ConfigParser()
    # NOTE(review): _read is a private ConfigParser API; "dummy" is only
    # the pseudo-filename used in parse error messages.
    config._read(buff, "dummy")

    output = {}
    for section in config.sections():
        output[section] = s = {}
        for k, v in config.items(section):
            s[k] = v
    return wrap(output)
python
{ "resource": "" }
q45103
dict_partial_cmp
train
def dict_partial_cmp(target_dict, dict_list, ducktype):
    """
    Whether partial dict are in dict_list or not

    True when at least one dict in *dict_list* contains every key/value
    pair of *target_dict*.  Values are compared through *ducktype*, whose
    result may expose an ``mtest`` matcher instead of plain equality.
    """
    for candidate in dict_list:
        # A candidate with fewer keys can never contain the target.
        if len(candidate) < len(target_dict):
            continue
        matched = {}
        for key, expected in target_dict.items():
            checker = ducktype(expected)
            if key not in candidate:
                continue
            if hasattr(checker, "mtest"):
                hit = checker.mtest(candidate[key])
            else:
                hit = checker == candidate[key]
            if hit:
                matched[key] = expected
        if matched == target_dict:
            return True
    # no candidate fully contained the target
    return False
python
{ "resource": "" }
q45104
dict_partial_cmp_always
train
def dict_partial_cmp_always(target_dict, dict_list, ducktype):
    """
    Whether partial dict are always in dict_list or not

    True only if *every* size-eligible dict in *dict_list* contains all
    key/value pairs of *target_dict*; dicts smaller than the target are
    skipped (not counted).  False when nothing was compared at all.
    """
    outcomes = []
    for candidate in dict_list:
        # too small to possibly contain the target -- not counted
        if len(candidate) < len(target_dict):
            continue
        matched = {}
        for key, expected in target_dict.items():
            checker = ducktype(expected)
            if key not in candidate:
                continue
            if hasattr(checker, "mtest"):
                hit = checker.mtest(candidate[key])
            else:
                hit = checker == candidate[key]
            if hit:
                matched[key] = expected
        outcomes.append(matched == target_dict)
    return bool(outcomes) and all(outcomes)
python
{ "resource": "" }
q45105
tuple_partial_cmp
train
def tuple_partial_cmp(target_tuple, tuple_list, ducktype):
    """
    Whether partial target_tuple are in tuple_list or not

    True when some tuple in *tuple_list* matches *target_tuple*
    position-by-position on the leading elements (via *ducktype*, whose
    result may carry an ``mtest`` matcher).
    """
    for candidate in tuple_list:
        # candidate too short -- cannot match every target position
        if len(candidate) < len(target_tuple):
            continue
        remaining = len(target_tuple)
        for pos, expected in enumerate(target_tuple):
            checker = ducktype(expected)
            if hasattr(checker, "mtest"):
                if checker.mtest(candidate[pos]):
                    remaining -= 1
            elif checker == candidate[pos]:
                remaining -= 1
        # every position matched => partial match found
        if remaining == 0:
            return True
    return False
python
{ "resource": "" }
q45106
tuple_partial_cmp_always
train
def tuple_partial_cmp_always(target_tuple, tuple_list, ducktype):
    """
    Whether partial target_tuple are always in tuple_list or not

    True only if *every* size-eligible tuple in *tuple_list* matches
    *target_tuple* on the leading positions; shorter tuples are skipped
    (not counted).  False when nothing was compared at all.
    """
    outcomes = []
    for candidate in tuple_list:
        if len(candidate) < len(target_tuple):
            continue
        remaining = len(target_tuple)
        for pos, expected in enumerate(target_tuple):
            checker = ducktype(expected)
            if hasattr(checker, "mtest"):
                if checker.mtest(candidate[pos]):
                    remaining -= 1
            elif checker == candidate[pos]:
                remaining -= 1
        outcomes.append(remaining == 0)
    return bool(outcomes) and all(outcomes)
python
{ "resource": "" }
q45107
register_from_options
train
def register_from_options(options=None, template=None, extractor=None):
    """Register the spec codec using the provided options

    :param options: raw options handed to ``Config.setup``.
    :param template: spec-option template; defaults to
        ``noseOfYeti.plugins.support.spec_options.spec_options``.
    :param extractor: callable that extracts option values from *options*;
        defaults to ``extract_options_dict`` from the same module.
    """
    if template is None:
        from noseOfYeti.plugins.support.spec_options import spec_options as template
    if extractor is None:
        from noseOfYeti.plugins.support.spec_options import extract_options_dict as extractor
    config = Config(template)
    config.setup(options, extractor)

    # Build the import preamble injected into translated spec files.
    imports = determine_imports(
          extra_imports = ';'.join([d for d in config.extra_import if d])
        , with_default_imports = config.with_default_imports
        )

    tok = Tokeniser(
          default_kls = config.default_kls
        , import_tokens = imports
        , wrapped_setup = config.wrapped_setup
        , with_describe_attrs = not config.no_describe_attrs
        )

    # Hook the configured tokeniser into Python's codec machinery.
    TokeniserCodec(tok).register()
python
{ "resource": "" }
q45108
TokeniserCodec.register
train
def register(self):
    """Register spec codec

    Installs a "spec" codec with ``codecs.register``: the returned
    CodecInfo routes stream reading / decoding through ``self.dealwith``
    so spec files are translated before Python compiles them.
    """
    # Assume utf8 encoding
    utf8 = encodings.search_function('utf8')

    class StreamReader(utf_8.StreamReader):
        """Used by cPython to deal with a spec file"""
        def __init__(sr, stream, *args, **kwargs):
            codecs.StreamReader.__init__(sr, stream, *args, **kwargs)
            # Translate the whole stream up front, then replay the result.
            data = self.dealwith(sr.stream.readline)
            sr.stream = StringIO(data)

    def decode(text, *args, **kwargs):
        """Used by pypy and pylint to deal with a spec file"""
        return_tuple = kwargs.get("return_tuple", True)
        if six.PY3:
            if hasattr(text, 'tobytes'):
                text = text.tobytes().decode('utf8')
            else:
                text = text.decode('utf8')

        buffered = StringIO(text)

        # Determine if we need to have imports for this string
        # It may be a fragment of the file
        has_spec = regexes['encoding_matcher'].search(buffered.readline())
        no_imports = not has_spec
        buffered.seek(0)

        # Translate the text
        if six.PY2:
            utf8 = encodings.search_function('utf8')  # Assume utf8 encoding
            reader = utf8.streamreader(buffered)
        else:
            reader = buffered
        data = self.dealwith(reader.readline, no_imports=no_imports)

        # If nothing was changed, then we want to use the original file/line
        # Also have to replace indentation of original line with indentation of new line
        # To take into account nested describes
        if text and not regexes['only_whitespace'].match(text):
            if regexes['whitespace'].sub('', text) == regexes['whitespace'].sub('', data):
                bad_indentation = regexes['leading_whitespace'].search(text).groups()[0]
                good_indentation = regexes['leading_whitespace'].search(data).groups()[0]
                data = '%s%s' % (good_indentation, text[len(bad_indentation):])

        # If text is empty and data isn't, then we should return text
        if len(text) == 0 and len(data) == 1:
            if return_tuple:
                return "", 0
            else:
                return ""

        # Return translated version and it's length
        if return_tuple:
            return data, len(data)
        else:
            return data

    incrementaldecoder = utf8.incrementaldecoder
    if six.PY3:
        def incremental_decode(decoder, *args, **kwargs):
            """Wrapper for decode from IncrementalDecoder"""
            kwargs["return_tuple"] = False
            return decode(*args, **kwargs)
        # Subclass built dynamically so PY2 never evaluates PY3-only pieces.
        incrementaldecoder = type("IncrementalDecoder", (utf8.incrementaldecoder, ), {"decode": incremental_decode})

    def search_function(s):
        """Determine if a file is of spec encoding and return special CodecInfo if it is"""
        if s != 'spec':
            return None
        return codecs.CodecInfo(
              name='spec'
            , encode=utf8.encode
            , decode=decode
            , streamreader=StreamReader
            , streamwriter=utf8.streamwriter
            , incrementalencoder=utf8.incrementalencoder
            , incrementaldecoder=incrementaldecoder
            )

    # Do the register
    codecs.register(search_function)
python
{ "resource": "" }
q45109
TokeniserCodec.output_for_debugging
train
def output_for_debugging(self, stream, data):
    """It will write the translated version of the file"""
    # Dump the translated source next to the original, suffixed ".spec.out".
    out_path = '%s.spec.out' % stream.name
    with open(out_path, 'w') as handle:
        handle.write(str(data))
python
{ "resource": "" }
q45110
ChoicesField.valid_value
train
def valid_value(self, value):
    """
    Check if the provided value is a valid choice.

    Accepts either a raw value or a ``Constant`` (whose ``name`` is used);
    comparison is done both directly and through ``force_text`` so that
    str/non-str pairs still match.
    """
    if isinstance(value, Constant):
        value = value.name
    text_value = force_text(value)
    for option_value, option_label, option_title in self.choices:
        if value == option_value or text_value == force_text(option_value):
            return True
    return False
python
{ "resource": "" }
q45111
FileSize._unit_info
train
def _unit_info(self) -> Tuple[str, int]:
    """
    Returns both the best unit to measure the size, and its power.

    :return: A tuple containing the unit and its power.
    """
    # Thresholds checked smallest-first; the first power of 1024 that
    # bounds abs(size) wins.
    magnitude = abs(self.size)
    if magnitude < 1024:
        return 'B', 1
    if magnitude < 1024 ** 2:
        return 'KB', 1024
    if magnitude < 1024 ** 3:
        return 'MB', 1024 ** 2
    if magnitude < 1024 ** 4:
        return 'GB', 1024 ** 3
    return 'TB', 1024 ** 4
python
{ "resource": "" }
q45112
execute_sql
train
def execute_sql(
        host,
        username,
        password,
        sql,
        schema=None,
        param=None,
        kwargs=None
):
    """EXECUTE MANY LINES OF SQL (FROM SQLDUMP FILE, MAYBE?

    Shells out to the ``mysql`` command-line client because there is no
    bulk-execute API.  Raises (via Log.error) when the client cannot be
    called or exits non-zero.
    """
    kwargs.schema = coalesce(kwargs.schema, kwargs.database)

    if param:
        # NOTE(review): the connection appears unused; only template
        # expansion happens inside this block -- confirm intent.
        with MySQL(kwargs) as temp:
            sql = expand_template(sql, quote_param(param))

    # We have no way to execute an entire SQL file in bulk, so we
    # have to shell out to the commandline client.
    # SECURITY NOTE: -p<password> exposes the password in the process list.
    args = [
        "mysql",
        "-h{0}".format(host),
        "-u{0}".format(username),
        "-p{0}".format(password)
    ]
    if schema:
        args.append("{0}".format(schema))
    try:
        proc = subprocess.Popen(
            args,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            bufsize=-1
        )
        if is_text(sql):
            sql = sql.encode("utf8")
        (output, _) = proc.communicate(sql)
    except Exception as e:
        raise Log.error("Can not call \"mysql\"", e)

    if proc.returncode:
        # Avoid logging giant SQL payloads.
        if len(sql) > 10000:
            sql = "<" + text_type(len(sql)) + " bytes of sql>"
        Log.error(
            "Unable to execute sql: return code {{return_code}}, {{output}}:\n {{sql}}\n",
            sql=indent(sql),
            return_code=proc.returncode,
            output=output
        )
python
{ "resource": "" }
q45113
quote_value
train
def quote_value(value):
    """
    convert values to mysql code for the same
    mostly delegate directly to the mysql lib, but some exceptions exist
    """
    try:
        if value == None:
            return SQL_NULL
        elif isinstance(value, SQL):
            return quote_sql(value.template, value.param)
        elif is_text(value):
            # escape character-by-character through ESCAPE_DCT
            return SQL("'" + "".join(ESCAPE_DCT.get(c, c) for c in value) + "'")
        elif is_data(value):
            # mappings are stored as JSON strings
            return quote_value(json_encode(value))
        elif is_number(value):
            return SQL(text_type(value))
        elif isinstance(value, datetime):
            return SQL("str_to_date('" + value.strftime("%Y%m%d%H%M%S.%f") + "', '%Y%m%d%H%i%s.%f')")
        elif isinstance(value, Date):
            return SQL("str_to_date('" + value.format("%Y%m%d%H%M%S.%f") + "', '%Y%m%d%H%i%s.%f')")
        elif hasattr(value, '__iter__'):
            # other iterables also become JSON
            return quote_value(json_encode(value))
        else:
            # last resort: quote the text representation
            return quote_value(text_type(value))
    except Exception as e:
        Log.error("problem quoting SQL {{value}}", value=repr(value), cause=e)
python
{ "resource": "" }
q45114
int_list_packer
train
def int_list_packer(term, values):
    """
    return singletons, ranges and exclusions

    Compress a collection of integers into an ES-style filter: dense runs
    become {"range": ...} clauses (minus per-range exclusions); everything
    else stays as {"terms": ...} singletons.
    """
    DENSITY = 10  # a range can have holes, this is inverse of the hole density
    MIN_RANGE = 20  # min members before a range is allowed to be used

    singletons = set()
    ranges = []
    exclude = set()

    # NOTE(review): shadows the ``sorted`` builtin
    sorted = jx.sort(values)

    last = sorted[0]
    curr_start = last
    curr_excl = set()

    for v in sorted[1::]:
        if v <= last + 1:
            # contiguous -- the run simply extends
            pass
        elif v - last > 3:
            # big step, how do we deal with it?
            if last == curr_start:
                # not a range yet, so just add as singlton
                singletons.add(last)
            elif last - curr_start - len(curr_excl) < MIN_RANGE or ((last - curr_start) < len(curr_excl) * DENSITY):
                # small ranges are singletons, sparse ranges are singletons
                singletons |= set(range(curr_start, last + 1))
                singletons -= curr_excl
            else:
                # big enough, and dense enough range
                ranges.append({"gte": curr_start, "lte": last})
                exclude |= curr_excl
            curr_start = v
            curr_excl = set()
        else:
            if 1 + last - curr_start >= len(curr_excl) * DENSITY:
                # high density, keep track of excluded and continue
                add_me = set(range(last + 1, v))
                curr_excl |= add_me
            elif 1 + last - curr_start - len(curr_excl) < MIN_RANGE:
                # not big enough, convert range to singletons
                new_singles = set(range(curr_start, last + 1)) - curr_excl
                singletons = singletons | new_singles
                curr_start = v
                curr_excl = set()
            else:
                ranges.append({"gte": curr_start, "lte": last})
                exclude |= curr_excl
                curr_start = v
                curr_excl = set()
        last = v

    # Flush the final run using the same rules as inside the loop.
    if last == curr_start:
        # not a range yet, so just add as singlton
        singletons.add(last)
    elif last - curr_start - len(curr_excl) < MIN_RANGE or ((last - curr_start) < len(curr_excl) * DENSITY):
        # small ranges are singletons, sparse ranges are singletons
        singletons |= set(range(curr_start, last + 1))
        singletons -= curr_excl
    else:
        # big enough, and dense enough range
        ranges.append({"gte": curr_start, "lte": last})
        exclude |= curr_excl

    if ranges:
        r = {"or": [{"range": {term: r}} for r in ranges]}
        if exclude:
            r = {"and": [r, {"not": {"terms": {term: jx.sort(exclude)}}}]}
        if singletons:
            # combine the remaining singletons with the range filter
            return {"or": [
                {"terms": {term: jx.sort(singletons)}},
                r
            ]}
        else:
            return r
    else:
        return {"terms": {term: values}}
python
{ "resource": "" }
q45115
MySQL.query
train
def query(self, sql, param=None, stream=False, row_tuples=False):
    """
    RETURN LIST OF dicts

    :param sql: SQL template; expanded with *param* via expand_template.
    :param param: values quoted into the template.
    :param stream: if True, return a generator over rows instead of a list.
    :param row_tuples: if True, rows are raw tuples rather than dicts.
    """
    if not self.cursor:  # ALLOW NON-TRANSACTIONAL READS
        Log.error("must perform all queries inside a transaction")
    self._execute_backlog()

    try:
        if param:
            sql = expand_template(sql, quote_param(param))
        sql = self.preamble + outdent(sql)
        self.debug and Log.note("Execute SQL:\n{{sql}}", sql=indent(sql))

        self.cursor.execute(sql)
        if row_tuples:
            if stream:
                result = self.cursor
            else:
                result = wrap(list(self.cursor))
        else:
            # Build dict rows keyed by the cursor's column names.
            columns = [utf8_to_unicode(d[0]) for d in coalesce(self.cursor.description, [])]
            if stream:
                result = (wrap({c: utf8_to_unicode(v) for c, v in zip(columns, row)}) for row in self.cursor)
            else:
                result = wrap([{c: utf8_to_unicode(v) for c, v in zip(columns, row)} for row in self.cursor])
        return result
    except Exception as e:
        e = Except.wrap(e)
        if "InterfaceError" in e:
            Log.error("Did you close the db connection?", e)
        Log.error("Problem executing SQL:\n{{sql|indent}}", sql=sql, cause=e, stack_depth=1)
python
{ "resource": "" }
q45116
deactivate
train
def deactivate():
    """
    Deactivate a state in this thread.

    Clears the thread-local ``current_state`` and ``schema`` attributes
    from ``_mode``, then asks every known connection that supports it to
    reset its schema.
    """
    if hasattr(_mode, "current_state"):
        del _mode.current_state
    if hasattr(_mode, "schema"):
        del _mode.schema
    for k in connections:
        con = connections[k]
        # Only some connection backends expose reset_schema.
        if hasattr(con, 'reset_schema'):
            con.reset_schema()
python
{ "resource": "" }
q45117
collections
train
def collections(record, key, value):
    """Parse custom MARC tag 980."""
    # Map MARC subfields onto named collection roles; absent subfields
    # come back as None via dict.get.
    subfield_map = (('primary', 'a'), ('secondary', 'b'), ('deleted', 'c'))
    return {name: value.get(code) for name, code in subfield_map}
python
{ "resource": "" }
q45118
reverse_collections
train
def reverse_collections(self, key, value):
    """Reverse colections field to custom MARC tag 980."""
    # Inverse of ``collections``: named roles back to MARC subfield codes.
    pairs = (('a', 'primary'), ('b', 'secondary'), ('c', 'deleted'))
    return {code: value.get(name) for code, name in pairs}
python
{ "resource": "" }
q45119
_select1
train
def _select1(data, field, depth, output):
    """
    SELECT A SINGLE FIELD

    Walk each record in *data* down the path ``field[depth:]``, appending
    leaf values to *output*; appends None when the path dead-ends, and
    recurses (one segment deeper) when an intermediate value is a list.
    """
    for d in data:
        for i, f in enumerate(field[depth:]):
            d = d[f]
            if d == None:
                output.append(None)
                break
            elif is_list(d):
                # Fan out over the nested list, resuming at the next
                # path segment.
                _select1(d, field, i + 1, output)
                break
        else:
            # for-else: the whole path was traversed without break,
            # so d is the leaf value.
            output.append(d)
python
{ "resource": "" }
q45120
search_form
train
def search_form(*fields, **kwargs):
    """
    Construct a search form filter form using the fields provided as
    arguments to this function. By default a field will be created for
    each field passed and hidden field will be created for search.

    If you pass the key work argument `search_only` then only a visible
    search field will be created on the form.

    Passing `status_filter` will include a version status filter on this
    form.
    """
    fdict = {
        'search_fields': set(fields)
    }
    if kwargs.get('search_only'):
        # search-only: the search box is the single visible input
        fdict['search'] = forms.CharField(max_length=255, required=False)
    else:
        fdict['search'] = forms.CharField(max_length=255, required=False, widget=forms.HiddenInput)
        # one visible filter input per named field
        for f in fields:
            fdict[f] = forms.CharField(max_length=255, required=False)
    # Build the form class dynamically on the appropriate base.
    if kwargs.get('status_filter', False):
        return type("filterform", (VersionFilterForm,), fdict)
    else:
        return type("filterform", (BaseFilterForm,), fdict)
python
{ "resource": "" }
q45121
BaseFilterForm.get_filter_fields
train
def get_filter_fields(self, exclude=None):
    """
    Get the fields that are normal filter fields
    """
    # Start from the form's own exclusions, then fold in the caller's.
    excluded = set(self.exclude)
    if exclude:
        excluded |= set(exclude)
    return [name for name in self.fields if name not in excluded]
python
{ "resource": "" }
q45122
BaseFilterForm.get_search_fields
train
def get_search_fields(self, exclude=None):
    """
    Get the fields for searching for an item.

    Returns the filter fields minus the dedicated search fields (when more
    than one search field is configured), combined with any names in
    *exclude*.

    :param exclude: optional iterable of extra field names to exclude.
    """
    # BUG FIX: the original did ``set(exclude)`` unconditionally, which
    # raised TypeError for the default ``exclude=None``.
    exclude = set(exclude) if exclude is not None else set()
    if self.search_fields and len(self.search_fields) > 1:
        exclude = exclude.union(self.search_fields)
    return self.get_filter_fields(exclude=exclude)
python
{ "resource": "" }
q45123
BaseFilterForm.get_filter_kwargs
train
def get_filter_kwargs(self):
    """
    Translates the cleaned data into a dictionary that can used to
    generate the filter removing blank values.

    Returns {} when the form does not validate.
    """
    if self.is_valid():
        filter_kwargs = {}
        for field in self.get_filter_fields():
            # Prefer the per-field empty_values when the field defines it;
            # otherwise fall back to the module-level EMPTY_VALUES.
            empty_values = EMPTY_VALUES
            if hasattr(self.fields[field], 'empty_values'):
                empty_values = self.fields[field].empty_values
            value = self.cleaned_data.get(field)
            if not value in empty_values:
                if self.search_fields and field in self.search_fields:
                    # search fields filter with case-insensitive contains
                    filter_kwargs["%s__icontains" % field] = value
                else:
                    filter_kwargs[field] = value
        return filter_kwargs
    else:
        return {}
python
{ "resource": "" }
q45124
BaseFilterForm.get_filter
train
def get_filter(self):
    """
    Returns a list of Q objects that is created by passing for the
    keyword arguments from `self.get_filter_kwargs`.

    If search_fields are specified and we received a seach query all
    search_fields will be queried use using OR (|) for that term and any
    specific terms for those search_fields will be ignored.

    Returns an empty list if there is nothing to filter on.
    """
    args = []
    filter_kwargs = self.get_filter_kwargs()
    search = filter_kwargs.pop('search', None)
    if search and self.search_fields:
        # OR together one icontains Q per search field, dropping any
        # field-specific terms for those same fields.
        search_args = []
        for f in self.search_fields:
            k = '%s__icontains' % f
            filter_kwargs.pop(k, None)
            q = Q(**{k: search})
            if search_args:
                # fold into the single accumulated Q via OR
                q = search_args[0] | q
                search_args[0] = q
            else:
                search_args.append(q)
        args.append(search_args[0])
    if filter_kwargs:
        args.append(Q(**filter_kwargs))
    return args
python
{ "resource": "" }
q45125
parse_wiki_terms
train
def parse_wiki_terms(doc):
    '''who needs an html parser. fragile hax, but checks the result at the end'''
    results = []
    window = ['', '', '']  # sliding window of the last three stripped lines
    skipping_header = True
    for raw_line in doc.split('\n'):
        window.pop(0)
        window.append(raw_line.strip())
        is_row = all(cell.startswith('<td>') and cell != '<td></td>' for cell in window)
        if not is_row:
            continue
        if skipping_header:
            # first complete <td> triple is the table header
            skipping_header = False
            continue
        # Rebind the window to the stripped cell text so the same lines
        # cannot re-trigger a row match on the next iteration.
        window = [cell.replace('<td>', '').replace('</td>', '').strip() for cell in window]
        rank, term, count = window
        rank = int(rank.split()[0])
        term = term.replace('</a>', '')
        term = term[term.index('>') + 1:].lower()
        results.append(term)
    assert len(results) in [1000, 2000, 1284]  # early docs have 1k entries, later have 2k, last doc has 1284
    return results
python
{ "resource": "" }
q45126
filter_short
train
def filter_short(terms):
    '''
    only keep if brute-force possibilities are greater than this word's rank in the dictionary
    '''
    kept = []
    for rank, term in enumerate(terms):
        # 26**len(term) = number of lowercase-alpha strings of that length
        if 26 ** len(term) > rank:
            kept.append(term)
    return kept
python
{ "resource": "" }
q45127
filter_dup
train
def filter_dup(lst, lists):
    '''
    filters lst to only include terms that don't have lower rank in another list
    '''
    worst_rank = len(lst) + 1
    own_ranks = to_ranked_dict(lst)
    other_ranks = [to_ranked_dict(other) for other in lists]

    def _wins(word):
        # keep the word only if our rank beats its rank in every other list
        return all(own_ranks[word] < ranks.get(word, worst_rank) for ranks in other_ranks)

    return [word for word in lst if _wins(word)]
python
{ "resource": "" }
q45128
Matcher.__get_match_result
train
def __get_match_result(self, ret, ret2):
    """ Getting match result

    Combine two match outcomes according to ``self.another_compare``:
    AND, OR, or (by default) just the first result.
    """
    combiner = self.another_compare
    if combiner == "__MATCH_AND__":
        return ret and ret2
    if combiner == "__MATCH_OR__":
        return ret or ret2
    return ret
python
{ "resource": "" }
q45129
BindXmlReader.get_stats
train
def get_stats(self):
    """Given XML version, parse create XMLAbstract object and sets xml_stats attribute.

    Raises XmlError when the version cannot be determined or is not yet
    supported.
    """
    self.gather_xml()
    # BUG FIX: guard against find() returning None before subscripting --
    # the original raised TypeError instead of the intended XmlError when
    # the 'statistics' tag was absent.
    statistics_tag = self.bs_xml.find('statistics')
    if statistics_tag is None:
        raise XmlError("Unable to determine XML version via 'statistics' tag.")
    self.xml_version = statistics_tag['version']
    if self.xml_version is None:
        raise XmlError("Unable to determine XML version via 'statistics' tag.")
    # 3.8 and 3.11 (BIND 9.12) share the 3.6 XML schema.
    if self.xml_version in ('3.6', '3.8', '3.11'):
        self.stats = XmlV36(self.bs_xml)
    else:
        raise XmlError('Support must be added before being able to support newly-encountered XML version %s.' % self.xml_version)
python
{ "resource": "" }
q45130
median
train
def median(values, simple=True, mean_weight=0.0):
    """
    RETURN MEDIAN VALUE

    IF simple=False THEN IN THE EVENT MULTIPLE INSTANCES OF THE MEDIAN
    VALUE, THE MEDIAN IS INTERPOLATED BASED ON ITS POSITION IN THE MEDIAN
    RANGE

    mean_weight IS TO PICK A MEDIAN VALUE IN THE ODD CASE THAT IS CLOSER
    TO THE MEAN (PICK A MEDIAN BETWEEN TWO MODES IN BIMODAL CASE)
    """
    if OR(v == None for v in values):
        Log.error("median is not ready to handle None")

    try:
        if not values:
            return Null

        l = len(values)
        _sorted = sorted(values)

        middle = int(l / 2)
        _median = float(_sorted[middle])

        if len(_sorted) == 1:
            return _median

        if simple:
            # classic median: average the two middle values when even
            if l % 2 == 0:
                return (_sorted[middle - 1] + _median) / 2
            return _median

        # FIND RANGE OF THE median
        start_index = middle - 1
        while start_index > 0 and _sorted[start_index] == _median:
            start_index -= 1
        start_index += 1
        stop_index = middle + 1
        while stop_index < l and _sorted[stop_index] == _median:
            stop_index += 1

        num_middle = stop_index - start_index

        if l % 2 == 0:
            if num_middle == 1:
                return (_sorted[middle - 1] + _median) / 2
            else:
                # interpolate within the run of equal median values
                return (_median - 0.5) + (middle - start_index) / num_middle
        else:
            if num_middle == 1:
                # blend toward the neighbours' mean by mean_weight
                return (1 - mean_weight) * _median + mean_weight * (_sorted[middle - 1] + _sorted[middle + 1]) / 2
            else:
                return (_median - 0.5) + (middle + 0.5 - start_index) / num_middle
    except Exception as e:
        Log.error("problem with median of {{values}}", values= values, cause=e)
python
{ "resource": "" }
q45131
percentile
train
def percentile(values, percent):
    """
    PERCENTILE WITH INTERPOLATION
    RETURN VALUE AT, OR ABOVE, percentile OF THE VALUES

    snagged from http://code.activestate.com/recipes/511478-finding-the-percentile-of-the-values/
    """
    ordered = sorted(values)
    if not ordered:
        return None
    rank = (len(ordered) - 1) * percent
    lo = int(math.floor(rank))
    hi = int(math.ceil(rank))
    if lo == hi:
        # rank landed exactly on an element -- no interpolation needed
        return ordered[int(rank)]
    # linear interpolation between the two bracketing values
    return ordered[lo] * (hi - rank) + ordered[hi] * (rank - lo)
python
{ "resource": "" }
q45132
AttrIndexedDict.map
train
def map(self, method: str, *args, _threaded: bool = True, **kwargs
        ) -> "AttrIndexedDict":
    "For all stored items, run a method they possess."
    # Bind the named method call once; applied to every stored item.
    work = lambda item: getattr(item, method)(*args, **kwargs)

    if _threaded:
        # Pool size comes from the GENERAL.parallel_requests config entry.
        pool = ThreadPool(int(config.CFG["GENERAL"]["parallel_requests"]))
        try:
            pool.map(work, self.data.values())
        except KeyboardInterrupt:
            LOG.warning("CTRL-C caught, finishing current tasks...")
            pool.terminate()
        else:
            pool.close()
            pool.join()
        return self

    # sequential fallback when threading is disabled
    for item in self.data.values():
        work(item)
    return self
python
{ "resource": "" }
q45133
AttrIndexedDict.put
train
def put(self, *items) -> "AttrIndexedDict":
    "Add items to the dict that will be indexed by self.attr."
    index_attr = self.attr
    for entry in items:
        # Key each entry by the value of its indexing attribute.
        self.data[getattr(entry, index_attr)] = entry
    return self
python
{ "resource": "" }
q45134
main
train
def main(host, password, username):
    """Console script for tplink.

    Connects with *password*, dumps the connected devices as
    pretty-printed JSON to stdout, and returns 0.

    NOTE(review): *host* and *username* are accepted but never used --
    confirm whether they should be passed to TpLinkClient.
    """
    client = tplink.TpLinkClient(password)
    devices = client.get_connected_devices()
    click.echo(json.dumps(devices, indent=4))
    return 0
python
{ "resource": "" }
q45135
flatten
train
def flatten(value):
    """value can be any nesting of tuples, arrays, dicts.
    returns 1D numpy array and an unflatten function.

    Each branch returns (flat_vector, unflatten) where unflatten inverts
    the flattening for a vector of the same length.
    """
    if isinstance(value, np.ndarray):
        # arrays: ravel, remember the shape for the inverse
        def unflatten(vector):
            return np.reshape(vector, value.shape)
        return np.ravel(value), unflatten
    elif isinstance(value, float):
        return np.array([value]), lambda x: x[0]
    elif isinstance(value, tuple):
        if not value:
            return np.array([]), lambda x: ()
        # recurse on head and tail, then stitch the inverses together
        flattened_first, unflatten_first = flatten(value[0])
        flattened_rest, unflatten_rest = flatten(value[1:])
        def unflatten(vector):
            N = len(flattened_first)
            return (unflatten_first(vector[:N]),) + unflatten_rest(vector[N:])
        return np.concatenate((flattened_first, flattened_rest)), unflatten
    elif isinstance(value, list):
        if not value:
            return np.array([]), lambda x: []
        flattened_first, unflatten_first = flatten(value[0])
        flattened_rest, unflatten_rest = flatten(value[1:])
        def unflatten(vector):
            N = len(flattened_first)
            return [unflatten_first(vector[:N])] + unflatten_rest(vector[N:])
        return np.concatenate((flattened_first, flattened_rest)), unflatten
    elif isinstance(value, dict):
        flattened = []
        unflatteners = []
        lengths = []
        keys = []
        # sort by key so flatten/unflatten ordering is deterministic
        for k, v in sorted(value.items(), key=itemgetter(0)):
            cur_flattened, cur_unflatten = flatten(v)
            flattened.append(cur_flattened)
            unflatteners.append(cur_unflatten)
            lengths.append(len(cur_flattened))
            keys.append(k)
        def unflatten(vector):
            split_ixs = np.cumsum(lengths)
            pieces = np.split(vector, split_ixs)
            return {key: unflattener(piece)
                    for piece, unflattener, key in zip(pieces, unflatteners, keys)}
        return np.concatenate(flattened), unflatten
    else:
        raise Exception("Don't know how to flatten type {}".format(type(value))
                        )
python
{ "resource": "" }
q45136
switch_state
train
def switch_state(request):
    """
    Switch the default version state in the session.

    Toggles the boolean stored under SESSION_KEY, then redirects to the
    'redirect_to' query parameter, constrained to local paths only.
    """
    # Toggle: missing/falsey becomes True, truthy becomes False.
    if request.session.get(SESSION_KEY):
        request.session[SESSION_KEY] = False
    else:
        request.session[SESSION_KEY] = True

    # Get redirect location
    # Don't go to non local paths
    url = request.GET.get('redirect_to', '/')
    # SECURITY FIX: the original only rejected "http..." prefixes, which
    # still allowed open redirects via scheme-relative URLs ("//evil.com")
    # and other non-local targets. Require a plain local path.
    if url.startswith('http') or url.startswith('//') or not url.startswith('/'):
        url = '/'
    return redirect(url)
python
{ "resource": "" }
q45137
_CollectHistory_
train
def _CollectHistory_(lookupType, fromVal, toVal, using=None, pattern=''):
    """
    Return a dictionary detailing what, if any, change was made to a record field

    :param string lookupType: what cleaning rule made the change; one of:
        genericLookup, genericRegex, fieldSpecificLookup, fieldSpecificRegex,
        normLookup, normRegex, normIncludes, deriveValue, copyValue, deriveRegex
    :param string fromVal: previous field value
    :param string toVal: new string value
    :param dict using: field values used to derive new values; only applicable
        for deriveValue, copyValue, deriveRegex
    :param string pattern: which regex pattern was matched to make the change;
        only applicable for genericRegex, fieldSpecificRegex, deriveRegex
    """
    # BUG FIX: mutable default argument ({}) replaced with a None sentinel;
    # an empty dict is substituted so the existing `using != ''` check
    # behaves exactly as before.
    if using is None:
        using = {}
    histObj = {}
    if fromVal != toVal:
        histObj[lookupType] = {"from": fromVal, "to": toVal}
        if lookupType in ['deriveValue', 'deriveRegex', 'copyValue', 'normIncludes', 'deriveIncludes'] and using != '':
            histObj[lookupType]["using"] = using
        if lookupType in ['genericRegex', 'fieldSpecificRegex', 'normRegex', 'deriveRegex'] and pattern != '':
            histObj[lookupType]["pattern"] = pattern
    return histObj
python
{ "resource": "" }
q45138
_CollectHistoryAgg_
train
def _CollectHistoryAgg_(contactHist, fieldHistObj, fieldName):
    """
    Return updated history dictionary with new field change

    :param dict contactHist: Existing contact history dictionary
    :param dict fieldHistObj: Output of _CollectHistory_
    :param string fieldName: field name
    """
    if fieldHistObj != {}:
        # Lazily create the per-field bucket, then merge every
        # lookup-type entry from the new change record.
        field_bucket = contactHist.setdefault(fieldName, {})
        for lookup_type, change in fieldHistObj.items():
            field_bucket[lookup_type] = change
    return contactHist
python
{ "resource": "" }
q45139
_RunUserDefinedFunctions_
train
def _RunUserDefinedFunctions_(config, data, histObj, position, namespace=__name__):
    """
    Return a single updated data record and history object after running
    user-defined functions

    :param dict config: DWM configuration (see DataDictionary)
    :param dict data: single record (dictionary) to which user-defined
        functions should be applied
    :param dict histObj: History object to which changes should be appended
    :param string position: position name of which function set from config
        should be run
    :param namespace: namespace of current working script; must be passed
        if using user-defined functions
    """
    udfConfig = config['userDefinedFunctions']

    if position in udfConfig:
        posConfig = udfConfig[position]
        for udf in posConfig.keys():
            posConfigUDF = posConfig[udf]
            # Resolve the configured function name on the caller's module
            # and apply it; each UDF returns an updated (data, histObj).
            data, histObj = getattr(sys.modules[namespace], posConfigUDF)(data=data, histObj=histObj)
    return data, histObj
python
{ "resource": "" }
q45140
FakeGPS.feed
train
def feed(self):
    "Feed a line from the contents of the GPS log to the daemon."
    # Cycle through the canned sentences indefinitely.
    line = self.testload.sentences[self.index % len(self.testload.sentences)]
    if "%Delay:" in line:
        # Delay specified number of seconds
        delay = line.split()[1]
        time.sleep(int(delay))
    # self.write has to be set by the derived class
    self.write(line)
    if self.progress:
        self.progress("gpsfake: %s feeds %d=%s\n" % (self.testload.name, len(line), repr(line)))
    # Pause so the daemon can keep up with the feed.
    time.sleep(WRITE_PAD)
    self.index += 1
python
{ "resource": "" }
q45141
DaemonInstance.spawn
train
def spawn(self, options, port, background=False, prefix=""):
    "Spawn a daemon instance."
    self.spawncmd = None

    # Look for gpsd in GPSD_HOME env variable
    if os.environ.get('GPSD_HOME'):
        for path in os.environ['GPSD_HOME'].split(':'):
            _spawncmd = "%s/gpsd" % path
            if os.path.isfile(_spawncmd) and os.access(_spawncmd, os.X_OK):
                self.spawncmd = _spawncmd
                break

    # if we could not find it yet try PATH env variable for it
    if not self.spawncmd:
        if not '/usr/sbin' in os.environ['PATH']:
            os.environ['PATH'] = os.environ['PATH'] + ":/usr/sbin"
        for path in os.environ['PATH'].split(':'):
            _spawncmd = "%s/gpsd" % path
            if os.path.isfile(_spawncmd) and os.access(_spawncmd, os.X_OK):
                self.spawncmd = _spawncmd
                break

    if not self.spawncmd:
        raise DaemonError("Cannot execute gpsd: executable not found. Set GPSD_HOME env variable")
    # The -b option to suppress hanging on probe returns is needed to cope
    # with OpenBSD (and possibly other non-Linux systems) that don't support
    # anything we can use to implement the FakeGPS.read() method
    self.spawncmd += " -b -N -S %s -F %s -P %s %s" % (port, self.control_socket, self.pidfile, options)
    if prefix:
        self.spawncmd = prefix + " " + self.spawncmd.strip()
    if background:
        self.spawncmd += " &"
    status = os.system(self.spawncmd)
    if os.WIFSIGNALED(status) or os.WEXITSTATUS(status):
        raise DaemonError("daemon exited with status %d" % status)
python
{ "resource": "" }
q45142
DaemonInstance.wait_pid
train
def wait_pid(self):
    "Wait for the daemon, get its PID and a control-socket connection."
    # Poll until the daemon has written a fully parseable PID file.
    while True:
        try:
            fp = open(self.pidfile)
        except IOError:
            # Daemon has not created the file yet.
            time.sleep(0.1)
            continue
        # BUG FIX: the original leaked the open file handle every time the
        # PID was not yet fully written (the ValueError path skipped
        # close()); the finally clause closes it on every iteration.
        try:
            fp.seek(0)
            pidstr = fp.read()
            self.pid = int(pidstr)
        except ValueError:
            # Avoid race condition -- PID not yet written
            time.sleep(0.5)
            continue
        finally:
            fp.close()
        break
python
{ "resource": "" }
q45143
DaemonInstance.add_device
train
def add_device(self, path):
    "Add a device to the daemon's internal search list."
    if self.__get_control_socket():
        # Control protocol: "+<path>" asks the daemon to watch the device.
        self.sock.sendall("+%s\r\n\x00" % path)
        self.sock.recv(12)
        self.sock.close()
python
{ "resource": "" }
q45144
DaemonInstance.remove_device
train
def remove_device(self, path):
    "Remove a device from the daemon's internal search list."
    if self.__get_control_socket():
        # Control protocol: "-<path>" asks the daemon to drop the device.
        self.sock.sendall("-%s\r\n\x00" % path)
        self.sock.recv(12)
        self.sock.close()
python
{ "resource": "" }
q45145
DaemonInstance.kill
train
def kill(self):
    """Kill the daemon instance and wait until it is really gone.

    SIGTERM is sent repeatedly; os.kill raises OSError (ESRCH) once the
    process no longer exists, which ends the loop.  The original sent one
    redundant SIGTERM before entering an identical loop; that duplicate
    is removed here.
    """
    if self.pid:
        try:
            while True:
                os.kill(self.pid, signal.SIGTERM)
                time.sleep(0.01)
        except OSError:
            pass
        self.pid = None
python
{ "resource": "" }
q45146
addParts
train
def addParts(parentPart, childPath, count, index):
    """Build a hierarchy by repeatedly calling this with various childPaths.

    `count` is the number found for this path.
    """
    # NOTE: `== None` (not `is None`) is deliberate throughout -- these
    # values may be mo_dots Null wrappers that only compare equal to None.
    if index == None:
        index = 0
    if index == len(childPath):
        return
    child = childPath[index]
    parentPart.count = coalesce(parentPart.count, 0) + count
    if parentPart.partitions == None:
        parentPart.partitions = FlatList()
    for existing in parentPart.partitions:
        if existing.name == child.name:
            addParts(existing, childPath, count, index + 1)
            return
    parentPart.partitions.append(child)
    addParts(child, childPath, count, index + 1)
python
{ "resource": "" }
q45147
APIChoiceWidget.get_qs
train
def get_qs(self):
    """Build the mapping used to generate the query string for the api url.

    Combines the foreign key's `limit_choices_to` lookup (converted to a
    query-string-friendly form) with any `extra_query_kwargs` specified
    on the widget.
    """
    params = url_params_from_lookup_dict(self.rel.limit_choices_to) or {}
    if self.extra_query_kwargs:
        params.update(self.extra_query_kwargs)
    return params
python
{ "resource": "" }
q45148
APIChoiceWidget.get_api_link
train
def get_api_link(self):
    """Return the api url with a query string appended.

    Always adds type=choices so the endpoint returns json.  Any filter
    arguments from `get_qs` are appended both as filters and as a list of
    `exclude` keys; it is up to the destination url to respect them.
    """
    url = self._api_link
    if not url:
        return url
    params = self.get_qs()
    url = "%s?type=choices" % url
    if params:
        pairs = [u'%s=%s' % (k, urllib.quote(unicode(v).encode('utf8')))
                 for k, v in params.items()]
        url = "%s&amp;%s" % (url, u'&amp;'.join(pairs))
        excludes = [u'exclude=%s' % k for k in params.keys()]
        url = "%s&amp;%s" % (url, u'&amp;'.join(excludes))
    return url
python
{ "resource": "" }
q45149
APIChoiceWidget.label_for_value
train
def label_for_value(self, value, key=None):
    """Return a unicode label for the field's current value.

    Looks the value up on the target model; returns force_unicode of the
    match, or an empty string when the value is missing or invalid.
    """
    if not key:
        key = self.rel.get_related_field().name
    if value is None:
        return ''
    try:
        obj = self.model._default_manager.using(self.db).get(**{key: value})
    except (ValueError, self.model.DoesNotExist):
        return ''
    return force_unicode(obj)
python
{ "resource": "" }
q45150
APIManyChoiceWidget.update_links
train
def update_links(self, request, admin_site=None):
    """Update this widget's api/add urls for the given request.

    Looks up the bundle registered as the primary url for the related
    model on `admin_site` and, when found, stores the list and add urls
    (the urls are only set when `request.user` has permission on them).

    :param request: the request for which this widget is being rendered.
    :param admin_site: if provided, used to look up the bundle for the
        model this foreign key points to.
    """
    if not admin_site:
        return
    bundle = admin_site.get_bundle_for_model(self.model.to)
    if not bundle:
        return
    self._api_link = self._get_bundle_link(bundle, self.view, request.user)
    self._add_link = self._get_bundle_link(bundle, self.add_view, request.user)
python
{ "resource": "" }
q45151
SchemaObjectFactory.schema_class
train
def schema_class(self, object_schema, model_name, classes=False):
    """Create an object-class based on `object_schema`.

    Use the returned class to create specific instances and validate data
    values.  See the "python-jsonschema-objects" package for usage.

    Parameters
    ----------
    object_schema : dict
        The JSON-schema that defines the object.
    model_name : str
        The name given to the new class.
    classes : bool
        When True, return the complete dictionary of all object-classes
        resolved while building `object_schema` (useful for deeply nested
        schemas).  When False (default), return only the new class.

    Returns
    -------
    The new class for `object_schema`, or the dict of all resolved
    classes when `classes` is truthy.
    """
    cls_bldr = ClassBuilder(self.resolver)
    model_cls = cls_bldr.construct(model_name, object_schema)
    model_cls.proptype = SchemaObjectFactory.proptype
    # Conditional expression instead of the `[a, b][classes]` index trick:
    # works for any truthy value, not just ints/bools.
    return cls_bldr.resolved if classes else model_cls
python
{ "resource": "" }
q45152
SchemaObjectFactory.__model_class
train
def __model_class(self, model_name):
    """Backing implementation for the lru_cache; do not call directly."""
    schema_copy = deepcopy(self.definitions[model_name])
    return self.schema_class(schema_copy, model_name)
python
{ "resource": "" }
q45153
minimum_entropy_match_sequence
train
def minimum_entropy_match_sequence(password, matches):
    """Return the minimum-entropy analysis for `password`.

    Given a list of possibly overlapping candidate matches, finds the
    non-overlapping subsequence with minimum total entropy via an O(nm)
    dynamic program (n = password length, m = number of matches), fills
    the gaps with brute-force "matches", and packages the result.
    """
    bruteforce_cardinality = calc_bruteforce_cardinality(password)  # e.g. 26 for lowercase
    up_to_k = [0] * len(password)  # minimum entropy up to index k
    # backpointers[k] holds the final match of the optimal sequence ending
    # at k (match['j'] == k); None means it ends with a brute-force char.
    backpointers = []
    for k in range(0, len(password)):
        # Baseline to beat: a brute-force character after the optimum at k-1.
        up_to_k[k] = get(up_to_k, k - 1) + lg(bruteforce_cardinality)
        backpointers.append(None)
        for match in matches:
            if match['j'] != k:
                continue
            i, j = match['i'], match['j']
            # Best entropy up to i-1 plus this match's own entropy.
            candidate_entropy = get(up_to_k, i - 1) + calc_entropy(match)
            if candidate_entropy < up_to_k[j]:
                up_to_k[j] = candidate_entropy
                backpointers[j] = match
    # Walk backwards and decode the best sequence.
    match_sequence = []
    k = len(password) - 1
    while k >= 0:
        match = backpointers[k]
        if match:
            match_sequence.append(match)
            k = match['i'] - 1
        else:
            k -= 1
    match_sequence.reverse()

    # Fill the blanks between pattern matches with brute-force "matches" so
    # the sequence fully covers the password: match1.j == match2.i - 1 for
    # every adjacent match1, match2.
    def make_bruteforce_match(i, j):
        return {
            'pattern': 'bruteforce',
            'i': i,
            'j': j,
            'token': password[i:j + 1],
            'entropy': lg(math.pow(bruteforce_cardinality, j - i + 1)),
            'cardinality': bruteforce_cardinality,
        }

    k = 0
    covered = []
    for match in match_sequence:
        i, j = match['i'], match['j']
        if i - k > 0:
            covered.append(make_bruteforce_match(k, i - 1))
        k = j + 1
        covered.append(match)
    if k < len(password):
        covered.append(make_bruteforce_match(k, len(password) - 1))
    match_sequence = covered

    min_entropy = 0 if len(password) == 0 else up_to_k[len(password) - 1]
    crack_time = entropy_to_crack_time(min_entropy)
    # Final result object.
    return {
        'password': password,
        'entropy': round_to_x_digits(min_entropy, 3),
        'match_sequence': match_sequence,
        'crack_time': round_to_x_digits(crack_time, 3),
        'crack_time_display': display_time(crack_time),
        'score': crack_time_to_score(crack_time),
    }
python
{ "resource": "" }
q45154
round_to_x_digits
train
def round_to_x_digits(number, digits):
    """Return `number` rounded to `digits` decimal digits."""
    scale = math.pow(10, digits)
    return round(number * scale) / scale
python
{ "resource": "" }
q45155
Cube.values
train
def values(self):
    """Yield each cell of the cube as a wrapped dict (SLOW; avoid if possible)."""
    matrix = self.data.values()[0]  # canonical representative
    if matrix.num == 0:
        return
    edge_names = self.edges.name
    select_names = self.select.name
    parts = [
        e.domain.partitions.value if e.domain.primitive else e.domain.partitions
        for e in self.edges
    ]
    for combo in matrix._all_combos():
        try:
            record = {n: parts[i][combo[i]] for i, n in enumerate(edge_names)}
        except Exception as e:
            Log.error("problem", cause=e)
        for s in select_names:
            record[s] = self.data[s][combo]
        yield wrap(record)
python
{ "resource": "" }
q45156
Client.save_swagger_spec
train
def save_swagger_spec(self, filepath=None):
    """Save a copy of the origin_spec to a local file in JSON format.

    When `filepath` is None or True, the path is derived from the class
    `file_spec` template and the client's server name.
    """
    if filepath is True or filepath is None:
        filepath = self.file_spec.format(server=self.server)
    # Context manager flushes and closes the handle; the original left
    # the file object from open(..., 'w+') dangling.
    with open(filepath, 'w') as fp:
        json.dump(self.origin_spec, fp, indent=3)
python
{ "resource": "" }
q45157
Client.load_swagger_spec
train
def load_swagger_spec(self, filepath=None):
    """Load the origin_spec from a local JSON file.

    If `filepath` is not provided, the class `file_spec` template and the
    client's server name are used to build the file path.
    """
    if filepath is True or filepath is None:
        filepath = self.file_spec.format(server=self.server)
    # Context manager closes the handle; the original leaked it.
    with open(filepath) as fp:
        return json.load(fp)
python
{ "resource": "" }
q45158
OssAuth.set_more_headers
train
def set_more_headers(self, req, extra_headers=None):
    """Set content-type, content-md5 and date headers on the request.

    :param req: the origin unsigned request
    :param extra_headers: extra headers to merge in, passed as a dict
    :returns: the prepared request with headers filled in
    """
    oss_url = url.URL(req.url)
    req.headers.update(extra_headers or {})
    # content-type: guess from the url path when not already present
    content_type = req.headers.get("content-type")
    if content_type is None:
        content_type, __ = mimetypes.guess_type(oss_url.path)
    req.headers["content-type"] = content_type or self.DEFAULT_TYPE
    logger.info("set content-type to: {0}".format(content_type))
    # date: current GMT time, unless a pre-signed expiry is in effect
    if self._expires is None:
        req.headers.setdefault(
            "date", time.strftime(self.TIME_FMT, time.gmtime())
        )
    else:
        req.headers["content-type"] = ""
        req.headers["date"] = self._expires
    logger.info("set date to: {0}".format(req.headers["date"]))
    # content-md5: computed from the body unless empty md5 is allowed
    if req.body is None:
        content_md5 = ""
    else:
        content_md5 = req.headers.get("content-md5", "")
        if not content_md5 and self._allow_empty_md5 is False:
            content_md5 = utils.cal_b64md5(req.body)
    req.headers["content-md5"] = content_md5
    logger.info("content-md5 to: [{0}]".format(content_md5))
    return req
python
{ "resource": "" }
q45159
OssAuth.get_signature
train
def get_signature(self, req):
    """Calculate and return the OSS signature for `req`."""
    oss_url = url.URL(req.url)
    # canonicalize the x-oss-* headers: "key:value\n" entries, sorted
    oss_headers = [
        "{0}:{1}\n".format(key, val)
        for key, val in req.headers.lower_items()
        if key.startswith(self.X_OSS_PREFIX)
    ]
    canonicalized_headers = "".join(sorted(oss_headers))
    logger.debug(
        "canonicalized header : [{0}]".format(canonicalized_headers)
    )
    # keep only the sub-resource / override query parameters
    oss_url.params = {
        key: val
        for key, val in oss_url.params.items()
        if key in self.SUB_RESOURCES or key in self.OVERRIDE_QUERIES
    }
    oss_url.forge(key=lambda x: x[0])
    canonicalized_str = "{0}/{1}{2}".format(
        canonicalized_headers, self.get_bucket(oss_url.host), oss_url.uri
    )
    str_to_sign = "\n".join([
        req.method,
        req.headers["content-md5"],
        req.headers["content-type"],
        req.headers["date"],
        canonicalized_str,
    ])
    logger.debug(
        "signature str is \n{0}\n{1}\n{0}\n".format("#" * 20, str_to_sign)
    )
    if isinstance(str_to_sign, requests.compat.str):
        str_to_sign = str_to_sign.encode("utf8")
    signature_bin = hmac.new(self._secret_key, str_to_sign, hashlib.sha1)
    signature = base64.b64encode(signature_bin.digest()).decode("utf8")
    logger.debug("signature is [{0}]".format(signature))
    return signature
python
{ "resource": "" }
q45160
Rename.convert
train
def convert(self, expr):
    """Expand instances of `name` to `value` throughout `expr`."""
    # `== None` (not `is`) is deliberate: expr may be a mo_dots Null wrapper.
    if expr is True or expr == None or expr is False:
        return expr
    if is_number(expr):
        return expr
    if expr == ".":
        return "."
    if is_variable_name(expr):
        return coalesce(self.dimensions[expr], expr)
    if is_text(expr):
        Log.error("{{name|quote}} is not a valid variable name", name=expr)
    elif isinstance(expr, Date):
        return expr
    elif is_op(expr, QueryOp):
        return self._convert_query(expr)
    elif is_data(expr):
        if expr["from"]:
            return self._convert_query(expr)
        elif len(expr) >= 2:
            # assume a named structure, not an expression
            return wrap({name: self.convert(value) for name, value in expr.leaves()})
        else:
            # assume a single-clause expression
            k, v = expr.items()[0]
            return converter_map.get(k, self._convert_bop)(self, k, v)
    elif is_many(expr):
        return wrap([self.convert(value) for value in expr])
    else:
        return expr
python
{ "resource": "" }
q45161
Rename._convert_clause
train
def _convert_clause(self, clause):
    """JSON query expressions have many clauses with similar column declarations."""
    clause = wrap(clause)
    # `== None` handles mo_dots Null wrappers as well as real None
    if clause == None:
        return None
    if is_data(clause):
        return set_default({"value": self.convert(clause.value)}, clause)
    return [set_default({"value": self.convert(c.value)}, c) for c in clause]
python
{ "resource": "" }
q45162
MeteorDatabase.get_obstory_ids
train
def get_obstory_ids(self):
    """Retrieve the IDs of all observatories.

    :return: a list of obstory IDs for all observatories
    """
    self.con.execute('SELECT publicId FROM archive_observatories;')
    # List comprehension instead of map(): returns a real list on both
    # Python 2 and 3 (map() is a lazy iterator on Python 3).
    return [row['publicId'] for row in self.con.fetchall()]
python
{ "resource": "" }
q45163
MeteorDatabase.has_obstory_metadata
train
def has_obstory_metadata(self, status_id):
    """Check for the presence of the given metadata item.

    :param string status_id: the metadata item ID
    :return: True if a metadata item with this ID exists, False otherwise
    """
    self.con.execute('SELECT 1 FROM archive_metadata WHERE publicId=%s;', (status_id,))
    return bool(self.con.fetchall())
python
{ "resource": "" }
q45164
MeteorDatabase.has_file_id
train
def has_file_id(self, repository_fname):
    """Check for the presence of the given file ID.

    :param string repository_fname: the file ID
    :return: True if a :class:`meteorpi_model.FileRecord` with this ID
        exists, False otherwise
    """
    self.con.execute('SELECT 1 FROM archive_files WHERE repositoryFname = %s', (repository_fname,))
    return bool(self.con.fetchall())
python
{ "resource": "" }
q45165
MeteorDatabase.has_observation_id
train
def has_observation_id(self, observation_id):
    """Check for the presence of the given observation ID.

    :param string observation_id: the observation ID
    :return: True if a :class:`meteorpi_model.Observation` with this ID
        exists, False otherwise
    """
    self.con.execute('SELECT 1 FROM archive_observations WHERE publicId = %s', (observation_id,))
    return bool(self.con.fetchall())
python
{ "resource": "" }
q45166
MeteorDatabase.has_obsgroup_id
train
def has_obsgroup_id(self, group_id):
    """Check for the presence of the given group ID.

    :param string group_id: the group ID
    :return: True if a :class:`meteorpi_model.ObservationGroup` with this
        ID exists, False otherwise
    """
    self.con.execute('SELECT 1 FROM archive_obs_groups WHERE publicId = %s', (group_id,))
    return bool(self.con.fetchall())
python
{ "resource": "" }
q45167
MeteorDatabase.get_user
train
def get_user(self, user_id, password):
    """Retrieve a user record after verifying the password.

    :param user_id: the user ID
    :param password: password
    :return: a :class:`meteorpi_model.User` if everything is correct
    :raises: ValueError if the user is not found, or is found but the
        password is incorrect.
    """
    self.con.execute('SELECT uid, pwHash FROM archive_users WHERE userId = %s;', (user_id,))
    rows = self.con.fetchall()
    if not rows:
        raise ValueError("No such user")
    # Check the password against the stored bcrypt hash
    if not passlib.hash.bcrypt.verify(password, rows[0]['pwHash']):
        raise ValueError("Incorrect password")
    # Fetch this user's list of roles
    self.con.execute('SELECT name FROM archive_roles r INNER JOIN archive_user_roles u ON u.roleId=r.uid '
                     'WHERE u.userId = %s;', (rows[0]['uid'],))
    role_list = [row['name'] for row in self.con.fetchall()]
    return mp.User(user_id=user_id, roles=role_list)
python
{ "resource": "" }
q45168
MeteorDatabase.get_users
train
def get_users(self):
    """Retrieve all users in the system.

    :return: a list of :class:`meteorpi_model.User`
    """
    self.con.execute('SELECT userId, uid FROM archive_users;')
    users = []
    # fetchall() materializes the rows, so re-using the cursor inside the
    # loop (for the role lookups) is safe
    for row in self.con.fetchall():
        self.con.execute('SELECT name FROM archive_roles r INNER JOIN archive_user_roles u ON u.roleId=r.uid '
                         'WHERE u.userId = %s;', (row['uid'],))
        roles = [r['name'] for r in self.con.fetchall()]
        users.append(mp.User(user_id=row['userId'], roles=roles))
    return users
python
{ "resource": "" }
q45169
MeteorDatabase.create_or_update_user
train
def create_or_update_user(self, user_id, password, roles):
    """Create a new user record, or update an existing one.

    :param user_id: user ID to update or create
    :param password: new password, or None to leave unchanged
    :param roles: new roles, or None to leave unchanged
    :return: the action taken, one of "none", "update", "create"
    :raises: ValueError if there is no existing user and password is None
    """
    action = "update"
    self.con.execute('SELECT 1 FROM archive_users WHERE userId = %s;', (user_id,))
    if len(self.con.fetchall()) == 0:
        if password is None:
            raise ValueError("Must specify an initial password when creating a new user!")
        action = "create"
        self.con.execute('INSERT INTO archive_users (userId, pwHash) VALUES (%s,%s)',
                         (user_id, passlib.hash.bcrypt.encrypt(password)))
    if password is None and roles is None:
        action = "none"
    if password is not None:
        self.con.execute('UPDATE archive_users SET pwHash = %s WHERE userId = %s',
                         (passlib.hash.bcrypt.encrypt(password), user_id))
    if roles is not None:
        # Clear out existing role assignments, then drop any orphaned roles
        self.con.execute("DELETE r FROM archive_user_roles AS r WHERE "
                         "(SELECT u.userId FROM archive_users AS u WHERE r.userId=u.uid)=%s;",
                         (user_id,))
        self.con.execute("DELETE r FROM archive_roles AS r WHERE r.uid NOT IN "
                         "(SELECT roleId FROM archive_user_roles);")
        for role in roles:
            self.con.execute("SELECT uid FROM archive_roles WHERE name=%s;", (role,))
            rows = self.con.fetchall()
            if len(rows) < 1:
                # Role does not exist yet: create it, then re-read its uid
                self.con.execute("INSERT INTO archive_roles (name) VALUES (%s);", (role,))
                self.con.execute("SELECT uid FROM archive_roles WHERE name=%s;", (role,))
                rows = self.con.fetchall()
            self.con.execute('INSERT INTO archive_user_roles (userId, roleId) VALUES '
                             '((SELECT u.uid FROM archive_users u WHERE u.userId=%s),'
                             '%s)', (user_id, rows[0]['uid']))
    return action
python
{ "resource": "" }
q45170
MeteorDatabase.get_export_configuration
train
def get_export_configuration(self, config_id):
    """Retrieve the ExportConfiguration with the given ID.

    :param string config_id: ID to search for
    :return: a :class:`meteorpi_model.ExportConfiguration`, or None when
        no match was found.
    """
    sql = (
        'SELECT uid, exportConfigId, exportType, searchString, targetURL, '
        'targetUser, targetPassword, exportName, description, active '
        'FROM archive_exportConfig WHERE exportConfigId = %s')
    generator = self.generators.export_configuration_generator(sql=sql, sql_args=(config_id,))
    return first_from_generator(generator)
python
{ "resource": "" }
q45171
MeteorDatabase.get_export_configurations
train
def get_export_configurations(self):
    """Retrieve every ExportConfiguration held in this database.

    :return: a list of all :class:`meteorpi_model.ExportConfiguration`
        on this server
    """
    sql = (
        'SELECT uid, exportConfigId, exportType, searchString, targetURL, '
        'targetUser, targetPassword, exportName, description, active '
        'FROM archive_exportConfig ORDER BY uid DESC')
    generator = self.generators.export_configuration_generator(sql=sql, sql_args=[])
    return list(generator)
python
{ "resource": "" }
q45172
MeteorDatabase.create_or_update_export_configuration
train
def create_or_update_export_configuration(self, export_config):
    """Create a new file export configuration or update an existing one.

    :param ExportConfiguration export_config:
        the specification for the export.  When it has no `config_id`, a
        new record is inserted and the field is populated on the supplied
        object; otherwise the existing record is updated to match.
    :returns: the supplied :class:`meteorpi_model.ExportConfiguration` as
        stored in the DB, guaranteed to have `config_id` defined.
    """
    search_string = json.dumps(obj=export_config.search.as_dict())
    if export_config.config_id is not None:
        # Update the existing record
        self.con.execute(
            'UPDATE archive_exportConfig c '
            'SET c.searchString = %s, c.targetUrl = %s, c.targetUser = %s, c.targetPassword = %s, '
            'c.exportName = %s, c.description = %s, c.active = %s, c.exportType = %s '
            'WHERE c.exportConfigId = %s',
            (search_string, export_config.target_url, export_config.user_id,
             export_config.password, export_config.name, export_config.description,
             export_config.enabled, export_config.type, export_config.config_id))
    else:
        # Create a new record and write the generated ID back onto the config
        item_id = mp.get_hash(mp.now(), export_config.name, export_config.type)
        self.con.execute(
            'INSERT INTO archive_exportConfig '
            '(searchString, targetUrl, targetUser, targetPassword, '
            'exportName, description, active, exportType, exportConfigId) '
            'VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s) ',
            (search_string, export_config.target_url, export_config.user_id,
             export_config.password, export_config.name, export_config.description,
             export_config.enabled, export_config.type, item_id))
        export_config.config_id = item_id
    return export_config
python
{ "resource": "" }
q45173
MeteorDatabase.get_high_water_mark
train
def get_high_water_mark(self, mark_type, obstory_name=None):
    """Retrieve the high water mark of a given type for an observatory.

    :param string mark_type: the type of high water mark to fetch
    :param string obstory_name: the obstory to check, defaulting to the
        current installation
    :return: a UTC datetime for the high water mark, or None if not found.
    """
    if obstory_name is None:
        obstory_name = self.obstory_name
    obstory = self.get_obstory_from_name(obstory_name)
    key_id = self.get_hwm_key_id(mark_type)
    self.con.execute('SELECT time FROM archive_highWaterMarks WHERE markType=%s AND observatoryId=%s',
                     (key_id, obstory['uid']))
    rows = self.con.fetchall()
    if rows:
        return rows[0]['time']
    return None
python
{ "resource": "" }
q45174
baseglob
train
def baseglob(pat, base):
    """Return the files matching glob pattern `pat` whose paths start with `base`."""
    matches = []
    for candidate in glob(pat):
        if candidate.startswith(base):
            matches.append(candidate)
    return matches
python
{ "resource": "" }
q45175
get_revision
train
def get_revision():
    """Return the hash of the current git revision."""
    proc = Process("git log", ["git", "log", "-1"])
    try:
        while True:
            line = proc.stdout.pop().strip().decode('utf8')
            # the "commit <hash>" line carries the revision
            if line.startswith("commit "):
                return line[7:]
    finally:
        with suppress_exception:
            proc.join()
python
{ "resource": "" }
q45176
get_remote_revision
train
def get_remote_revision(url, branch):
    """Return the revision hash of a remote branch."""
    proc = Process("git remote revision", ["git", "ls-remote", url, "refs/heads/" + branch])
    try:
        while True:
            line = proc.stdout.pop().strip().decode('utf8')
            if line:
                # output format is "<hash>\t<ref>"
                return line.split("\t")[0]
    finally:
        try:
            proc.join()
        except Exception:
            pass
python
{ "resource": "" }
q45177
get_branch
train
def get_branch():
    """Return the name of the current git branch."""
    proc = Process("git status", ["git", "status"])
    try:
        while True:
            line = proc.stdout.pop().decode('utf8').strip()
            if line.startswith("On branch "):
                return line[10:]
    finally:
        try:
            proc.join()
        except Exception:
            pass
python
{ "resource": "" }
q45178
Renderer._get_accept_languages_in_order
train
def _get_accept_languages_in_order(self):
    """Read the Accept-Language HTTP header and return its language tags
    in descending weighted order.

    :return: list of requested languages in descending preference order
    :rtype: list
    :raises ViewsFormatsException: when the header is malformed
    """
    try:
        # split the header into individual language ranges, weights still attached
        profiles = self.request.headers['Accept-Language'].split(',')
        # remove whitespace
        profiles = [x.replace(' ', '').strip() for x in profiles]
        # split off any ";q=" weights, defaulting the weight to 1
        profiles = [
            (float(x.split(';')[1].replace('q=', '')) if len(x.split(';')) == 2 else 1,
             x.split(';')[0])
            for x in profiles
        ]
        # sort by weight, heaviest first
        profiles.sort(reverse=True)
        return [x[1] for x in profiles]
    except Exception as e:
        # chain the original error so the malformed input stays diagnosable
        raise ViewsFormatsException(
            'You have requested a language using an Accept-Language header that is incorrectly formatted.') from e
python
{ "resource": "" }
q45179
CmWalk.readCfgJson
train
def readCfgJson(cls, working_path):
    """Read cmWalk configuration data of a working directory from a json file.

    :param working_path: working path to read the configuration data from.
    :return: the configuration data as a json object, or None if the
        configuration file does not exist.
    """
    cfg_path = os.path.join(working_path, cls.CFG_JSON_FILENAME)
    if not os.path.isfile(cfg_path):
        return None
    with open(cfg_path) as json_file:
        return json.load(json_file)
python
{ "resource": "" }
q45180
CmWalk.genTopLevelDirCMakeListsFile
train
def genTopLevelDirCMakeListsFile(self, working_path, subdirs, files, cfg):
    """Generate the top level CMakeLists.txt.

    :param working_path: current working directory
    :param subdirs: list of subdirectories of the working directory.
    :param files: list of files in the working directory.
    :param cfg: configuration object handed to the template.
    :return: the full path name of the generated CMakeLists.txt.
    """
    out_path = os.path.join(working_path, 'CMakeLists.txt')
    template = self.envJinja.get_template(self.TOP_LEVEL_CMAKELISTS_JINJA2_TEMPLATE)
    content = template.render({
        'project_name': os.path.basename(os.path.abspath(working_path)),
        'subdirs': subdirs,
        'files': files,
        'cfg': cfg,
    })
    with open(out_path, 'w') as f:
        f.write(content)
    return out_path
python
{ "resource": "" }
q45181
CmWalk.genSubDirCMakeListsFile
train
def genSubDirCMakeListsFile(self, working_path, addToCompilerIncludeDirectories, subdirs, files):
    """Generate a CMakeLists.txt in a subdirectory.

    :param working_path: current working directory
    :param addToCompilerIncludeDirectories: flag handed to the template.
    :param subdirs: list of subdirectories of the working directory.
    :param files: list of files in the working directory.
    :return: the full path name of the generated CMakeLists.txt.
    """
    out_path = os.path.join(working_path, 'CMakeLists.txt')
    template = self.envJinja.get_template(self.SUBDIR_CMAKELISTS_JINJA2_TEMPLATE)
    content = template.render({
        'addToCompilerIncludeDirectories': addToCompilerIncludeDirectories,
        'subdirs': subdirs,
        'files': files,
    })
    with open(out_path, 'w') as f:
        f.write(content)
    return out_path
python
{ "resource": "" }
q45182
FileLoader._maybe_purge_cache
train
def _maybe_purge_cache(self):
    """If enough time since the last check has elapsed, check whether any
    cached template changed.

    Deleted template files are dropped from the cache individually; if
    any template file changed, the entire cache is purged.
    """
    if self._last_reload_check + MIN_CHECK_INTERVAL > time.time():
        return
    for name, tmpl in list(self.cache.items()):
        try:
            mtime = os.stat(tmpl.path).st_mtime
        except OSError:
            # File was deleted.  os.stat raises on a missing path -- the
            # old `if not os.stat(...)` test could never fire and the
            # stat call itself crashed for deleted templates.
            self.cache.pop(name)
            continue
        if mtime > tmpl.mtime:
            self.cache.clear()
            break
    self._last_reload_check = time.time()
python
{ "resource": "" }
q45183
FileLoader.load
train
def load(self, name):
    """Return the compiled template for `name`.

    On first use the named template is loaded, compiled and cached; on
    subsequent calls the cached template is returned.  When reloading is
    enabled, stale cache entries are purged first.
    """
    if self.reload:
        self._maybe_purge_cache()
    cached = self.cache.get(name)
    if cached:
        return cached
    path = self.resolve(name)
    if not path:
        raise OSError(errno.ENOENT, "File not found: %s" % name)
    with codecs.open(path, 'r', encoding='UTF-8') as f:
        contents = f.read()
        mtime = os.fstat(f.fileno()).st_mtime
    template = self.load_string(contents, filename=path)
    template.mtime = mtime
    template.path = path
    self.cache[name] = template
    return template
python
{ "resource": "" }
q45184
MySQL.query
train
def query(self, query, stacked=False):
    """Translate a JSON query expression on a single table to SQL, run it,
    and return the result (also stored on `query.data`).
    """
    from jx_base.query import QueryOp
    query = QueryOp.wrap(query)
    sql, post = self._subquery(query, isolate=False, stacked=stacked)
    result = post(sql)
    query.data = result
    return result
python
{ "resource": "" }
q45185
MySQL._sort2sql
train
def _sort2sql(self, sort):
    """Return the ORDER BY clause for `sort`, or "" when there is none."""
    if not sort:
        return ""
    terms = [
        quote_column(o.field) + (" DESC" if o.sort == -1 else "")
        for o in sort
    ]
    return SQL_ORDERBY + sql_list(terms)
python
{ "resource": "" }
q45186
CustomAPIView.get_renderers
train
def get_renderers(self):
    """Instantiate and return the list of renderers this view can use.

    Uses the renderers declared on the object being viewed when it can be
    resolved, falling back to the view class's own renderer list.
    """
    try:
        source = self.get_object()
    except (ImproperlyConfigured, APIException):
        renderer_ids = self.__class__.renderers
    else:
        renderer_ids = source.__class__.renderers
    self.renderer_classes = [RENDERER_MAPPING[i] for i in renderer_ids]
    return [RENDERER_MAPPING[i]() for i in renderer_ids]
python
{ "resource": "" }
q45187
AVIFile.rebuild
train
def rebuild(self):
    """Rebuild the RIFF tree and index from the streams."""
    movi_chunk = self.riff.find('LIST', 'movi')
    movi_chunk.chunks = self.combine_streams()
    self.rebuild_index()
python
{ "resource": "" }
q45188
Lock.acquire
train
def acquire(self, **kwargs):
    """Acquire the lock.

    Returns True if the lock was acquired; False otherwise.

    timeout (int): time to wait for the lock to change if it is already
        held.  Defaults to what was provided at initialization, which
        will block and retry until acquired.
    """
    token = str(uuid.uuid4())
    attempted = False
    while self.token is None:
        try:
            # only succeeds when the key currently holds the released value
            self.client.test_and_set(self.key, token, "0", ttl=self.ttl)
            self.token = token
        except etcd.EtcdKeyNotFound:
            # fixed: Python-2-only `except X, e` syntax replaced with `as`
            try:
                self.client.write(self.key, token, prevExist=False,
                                  recursive=True, ttl=self.ttl)
                self.token = token
            except etcd.EtcdAlreadyExist:
                pass  # someone created the key right before us
        except ValueError:
            # someone else has the lock
            if 'timeout' in kwargs or self.timeout is not None:
                if attempted is True:
                    return False
                kwargs.setdefault("timeout", self.timeout)
                try:
                    self.client.read(self.key, wait=True, timeout=kwargs["timeout"])
                    attempted = True
                except etcd.EtcdException:
                    return False
            else:
                self.client.watch(self.key)
    if self.renewSecondsPrior is not None:
        # schedule automatic renewal shortly before the TTL expires
        timer_ttl = self.ttl - self.renewSecondsPrior
        if timer_ttl > 0:
            def renew():
                if self.renew():
                    Timer(timer_ttl, renew).start()
            Timer(timer_ttl, renew).start()
    else:
        # no renewal: forget the token once the TTL has elapsed
        def cleanup():
            if self.token is token:
                self.token = None
        Timer(self.ttl, cleanup).start()
    return True
python
{ "resource": "" }
q45189
Lock.renew
train
def renew(self):
    """Renew the lock if it is currently held.

    Returns True on success; on failure (the key changed hands or
    expired) the token is dropped and False is returned.
    """
    if self.token is not None:
        try:
            # refresh the TTL by re-setting the key to our own token;
            # fixed: Python-2-only `except X, e` syntax replaced with `as`
            self.client.test_and_set(self.key, self.token, self.token, ttl=self.ttl)
            return True
        except ValueError:
            self.token = None
    return False
python
{ "resource": "" }
q45190
Lock.release
train
def release(self):
    """Release the lock if it is currently held."""
    # TODO: thread safety (currently the lock may be acquired for one more TTL length)
    if self.token is None:
        return
    try:
        self.client.test_and_set(self.key, 0, self.token)
    except (ValueError, etcd.EtcdKeyError, etcd.EtcdKeyNotFound):
        pass  # the key already expired or got acquired by someone else
    finally:
        self.token = None
python
{ "resource": "" }
q45191
AssetsFileField.deconstruct
train
def deconstruct(self):
    """
    Deconstruct the field for migrations.

    ``denormalize`` is always forced to False in the migration state.
    """
    parent = super(AssetsFileField, self).deconstruct()
    name, path, args, kwargs = parent
    kwargs['denormalize'] = False
    return name, path, args, kwargs
python
{ "resource": "" }
q45192
ActionView.get_context_data
train
def get_context_data(self, **kwargs):
    """
    Build the template context.

    Always includes the current object under 'obj'; when a queryset is
    supplied, a confirmation message is added under 'conf_msg'. All
    keyword arguments are merged in on top.
    """
    context = dict(obj=self.object)
    if 'queryset' in kwargs:
        context['conf_msg'] = self.get_confirmation_message(kwargs['queryset'])
    context.update(kwargs)
    return context
python
{ "resource": "" }
q45193
ActionView.get_object
train
def get_object(self):
    """
    If a single object has been requested, will set `self.object`
    and return the object.

    Returns None when no slug was supplied in the URL kwargs (no
    single object requested). Raises Http404 when the slug matches
    no object.
    """
    slug = self.kwargs.get(self.slug_url_kwarg, None)
    if slug is None:
        # Fixed: previously fell through with queryset=None and
        # crashed with AttributeError on queryset.get(). Callers such
        # as get_selected do `if not obj:` and expect a falsy result
        # when no single-object URL was used.
        self.object = None
        return None
    slug_field = self.slug_field
    queryset = self.get_queryset().filter(**{slug_field: slug})
    try:
        self.object = queryset.get()
    except ObjectDoesNotExist:
        raise http.Http404
    return self.object
python
{ "resource": "" }
q45194
ActionView.get_selected
train
def get_selected(self, request):
    """
    Returns a queryset of the selected objects as specified by a GET
    or POST request.

    A single-object URL takes precedence; otherwise the checkbox list
    is read from the GET query string (comma-separated) or from the
    POST body.
    """
    obj = self.get_object()
    if obj:
        selected = [obj.pk]
    else:
        picked = request.GET.get(CHECKBOX_NAME)
        if picked:
            selected = picked.split(',')
        else:
            selected = request.POST.getlist(CHECKBOX_NAME)
    return self.get_queryset().filter(pk__in=selected)
python
{ "resource": "" }
q45195
ActionView.post
train
def post(self, request, *args, **kwargs):
    """
    Handle POST requests.

    When the 'modify' confirmation is present, runs `process_action`
    on the selected objects; a falsy result redirects to the done URL.
    Without confirmation, the page is re-rendered at the current URL.
    """
    queryset = self.get_selected(request)
    if not request.POST.get('modify'):
        # Not confirmed yet: show the confirmation page again.
        return self.render(request,
                           redirect_url=request.build_absolute_uri())
    response = self.process_action(request, queryset)
    if response:
        return response
    return self.render(request, redirect_url=self.get_done_url())
python
{ "resource": "" }
q45196
PublishActionView.process_action
train
def process_action(self, request, queryset):
    """
    Publish the selected objects.

    Each object's publish method receives the form's 'when' value, and
    `purge_archives` is called to limit how many old items are kept.
    Every object is logged as 'published' or 'scheduled' depending on
    its resulting state, and the user is notified with a message.

    Returns a 'render redirect' to the result of `get_done_url`, or
    re-renders the confirmation page when the form is invalid.
    """
    form = self.form(request.POST)
    if not form.is_valid():
        return self.render(request, queryset=queryset,
                           publish_form=form, action='Publish')
    when = form.cleaned_data.get('when')
    count = 0
    for item in queryset:
        count += 1
        item.publish(user=request.user, when=when)
        item.purge_archives()
        item_url = self.get_object_url(item)
        log_type = (CMSLog.PUBLISH if item.state == item.PUBLISHED
                    else CMSLog.SCHEDULE)
        self.log_action(item, log_type, url=item_url)
    message = "%s objects published." % count
    self.write_message(message=message)
    return self.render(request, redirect_url=self.get_done_url(),
                       message=message, collect_render_data=False)
python
{ "resource": "" }
q45197
UnPublishActionView.process_action
train
def process_action(self, request, queryset):
    """
    Unpublish the selected objects by calling each object's unpublish
    method. Every action is logged and the user is notified with a
    message.

    Returns a 'render redirect' to the result of `get_done_url`.
    """
    count = 0
    for item in queryset:
        count += 1
        item.unpublish()
        item_url = self.get_object_url(item)
        self.log_action(item, CMSLog.UNPUBLISH, url=item_url)
    done_url = self.get_done_url()
    msg = self.write_message(message="%s objects unpublished." % count)
    return self.render(request, redirect_url=done_url, message=msg,
                       collect_render_data=False)
python
{ "resource": "" }
q45198
PublishView.get_object_url
train
def get_object_url(self):
    """
    Return the URL to link to the object, resolved by calling
    `get_view_url` on the current bundle with 'edit' as the view name.
    """
    bundle = self.bundle
    return bundle.get_view_url('edit', self.request.user, {}, self.kwargs)
python
{ "resource": "" }
q45199
PublishView.post
train
def post(self, request, *args, **kwargs):
    """
    Handle POST requests.

    When a valid publish was requested, publishes the object with the
    form's 'when' value, purges old archives, logs the action as
    'published' or 'scheduled' depending on the resulting state, and
    notifies the user, redirecting to `get_done_url`. Otherwise the
    page is re-rendered with the (possibly bound) form.
    """
    self.object = self.get_object()
    form = self.form()
    done_url = self.get_done_url()
    if request.POST.get('publish'):
        # Re-bind the form to the submitted data; if invalid, the
        # bound form falls through to the final render below.
        form = self.form(request.POST)
        if form.is_valid():
            self.object.publish(user=request.user,
                                when=form.cleaned_data.get('when'))
            self.object.purge_archives()
            object_url = self.get_object_url()
            log_type = (CMSLog.PUBLISH
                        if self.object.state == self.object.PUBLISHED
                        else CMSLog.SCHEDULE)
            self.log_action(self.object, log_type, url=object_url)
            message = "%s %s" % (self.object, self.object.state)
            self.write_message(message=message)
            return self.render(request, redirect_url=done_url,
                               message=message, obj=self.object,
                               collect_render_data=False)
    return self.render(request, obj=self.object, form=form,
                       done_url=done_url)
python
{ "resource": "" }