_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q45200
UnPublishView.post
train
def post(self, request, *args, **kwargs):
    """Handle POST requests.

    Unpublishes the object by calling its ``unpublish`` method. The
    action is logged and the user is notified with a message. Returns a
    'render redirect' to the result of the `get_done_url` method.
    """
    self.object = self.get_object()
    done_url = self.get_done_url()
    # No explicit confirmation in the POST body: just re-render the form.
    if not request.POST.get('unpublish'):
        return self.render(request, obj=self.object, done_url=done_url)
    self.object.unpublish()
    self.log_action(self.object, CMSLog.UNPUBLISH, url=self.get_object_url())
    msg = self.write_message(message="%s unpublished" % (self.object))
    return self.render(request, redirect_url=done_url, message=msg,
                       obj=self.object, collect_render_data=False)
python
{ "resource": "" }
q45201
DeleteView.post
train
def post(self, request, *args, **kwargs):
    """Handle POST requests.

    Deletes the object. Successful deletes are logged. Returns a
    'render redirect' to the result of the `get_done_url` method.

    If a ProtectedError is raised, the `render` method is called with a
    message explaining the error added to the context as `protected`.
    """
    self.object = self.get_object()
    msg = None
    if request.POST.get('delete'):
        try:
            with transaction.commit_on_success():
                self.log_action(self.object, CMSLog.DELETE)
                msg = "%s deleted" % self.object
                self.object.delete()
        # FIX: `except ProtectedError, e` is Python-2-only syntax; the
        # `as` form is valid on Python 2.6+ and Python 3.
        except ProtectedError as e:
            protected = []
            for x in e.protected_objects:
                if hasattr(x, 'delete_blocked_message'):
                    protected.append(x.delete_blocked_message())
                else:
                    protected.append(u"%s: %s" % (x._meta.verbose_name, x))
            return self.render(request, obj=self.object, protected=protected)
    return self.render(request, redirect_url=self.get_done_url(),
                       obj=self.object, message=msg,
                       collect_render_data=False)
python
{ "resource": "" }
q45202
KeyExpander.expand
train
def expand(self, key_array):
    """Expand the encryption key per AES key schedule specifications.

    http://en.wikipedia.org/wiki/Rijndael_key_schedule#Key_schedule_description
    """
    if len(key_array) != self._n:
        raise RuntimeError('expand(): key size ' + str(len(key_array)) + ' is invalid')

    # First n bytes are copied from the key; the copy prevents in-place
    # modification of the caller's key.
    expanded = list(key_array)
    rcon_iteration = 1
    expanded_len = len(expanded)

    # Grow the key until it reaches the full schedule length (_b bytes).
    while expanded_len < self._b:
        # Run the last 4 bytes through _core, then xor with the 4 bytes
        # located n bytes from the end of the schedule.
        word = self._core(expanded[-4:], rcon_iteration)
        rcon_iteration += 1
        word = self._xor_list(word, expanded[-self._n:-self._n + 4])
        expanded.extend(word)
        expanded_len += 4

        # Three plain passes: tail word xor'd with the word n bytes back.
        for _ in range(3):
            word = self._xor_list(expanded[-4:], expanded[-self._n:-self._n + 4])
            expanded.extend(word)
            expanded_len += 4

        # 256-bit keys get one extra pass with the tail substituted
        # through the sbox before the xor.
        if self._key_length == 256 and expanded_len < self._b:
            substituted = [aes_tables.sbox[b] for b in expanded[-4:]]
            word = self._xor_list(substituted, expanded[-self._n:-self._n + 4])
            expanded.extend(word)
            expanded_len += 4

        # 192/256-bit keys: 2 or 3 additional plain passes respectively.
        if self._key_length != 128 and expanded_len < self._b:
            passes = 2 if self._key_length == 192 else 3
            for _ in range(passes):
                word = self._xor_list(expanded[-4:], expanded[-self._n:-self._n + 4])
                expanded.extend(word)
                expanded_len += 4

    return expanded
python
{ "resource": "" }
q45203
build_dot_value
train
def build_dot_value(key, value):
    """Build nested dictionaries from a dot-notation key.

    For example, if a key were 'x.y.z' and the value was 'foo', we would
    expect a return value of: ('x', {'y': {'z': 'foo'}})

    Args:
        key (str): The key to build a dictionary off of.
        value: The value associated with the dot notation key.

    Returns:
        tuple: A 2-tuple where the first element is the key of the
            outermost scope (left-most in the dot notation key) and the
            second is the constructed value for that key.
    """
    # No dots: the pair is already in its final form.
    if '.' not in key:
        return key, value
    components = key.split('.')
    # Wrap the value in one dict per component, innermost first.
    nested = value
    for component in reversed(components[1:]):
        nested = {component: nested}
    return components[0], nested
python
{ "resource": "" }
q45204
DotDict.get
train
def get(self, key, default=None):
    """Get a value from the `DotDict`.

    The `key` parameter can either be a regular string key, e.g. "foo",
    or a dot-notation key, e.g. "foo.bar.baz", to signify a nested
    lookup. The default value is returned if any level of the key's
    components is not found.

    Args:
        key (str): The key to get the value for.
        default: The return value should the given key not exist.
    """
    # Plain key: delegate straight to dict.get.
    if '.' not in key:
        return super(DotDict, self).get(key, default)
    # Split off the first component; the remainder addresses any
    # nested DotDicts.
    first, remainder = key.split('.', 1)
    result = default
    if first in self:
        result = super(DotDict, self).get(first, default)
    # Recurse into dict-valued results (note: this intentionally also
    # recurses into a dict-typed `default`, matching prior behavior).
    if isinstance(result, (dict, DotDict)):
        return DotDict(result).get(remainder, default)
    # TODO: support lists
    return result
python
{ "resource": "" }
q45205
DotDict.delete
train
def delete(self, key):
    """Remove a value from the `DotDict`.

    The `key` parameter can either be a regular string key, e.g. "foo",
    or a dot-notation key, e.g. "foo.bar.baz", to signify a nested
    element.

    Args:
        key (str): The key to remove.

    Raises:
        KeyError: A sub-key's value is not a dict and so cannot be
            traversed for deletion.
    """
    dct = self
    keys = key.split('.')
    last_idx = len(keys) - 1
    for idx, k in enumerate(keys):
        # FIX: compare positions, not key names. The previous code
        # deleted at the FIRST component equal to the last component,
        # so a key like 'a.b.a' wrongly deleted the top-level 'a'.
        if idx == last_idx:
            del dct[k]
            break
        # Descend one level, bypassing DotDict's own __getitem__ to get
        # the raw stored value.
        if isinstance(dct, DotDict):
            dct = super(DotDict, dct).__getitem__(k)
        else:
            dct = dct.__getitem__(k)
        if not isinstance(dct, (DotDict, dict)):
            raise KeyError(
                'Subkey "{}" in "{}" invalid for deletion'.format(k, key)
            )
python
{ "resource": "" }
q45206
make_extractor
train
def make_extractor(non_default):
    """Return a function that extracts options.

    Anything not present in `non_default` is wrapped in a "Default"
    marker object.
    """
    def extract_options(template, options):
        # Yield (attribute_name, value) pairs for each normalised option.
        for option, _ in normalise_options(template):
            attr = option.replace('-', '_')
            extracted = getattr(options, attr)
            if option not in non_default:
                extracted = Default(extracted)
            yield attr, extracted
    return extract_options
python
{ "resource": "" }
q45207
SpecRegister.set_option
train
def set_option(self, name, val, action=Empty, opts=Empty):
    """Determine which options were specified outside of the defaults."""
    parent = super(SpecRegister, self)
    # Any explicit action/opts means this is not a bare user-specified
    # option; delegate with the full argument list.
    if action is not Empty or opts is not Empty:
        parent.set_option(name, val, action, opts)
    else:
        self.specified.append(name)
        parent.set_option(name, val)
python
{ "resource": "" }
q45208
House.from_tibiadata
train
def from_tibiadata(cls, content):
    """Parses a TibiaData response into a House object.

    Parameters
    ----------
    content: :class:`str`
        The JSON content of the TibiaData response.

    Returns
    -------
    :class:`House`
        The house contained in the response, if found.

    Raises
    ------
    InvalidContent
        If the content is not a house JSON response from TibiaData
    """
    json_content = parse_json(content)
    try:
        house_json = json_content["house"]
        if not house_json["name"]:
            return None
        house = cls(house_json["name"], house_json["world"])
        house.type = try_enum(HouseType, house_json["type"])
        house.id = house_json["houseid"]
        house.beds = house_json["beds"]
        # FIX: removed a duplicated `house.size` assignment.
        house.size = house_json["size"]
        house.rent = house_json["rent"]
        house.image_url = house_json["img"]
        # Parsing the original status string is easier than dealing with
        # TibiaData fields
        house._parse_status(house_json["status"]["original"])
    except KeyError:
        raise InvalidContent("content is not a TibiaData house response.")
    return house
python
{ "resource": "" }
q45209
House._parse_status
train
def _parse_status(self, status):
    """Parses the house's state description and applies the
    corresponding values.

    Parameters
    ----------
    status: :class:`str`
        Plain text string containing the current renting state of the
        house.
    """
    match = rented_regex.search(status)
    if match:
        self.status = HouseStatus.RENTED
        self.owner = match.group("owner")
        self.owner_sex = Sex.MALE if match.group("pronoun") == "He" else Sex.FEMALE
        self.paid_until = parse_tibia_datetime(match.group("paid_until"))
    else:
        self.status = HouseStatus.AUCTIONED

    match = transfer_regex.search(status)
    if match:
        self.transfer_date = parse_tibia_datetime(match.group("transfer_date"))
        self.transfer_accepted = match.group("verb") == "will"
        self.transferee = match.group("transferee")
        price = match.group("transfer_price")
        self.transfer_price = int(price) if price is not None else 0

    match = auction_regex.search(status)
    if match:
        self.auction_end = parse_tibia_datetime(match.group("auction_end"))

    match = bid_regex.search(status)
    if match:
        self.highest_bid = int(match.group("highest_bid"))
        self.highest_bidder = match.group("bidder")
python
{ "resource": "" }
q45210
ListedHouse._parse_status
train
def _parse_status(self, status):
    """Parses the status string found in the table and applies the
    corresponding values.

    Parameters
    ----------
    status: :class:`str`
        The string containing the status.
    """
    if "rented" in status:
        self.status = HouseStatus.RENTED
        return
    # Not rented: the house is being auctioned; a bid may or may not
    # have been placed yet.
    match = list_auction_regex.search(status)
    if match:
        self.highest_bid = int(match.group('bid'))
        amount = int(match.group("time_left"))
        if match.group("time_unit") == "day":
            self.time_left = datetime.timedelta(days=amount)
        else:
            self.time_left = datetime.timedelta(hours=amount)
    self.status = HouseStatus.AUCTIONED
python
{ "resource": "" }
q45211
Bison.config
train
def config(self):
    """Get the complete configuration where the default, config,
    environment, and override values are merged together.

    Returns:
        (DotDict): A dictionary of configuration values that allows
            lookups using dot notation.
    """
    if self._full_config is None:
        # Merge precedence: defaults < file config < environment < overrides.
        merged = DotDict()
        for layer in (self._default, self._config, self._environment, self._override):
            merged.merge(layer)
        self._full_config = merged
    return self._full_config
python
{ "resource": "" }
q45212
Bison.set
train
def set(self, key, value):
    """Set a value in the `Bison` configuration.

    Args:
        key (str): The configuration key to set a new value for.
        value: The value to set.
    """
    # Any write invalidates the cached merged config; it will be
    # rebuilt lazily on the next `config` access.
    self._full_config = None
    self._override[key] = value
python
{ "resource": "" }
q45213
Bison.parse
train
def parse(self, requires_cfg=True):
    """Parse the configuration sources into `Bison`.

    Args:
        requires_cfg (bool): Specify whether or not parsing should fail
            if a config file is not found. (default: True)
    """
    # Parse order matters: defaults first, then file, then environment.
    self._parse_default()
    self._parse_config(requires_cfg)
    self._parse_env()
python
{ "resource": "" }
q45214
Bison._find_config
train
def _find_config(self):
    """Search the configured `config_paths` for the `config_name` file.

    If there are no `config_paths` defined, this will raise an error, so
    the caller should take care to check the value of `config_paths`
    first.

    Raises:
        BisonError: No file with the `config_name` was found in any of
            the specified `config_paths`.
    """
    # Try every (path, extension) combination and stop at the first hit.
    for search_path in self.config_paths:
        for ext in self._fmt_to_ext.get(self.config_format):
            candidate = os.path.abspath(os.path.join(search_path, self.config_name + ext))
            if os.path.isfile(candidate):
                self.config_file = candidate
                return
    raise BisonError('No file named {} found in search paths {}'.format(
        self.config_name, self.config_paths))
python
{ "resource": "" }
q45215
Bison._parse_config
train
def _parse_config(self, requires_cfg=True):
    """Parse the configuration file, if one is configured, and add it
    to the `Bison` state.

    Args:
        requires_cfg (bool): Specify whether or not parsing should fail
            if a config file is not found. (default: True)
    """
    if self.config_paths:
        try:
            self._find_config()
        except BisonError:
            # A missing file is only fatal when a config is required.
            if not requires_cfg:
                return
            raise
    try:
        with open(self.config_file, 'r') as f:
            parsed = self._fmt_to_parser[self.config_format](f)
    except Exception as e:
        raise BisonError(
            'Failed to parse config file: {}'.format(self.config_file)
        ) from e
    # The configuration changed, so invalidate the cached merged config.
    self._full_config = None
    self._config = parsed
python
{ "resource": "" }
q45216
Bison._parse_env
train
def _parse_env(self):
    """Parse the environment variables for any configuration if an
    `env_prefix` is set.
    """
    env_cfg = DotDict()
    # Normalize the prefix so it always ends with a single underscore.
    if self.env_prefix and not self.env_prefix.endswith('_'):
        self.env_prefix += '_'
    # Without a scheme we would not know which variables to look for,
    # so only parse when a scheme exists.
    if self.scheme:
        for key, option in self.scheme.flatten().items():
            parsed = option.parse_env(key, self.env_prefix, self.auto_env)
            if parsed is not None:
                env_cfg[key] = parsed
    if len(env_cfg) > 0:
        # The configuration changed, so invalidate the cached config.
        self._full_config = None
        self._environment.update(env_cfg)
python
{ "resource": "" }
q45217
Bison._parse_default
train
def _parse_default(self):
    """Parse the `Scheme` for the `Bison` instance to create the set of
    default values.

    If no defaults are specified in the scheme, the default dictionary
    will not contain anything.
    """
    # The configuration changes, so invalidate the cached merged config.
    self._full_config = None
    if self.scheme:
        self._default.update(self.scheme.build_defaults())
python
{ "resource": "" }
q45218
Relation_usingList.get_codomain
train
def get_codomain(self, key):
    """
    RETURN AN ARRAY OF OBJECTS THAT key MAPS TO
    """
    matches = []
    for pair_key, obj in self.all:
        if pair_key == key:
            matches.append(obj)
    return matches
python
{ "resource": "" }
q45219
Session.timeout
train
def timeout(self, value):
    """Sets a custom timeout value for this session."""
    if value == TIMEOUT_SESSION:
        # Session-length cookie: no explicit expiry is stored.
        self._config.timeout = None
        self._backend_client.expires = None
        return
    self._config.timeout = value
    self._calculate_expires()
python
{ "resource": "" }
q45220
Session._calculate_expires
train
def _calculate_expires(self):
    """Calculates the session expiry using the timeout."""
    # Reset first so a failure below does not leave a stale expiry.
    self._backend_client.expires = None
    # NOTE(review): uses naive utcnow(); presumably all expiry handling
    # is done in UTC — confirm against the backend.
    current = datetime.utcnow()
    self._backend_client.expires = current + timedelta(seconds=self._config.timeout)
python
{ "resource": "" }
q45221
Session._load_cookie
train
def _load_cookie(self):
    """Loads the HTTP cookie from the WSGI environ."""
    cookie = SimpleCookie(self._environ.get('HTTP_COOKIE'))
    matching = [k for k in cookie.keys() if k == self._config.cookie_name]
    # No matching cookie means no session was started yet.
    if not matching:
        return
    raw_value = cookie[matching[0]].value
    # Cookie values are optionally encrypted at rest.
    if self._config.encrypt_key:
        cipher = AESCipher(self._config.encrypt_key)
        raw_value = cipher.decrypt(raw_value)
    received_sid = Session.decode_sid(self._config.secret, raw_value)
    if received_sid:
        self._sid = received_sid
    else:
        logging.warn("found cookie with invalid signature")
python
{ "resource": "" }
q45222
Session.header
train
def header(self):
    """Generates the HTTP header for this cookie, or None when no
    cookie needs to be sent."""
    if not self._send_cookie:
        return None
    morsel = Morsel()
    cookie_value = Session.encode_sid(self._config.secret, self._sid)
    if self._config.encrypt_key:
        cipher = AESCipher(self._config.encrypt_key)
        cookie_value = cipher.encrypt(cookie_value)
        # Encryption yields bytes on Python 3; cookies need text.
        if sys.version_info > (3, 0):
            cookie_value = cookie_value.decode()
    morsel.set(self._config.cookie_name, cookie_value, cookie_value)
    # Optional cookie attributes, each driven by configuration.
    if self._config.domain:
        morsel["domain"] = self._config.domain
    if self._config.path:
        morsel["path"] = self._config.path
    if self._expire_cookie:
        # Expire immediately by dating the cookie in the past.
        morsel["expires"] = "Wed, 01-Jan-1970 00:00:00 GMT"
    elif self._backend_client.expires:
        morsel["expires"] = self._backend_client.expires.strftime(EXPIRES_FORMAT)
    if self._config.secure:
        morsel["secure"] = True
    if self._config.http_only:
        morsel["httponly"] = True
    return morsel.OutputString()
python
{ "resource": "" }
q45223
Session.encode_sid
train
def encode_sid(cls, secret, sid):
    """Computes the HMAC for the given session id and prepends it to
    the sid."""
    digest = hmac.new(
        secret.encode("utf-8"),
        sid.encode("utf-8"),
        hashlib.sha512,
    ).hexdigest()
    return "%s%s" % (digest, sid)
python
{ "resource": "" }
q45224
Session.is_signature_equal
train
def is_signature_equal(cls, sig_a, sig_b):
    """Compares two signatures using a constant time algorithm to avoid
    timing attacks.

    Signatures are hex digests (ASCII), so `hmac.compare_digest` is
    safe to use here; it is the stdlib's vetted constant-time compare
    and replaces the hand-rolled loop.
    """
    if len(sig_a) != len(sig_b):
        return False
    return hmac.compare_digest(sig_a, sig_b)
python
{ "resource": "" }
q45225
Session.decode_sid
train
def decode_sid(cls, secret, cookie_value):
    """Decodes a cookie value and returns the sid if valid or None if
    invalid.
    """
    # FIX: reject any value that is not exactly SIG + SID long. The
    # previous `>` check only rejected over-long values, contradicting
    # the "incorrect length" warning; short values were still rejected
    # later by the signature check, but this makes the guard honest.
    if len(cookie_value) != SIG_LENGTH + SID_LENGTH:
        logging.warn("cookie value is incorrect length")
        return None
    cookie_sig = cookie_value[:SIG_LENGTH]
    cookie_sid = cookie_value[SIG_LENGTH:]
    # Recompute the HMAC over the sid and compare with the cookie's.
    secret_bytes = secret.encode("utf-8")
    cookie_sid_bytes = cookie_sid.encode("utf-8")
    actual_sig = hmac.new(secret_bytes, cookie_sid_bytes, hashlib.sha512).hexdigest()
    if not Session.is_signature_equal(cookie_sig, actual_sig):
        return None
    return cookie_sid
python
{ "resource": "" }
q45226
Session.terminate
train
def terminate(self):
    """Terminates an active session."""
    # Drop all stored session data on the backend.
    self._backend_client.clear()
    # Reset the local state and schedule the cookie for expiry.
    self._needs_save = False
    self._started = False
    self._expire_cookie = True
    self._send_cookie = True
python
{ "resource": "" }
q45227
Session.get
train
def get(self, key):
    """Retrieve a value from the session dictionary."""
    # Loading from the backend also tells us whether a session exists.
    self._started = self._backend_client.load()
    self._needs_save = True
    return self._backend_client.get(key)
python
{ "resource": "" }
q45228
new_collection_percolator
train
def new_collection_percolator(target):
    """Create new percolator associated with the new collection.

    :param target: Collection where the percolator will be attached.
    """
    # FIX: the name/dbquery guard was re-evaluated inside the loop even
    # though it never changes, and the query was built before the guard.
    # Hoist the check and only build the query when it will be used.
    if not (target.name and target.dbquery):
        return
    query = IQ(target.dbquery)
    for name in current_search.mappings.keys():
        current_search.client.index(
            index=name,
            doc_type='.percolator',
            id='collection-{}'.format(target.name),
            body={'query': query.to_dict()}
        )
python
{ "resource": "" }
q45229
delete_collection_percolator
train
def delete_collection_percolator(target):
    """Delete percolator associated with the collection.

    :param target: Collection where the percolator was attached.
    """
    # FIX: hoist the loop-invariant name/dbquery guard out of the loop.
    if not (target.name and target.dbquery):
        return
    for name in current_search.mappings.keys():
        current_search.client.delete(
            index=name,
            doc_type='.percolator',
            id='collection-{}'.format(target.name),
            ignore=[404]
        )
python
{ "resource": "" }
q45230
collection_updated_percolator
train
def collection_updated_percolator(mapper, connection, target):
    """Recreate the percolator when a collection is updated.

    :param mapper: Not used. It keeps the event-listener signature.
    :param connection: Not used. It keeps the event-listener signature.
    :param target: Collection where the percolator should be updated.
    """
    # Drop any stale percolator first, then register a fresh one when
    # the collection still has a query.
    delete_collection_percolator(target)
    if target.dbquery is not None:
        new_collection_percolator(target)
python
{ "resource": "" }
q45231
_find_matching_collections_externally
train
def _find_matching_collections_externally(collections, record):
    """Find matching collections with the percolator engine.

    :param collections: set of collections in which to search
    :param record: record to match
    """
    index, doc_type = RecordIndexer().record_to_index(record)
    body = {"doc": record.dumps()}
    results = current_search_client.percolate(
        index=index,
        doc_type=doc_type,
        allow_no_indices=True,
        ignore_unavailable=True,
        body=body
    )
    prefix_len = len('collection-')
    for match in results['matches']:
        collection_name = match['_id']
        if collection_name.startswith('collection-'):
            name = collection_name[prefix_len:]
            if name in collections:
                yield collections[name]['ancestors']
    # FIX: removed trailing `raise StopIteration` — under PEP 479
    # (Python 3.7+) raising StopIteration inside a generator becomes a
    # RuntimeError; falling off the end terminates the generator anyway.
python
{ "resource": "" }
q45232
_str
train
def _str(value, depth):
    """
    FOR DEBUGGING POSSIBLY RECURSIVE STRUCTURES
    """
    # Recurse into mappings and lists until the depth budget runs out;
    # beyond that only the type name is shown.
    if depth > 0 and _get(value, CLASS) in data_types:
        pieces = [str(k) + "=" + _str(v, depth - 1) for k, v in value.items()]
        return "{" + ",\n".join(pieces) + "}"
    if depth > 0 and is_list(value):
        return "[" + ",\n".join(_str(v, depth - 1) for v in value) + "]"
    return str(type(value))
python
{ "resource": "" }
q45233
_AppState.cache
train
def cache(self):
    """Return a cache instance.

    A string value (from the app config or the constructor) is treated
    as an import path and resolved to the actual cache object.
    """
    configured = self._cache or self.app.config.get('COLLECTIONS_CACHE')
    if isinstance(configured, six.string_types):
        return import_string(configured)
    return configured
python
{ "resource": "" }
q45234
_AppState.collections
train
def collections(self):
    """Get the list of collections.

    If a cache server is configured, the collection list is loaded
    from there.
    """
    if not self.cache:
        return None
    return self.cache.get(self.app.config['COLLECTIONS_CACHE_KEY'])
python
{ "resource": "" }
q45235
_AppState.collections
train
def collections(self, values):
    """Set the list of collections.

    If a cache server is configured, the collection list is saved
    there.
    """
    if self.cache:
        self.cache.set(self.app.config['COLLECTIONS_CACHE_KEY'], values)
python
{ "resource": "" }
q45236
_post_vote
train
def _post_vote(user_bingo_board, field, vote):
    """Change a vote on a field.

    @param user_bingo_board: the user's bingo board or None
    @param field: the BingoField to vote on
    @param vote: the vote property from the HTTP POST
    @raises: VoteException: if user_bingo_board is None or the field
        does not belong to the user's bingo board.
    @raises Http404: If the BingoField does not exist.
    """
    if field.board != user_bingo_board:
        raise VoteException(
            "the voted field does not belong to the user's BingoBoard")
    # Map the POSTed symbol onto the stored vote value; anything else
    # leaves the current vote untouched.
    vote_values = {"0": 0, "+": 1, "-": -1}
    if vote in vote_values:
        field.vote = vote_values[vote]
    field.save()
    # update last_used with current timestamp
    game = user_bingo_board.game
    Game.objects.filter(id=game.id).update(last_used=times.now())
    # invalidate vote cache
    vote_counts_cachename = 'vote_counts_game={0:d}'.format(field.board.game.id)
    cache.delete(vote_counts_cachename)
    # publish the new vote counts for server-sent events
    if USE_SSE:
        try:
            votes = field.num_votes()
            redis.publish("word_votes", json.dumps({
                'site_id': game.site.id,
                'word_id': field.word.id,
                'vote_count': votes,
            }))
            redis.publish("field_vote", json.dumps({
                'site_id': game.site.id,
                'field_id': field.id,
                'vote': vote,
            }))
        except RedisConnectionError:
            # redis server not available?
            pass
python
{ "resource": "" }
q45237
split_qs
train
def split_qs(string, delimiter='&'):
    """Split a string by the specified unquoted, not enclosed delimiter.

    Delimiters inside quotes or inside bracket pairs ([], <>, {}, ())
    are ignored. Empty elements are dropped.

    Args:
        string (str): The string to split.
        delimiter (str): Single character to split on (default '&').

    Returns:
        list: The non-empty elements between top-level delimiters.
    """
    open_list = '[<{('
    close_list = ']>})'
    quote_chars = '"\''
    level = last_index = 0
    quoted = False
    result = []
    for index, letter in enumerate(string):
        if letter in quote_chars:
            # NOTE(review): quotes are tracked as a single toggle, so a
            # mix of ' and " is not matched by kind — preserved as-is.
            if not quoted:
                quoted = True
                level += 1
            else:
                quoted = False
                level -= 1
        elif letter in open_list:
            level += 1
        elif letter in close_list:
            level -= 1
        elif letter == delimiter and level == 0:
            element = string[last_index:index]
            if element:
                result.append(element)
            last_index = index + 1
    # FIX: append the trailing element from the slice after the last
    # delimiter. The previous `if index:` guard dropped the element for
    # one-character inputs (index stayed 0, which is falsy), so
    # split_qs('a') wrongly returned [].
    tail = string[last_index:]
    if tail:
        result.append(tail)
    return result
python
{ "resource": "" }
q45238
parse_qs
train
def parse_qs(string):
    """Intelligently parse the query string.

    Splits on unquoted '&' and '=' characters; an item without an
    (unambiguous) equals sign is stored with an empty value.
    """
    parsed = {}
    # Split the query string by unquoted ampersands ('&').
    for item in split_qs(string):
        try:
            # Split each item by unquoted equal signs.
            key, value = split_qs(item, delimiter='=')
        except ValueError:
            # Single value without an equals sign.
            parsed[item] = ''
        else:
            parsed[key] = value
    return parsed
python
{ "resource": "" }
q45239
ceiling
train
def ceiling(value, mod=1):
    """
    RETURN SMALLEST MULTIPLE OF mod STRICTLY GREATER THAN value
    (returns None for a None input)
    """
    # FIX: `is None` instead of `== None` (identity check is the
    # correct idiom and avoids custom __eq__ surprises).
    if value is None:
        return None
    mod = int(mod)
    # Adding mod before flooring guarantees a strictly greater result.
    v = int(math_floor(value + mod))
    return v - (v % mod)
python
{ "resource": "" }
q45240
DataLookup
train
def DataLookup(fieldVal, db, lookupType, fieldName, histObj={}):
    """Return a new field value based on a single-value lookup against
    MongoDB.

    :param string fieldVal: input value to look up
    :param MongoClient db: MongoClient instance connected to MongoDB
    :param string lookupType: Type of lookup to perform/MongoDB
        collection name. One of 'genericLookup', 'fieldSpecificLookup',
        'normLookup'
    :param string fieldName: Field name to query against
    :param dict histObj: History object to which changes should be
        appended
    """
    if lookupType == 'genericLookup':
        query = {"find": _DataClean_(fieldVal)}
    elif lookupType in ['fieldSpecificLookup', 'normLookup']:
        query = {"fieldName": fieldName, "find": _DataClean_(fieldVal)}
    else:
        raise ValueError("Invalid lookupType")
    new_value = fieldVal
    # Only the 'replace' field is needed from the matching document.
    doc = db[lookupType].find_one(query, ['replace'])
    if doc:
        new_value = doc['replace'] if 'replace' in doc else ''
    change = _CollectHistory_(lookupType=lookupType, fromVal=fieldVal,
                              toVal=new_value)
    hist_obj_upd = _CollectHistoryAgg_(contactHist=histObj,
                                       fieldHistObj=change,
                                       fieldName=fieldName)
    return new_value, hist_obj_upd
python
{ "resource": "" }
q45241
IncludesLookup
train
def IncludesLookup(fieldVal, lookupType, db, fieldName, deriveFieldName='',
                   deriveInput={}, histObj={}, overwrite=False,
                   blankIfNoMatch=False):
    """Return a new field value based on whether or not the original
    value includes AND excludes all words in comma-delimited lists
    queried from MongoDB.

    :param string fieldVal: input value to look up
    :param string lookupType: Type of lookup to perform/MongoDB
        collection name. One of 'normIncludes', 'deriveIncludes'
    :param MongoClient db: MongoClient instance connected to MongoDB
    :param string fieldName: Field name to query against
    :param string deriveFieldName: Field name from which to derive value
    :param dict deriveInput: Values to perform lookup against:
        {"deriveFieldName": "deriveVal1"}
    :param dict histObj: History object to which changes should be
        appended
    :param bool overwrite: Should an existing field value be replaced
    :param bool blankIfNoMatch: Should field value be set to blank if no
        match is found
    """
    query = {'fieldName': fieldName}
    if lookupType == 'normIncludes':
        cleaned = _DataClean_(fieldVal)
    elif lookupType == 'deriveIncludes':
        if deriveFieldName == '' or deriveInput == {}:
            raise ValueError("for 'deriveIncludes' must specify both \
                             'deriveFieldName' and 'deriveInput'")
        query['deriveFieldName'] = deriveFieldName
        cleaned = _DataClean_(deriveInput[list(deriveInput.keys())[0]])
    else:
        raise ValueError("Invalid lookupType")
    new_value = fieldVal
    check_match = False
    using = {}
    cursor = db[lookupType].find(
        query, ['includes', 'excludes', 'begins', 'ends', 'replace'])
    # Derive lookups only apply when overwriting or the field is empty.
    applicable = (lookupType == 'normIncludes' or
                  (lookupType == 'deriveIncludes' and
                   (overwrite or fieldVal == '')))
    if cursor and applicable:
        for row in cursor:
            try:
                if (row['includes'] != '' or row['excludes'] != '' or
                        row['begins'] != '' or row['ends'] != ''):
                    # All "includes" words must be present, all
                    # "excludes" words absent, and the begins/ends
                    # anchors must match.
                    if all((a in cleaned) for a in row['includes'].split(",")):
                        if (all((b not in cleaned)
                                for b in row['excludes'].split(","))
                                or row['excludes'] == ''):
                            if cleaned.startswith(row['begins']):
                                if cleaned.endswith(row['ends']):
                                    new_value = row['replace']
                                    if lookupType == 'deriveIncludes':
                                        using[deriveFieldName] = deriveInput
                                    using['includes'] = row['includes']
                                    using['excludes'] = row['excludes']
                                    using['begins'] = row['begins']
                                    using['ends'] = row['ends']
                                    check_match = True
                                    break
            except KeyError as Key_error_obj:
                warnings.warn('schema error', Key_error_obj)
    if cursor:
        cursor.close()
    if (new_value == fieldVal and blankIfNoMatch and
            lookupType == 'deriveIncludes'):
        new_value = ''
        using['blankIfNoMatch'] = 'no match found'
    change = _CollectHistory_(lookupType=lookupType, fromVal=fieldVal,
                              toVal=new_value, using=using)
    histObjUpd = _CollectHistoryAgg_(contactHist=histObj,
                                     fieldHistObj=change,
                                     fieldName=fieldName)
    return new_value, histObjUpd, check_match
python
{ "resource": "" }
q45242
RegexLookup
train
def RegexLookup(fieldVal, db, fieldName, lookupType, histObj={}):
    """Return a new field value based on a match against regexes
    queried from MongoDB.

    :param string fieldVal: input value to look up
    :param MongoClient db: MongoClient instance connected to MongoDB
    :param string fieldName: Field name to query against
    :param string lookupType: Type of lookup to perform/MongoDB
        collection name. One of 'genericRegex', 'fieldSpecificRegex',
        'normRegex'
    :param dict histObj: History object to which changes should be
        appended
    """
    if lookupType == 'genericRegex':
        query = {}
    elif lookupType in ['fieldSpecificRegex', 'normRegex']:
        query = {"fieldName": fieldName}
    else:
        raise ValueError("Invalid type")
    new_value = fieldVal
    pattern = ''
    cursor = db[lookupType].find(query, ['pattern', 'replace'])
    for row in cursor:
        try:
            # Apply the first pattern that matches; missing 'replace'
            # means "strip the match".
            match = re.match(row['pattern'], _DataClean_(new_value),
                             flags=re.IGNORECASE)
            if match:
                replacement = row['replace'] if 'replace' in row else ''
                new_value = re.sub(row['pattern'], replacement,
                                   _DataClean_(new_value),
                                   flags=re.IGNORECASE)
                pattern = row['pattern']
                break
        except KeyError as Key_error_obj:
            warnings.warn('schema error', Key_error_obj)
    if cursor:
        cursor.close()
    change = _CollectHistory_(lookupType=lookupType, fromVal=fieldVal,
                              toVal=new_value, pattern=pattern)
    histObjUpd = _CollectHistoryAgg_(contactHist=histObj,
                                     fieldHistObj=change,
                                     fieldName=fieldName)
    return new_value, histObjUpd
python
{ "resource": "" }
q45243
DeriveDataLookup
train
def DeriveDataLookup(fieldName, db, deriveInput, overwrite=True, fieldVal='', histObj={}, blankIfNoMatch=False):
    """
    Return new field value based on single or multi-value lookup against MongoDB.

    :param string fieldName: Field name to query against
    :param MongoClient db: MongoClient instance connected to MongoDB
    :param dict deriveInput: Values to perform lookup against:
        {"lookupField1": "lookupVal1", "lookupField2": "lookupVal2"}
    :param bool overwrite: Should an existing field value be replaced
    :param string fieldVal: Current field value
    :param dict histObj: History object to which changes should be appended
    :param bool blankIfNoMatch: Should field value be set to blank if no match is found
    :returns: tuple of (new field value, updated history object, match flag)
    """
    # Keys are sorted so the stored lookup document always has a canonical order.
    lookup_vals = OrderedDict()
    for val in sorted(deriveInput.keys()):
        lookup_vals[val] = _DataClean_(deriveInput[val])
    lookup_dict = {
        'fieldName': fieldName,
        'lookupVals': lookup_vals
    }
    coll = db['deriveValue']
    l_val = coll.find_one(lookup_dict, ['value'])
    field_val_new = fieldVal
    derive_using = deriveInput
    # If match found return True else False
    check_match = True if l_val else False
    # Only apply the derived value when overwriting is allowed or the field is empty.
    if l_val and (overwrite or (fieldVal == '')):
        try:
            field_val_new = l_val['value']
        except KeyError as Key_error_obj:
            # Matched document lacks the expected 'value' key.
            warnings.warn('schema error', Key_error_obj)
    elif blankIfNoMatch and not l_val:
        # Caller asked for the field to be blanked out when nothing matches.
        field_val_new = ''
        derive_using = {'blankIfNoMatch': 'no match found'}
    change = _CollectHistory_(lookupType='deriveValue', fromVal=fieldVal, toVal=field_val_new, using=derive_using)
    hist_obj_upd = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change, fieldName=fieldName)
    return field_val_new, hist_obj_upd, check_match
python
{ "resource": "" }
q45244
DeriveDataCopyValue
train
def DeriveDataCopyValue(fieldName, deriveInput, overwrite, fieldVal, histObj={}):
    """
    Return a new value copied from another field of the same record.

    :param string fieldName: name of the field being populated
    :param dict deriveInput: single-entry mapping {"copyField1": "copyVal1"}
        naming the source field and its value
    :param bool overwrite: whether an existing field value may be replaced
    :param string fieldVal: current field value
    :param dict histObj: history object to which changes should be appended
    :returns: tuple of (new field value, updated history object, match flag)
    :raises Exception: if *deriveInput* contains more than one entry
    """
    if len(deriveInput) > 1:
        raise Exception("more than one field/value in deriveInput")

    source_field = next(iter(deriveInput))
    source_value = deriveInput[source_field]

    # Copy only when the source is non-empty and we are allowed to write.
    copied = source_value != '' and (overwrite or fieldVal == '')
    new_value = source_value if copied else fieldVal

    change = _CollectHistory_(lookupType='copyValue', fromVal=fieldVal,
                              toVal=new_value, using=deriveInput)
    updated_hist = _CollectHistoryAgg_(contactHist=histObj, fieldHistObj=change,
                                       fieldName=fieldName)
    return new_value, updated_hist, copied
python
{ "resource": "" }
q45245
_normalize_select_no_context
train
def _normalize_select_no_context(select, schema=None):
    """
    SAME NORMALIZE, BUT NO SOURCE OF COLUMNS

    Normalize a single select clause (string or mapping) into a canonical
    object with name/value/aggregate/default filled in, without consulting
    a column source.
    """
    if not _Column:
        _late_import()  # break an import cycle: load column machinery lazily
    if is_text(select):
        select = Data(value=select)  # bare string is shorthand for {"value": ...}
    else:
        select = wrap(select)
    output = select.copy()
    if not select.value:
        # No value given: fall back to the name/aggregate, selecting the whole row.
        output.name = coalesce(select.name, select.aggregate)
        if output.name:
            output.value = jx_expression(".", schema=schema)
        else:
            return Null  # nothing usable in this select clause
    elif is_text(select.value):
        if select.value.endswith(".*"):
            # "prefix.*" means select all leaves under that prefix.
            name = select.value[:-2].lstrip(".")
            output.name = coalesce(select.name, name)
            output.value = LeavesOp(Variable(name), prefix=coalesce(select.prefix, name))
        else:
            if select.value == ".":
                # select the whole document
                output.name = coalesce(select.name, select.aggregate, ".")
                output.value = jx_expression(select.value, schema=schema)
            elif select.value == "*":
                # select all leaves of the document
                output.name = coalesce(select.name, select.aggregate, ".")
                output.value = LeavesOp(Variable("."))
            else:
                output.name = coalesce(select.name, select.value.lstrip("."), select.aggregate)
                output.value = jx_expression(select.value, schema=schema)
    elif is_number(output.value):
        # Numeric constant select; use its text form as the default name.
        if not output.name:
            output.name = text_type(output.value)
        output.value = jx_expression(select.value, schema=schema)
    else:
        output.value = jx_expression(select.value, schema=schema)
    if not output.name:
        Log.error("expecting select to have a name: {{select}}", select= select)
    if output.name.endswith(".*"):
        Log.error("{{name|quote}} is invalid select", name=output.name)
    # Canonicalize the aggregate name and pick its matching default value.
    output.aggregate = coalesce(canonical_aggregates[select.aggregate].name, select.aggregate, "none")
    output.default = coalesce(select.default, canonical_aggregates[output.aggregate].default)
    return output
python
{ "resource": "" }
q45246
_map_term_using_schema
train
def _map_term_using_schema(master, path, term, schema_edges):
    """
    IF THE WHERE CLAUSE REFERS TO FIELDS IN THE SCHEMA, THEN EXPAND THEM

    Translate each {field: value} pair in *term* into ES filter clauses,
    expanding schema dimensions into their underlying ES fields.  Returns
    an {"and": [...]} filter combining all pairs.
    """
    output = FlatList()
    for k, v in term.items():
        dimension = schema_edges[k]
        if isinstance(dimension, Dimension):
            domain = dimension.getDomain()
            if dimension.fields:
                if is_data(dimension.fields):
                    # EXPECTING A TUPLE
                    # fields is a mapping of local name -> ES field name.
                    for local_field, es_field in dimension.fields.items():
                        local_value = v[local_field]
                        if local_value == None:
                            output.append({"missing": {"field": es_field}})
                        else:
                            output.append({"term": {es_field: local_value}})
                    continue
                if len(dimension.fields) == 1 and is_variable_name(dimension.fields[0]):
                    # SIMPLE SINGLE-VALUED FIELD
                    if domain.getPartByKey(v) is domain.NULL:
                        output.append({"missing": {"field": dimension.fields[0]}})
                    else:
                        output.append({"term": {dimension.fields[0]: v}})
                    continue
                if AND(is_variable_name(f) for f in dimension.fields):
                    # EXPECTING A TUPLE
                    # Multiple ES fields: the value must be a tuple aligned with them.
                    if not isinstance(v, tuple):
                        Log.error("expecing {{name}}={{value}} to be a tuple", name= k, value= v)
                    for i, f in enumerate(dimension.fields):
                        vv = v[i]
                        if vv == None:
                            output.append({"missing": {"field": f}})
                        else:
                            output.append({"term": {f: vv}})
                    continue
            if len(dimension.fields) == 1 and is_variable_name(dimension.fields[0]):
                if domain.getPartByKey(v) is domain.NULL:
                    output.append({"missing": {"field": dimension.fields[0]}})
                else:
                    output.append({"term": {dimension.fields[0]: v}})
                continue
            if domain.partitions:
                # Fall back to the partition's pre-built ES filter.
                part = domain.getPartByKey(v)
                if part is domain.NULL or not part.esfilter:
                    Log.error("not expected to get NULL")
                output.append(part.esfilter)
                continue
            else:
                Log.error("not expected")
        elif is_data(v):
            # Nested term: recurse with the extended path and nested schema edges.
            sub = _map_term_using_schema(master, path + [k], v, schema_edges[k])
            output.append(sub)
            continue
        # Plain field (not a schema dimension): emit a simple term filter.
        output.append({"term": {k: v}})
    return {"and": output}
python
{ "resource": "" }
q45247
QueryOp.wrap
train
def wrap(query, container, namespace):
    """
    NORMALIZE QUERY SO IT CAN STILL BE JSON

    Convert a raw (JSON-like) query into a QueryOp with all clauses
    (select/edges/groupby/where/window/sort/limit) normalized.
    Returns the input unchanged if it is already a QueryOp or None.
    """
    if is_op(query, QueryOp) or query == None:
        return query

    query = wrap(query)  # NOTE(review): presumably mo_dot's wrap imported at module level, not recursion — confirm
    table = container.get_table(query['from'])
    schema = table.schema
    output = QueryOp(
        frum=table,
        format=query.format,
        limit=mo_math.min(MAX_LIMIT, coalesce(query.limit, DEFAULT_LIMIT))
    )

    if query.select or isinstance(query.select, (Mapping, list)):
        output.select = _normalize_selects(query.select, query.frum, schema=schema)
    else:
        # No explicit select: aggregation queries default to a count-style
        # select, otherwise select the whole document.
        if query.edges or query.groupby:
            output.select = DEFAULT_SELECT
        else:
            output.select = _normalize_selects(".", query.frum)

    if query.groupby and query.edges:
        Log.error("You can not use both the `groupby` and `edges` clauses in the same query!")
    elif query.edges:
        output.edges = _normalize_edges(query.edges, limit=output.limit, schema=schema)
        output.groupby = Null
    elif query.groupby:
        output.edges = Null
        output.groupby = _normalize_groupby(query.groupby, limit=output.limit, schema=schema)
    else:
        output.edges = Null
        output.groupby = Null

    output.where = _normalize_where(query.where, schema=schema)
    output.window = [_normalize_window(w) for w in listwrap(query.window)]
    output.having = None
    output.sort = _normalize_sort(query.sort)
    if not mo_math.is_integer(output.limit) or output.limit < 0:
        Log.error("Expecting limit >= 0")
    output.isLean = query.isLean
    return output
python
{ "resource": "" }
q45248
CacheManager.invalidate_cache
train
def invalidate_cache(self, klass, extra=None, **kwargs):
    """Invalidate every registered group's cache for *klass*.

    Walks all registered groups and, for each group that has *klass*
    registered, calls its ``invalidate_cache`` method, forwarding all
    remaining keyword arguments.

    :param klass: the model class whose cached data must be invalidated.
    :param extra: mapping of group key -> extra list; each group receives
        only the entry stored under its own key (or ``None``).
    """
    # Fall back to an 'extra' smuggled in via kwargs (popped so it is not
    # forwarded twice to the groups).
    if not extra:
        extra = kwargs.pop('extra', {})
    for grp in self._registry.values():
        if klass not in grp.models:
            continue
        grp.invalidate_cache(klass, extra=extra.get(grp.key), **kwargs)
python
{ "resource": "" }
q45249
create_store_prompt
train
def create_store_prompt(name):
    """Create a prompt which implements the `store` feature.

    Wraps the underlying ``prompt`` so that questions flagged with
    ``store: True`` have their previous answers pre-filled as defaults,
    and their new answers persisted for the next run.

    :param name: name of the generator (namespaces the stored answers)
    :return: prompt callable with the same signature as ``prompt``
    """
    def _prompt(questions, answers=None, **kwargs):
        stored_answers = _read_stored_answers(name)
        to_store = []
        for q in questions:
            if 'store' in q:
                if q.pop('store'):  # remove it so whaaaaat does not bark
                    to_store.append(q['name'])
                    # Pre-fill the default with the previously stored answer.
                    if q['name'] in stored_answers:
                        q['default'] = stored_answers[q['name']]
        answers = prompt(questions, answers, **kwargs)
        if to_store:
            # save answers for questions flagged with 'store'
            for s in to_store:
                if s in answers:
                    stored_answers[s] = answers[s]
            _store_answers(name, stored_answers)
        return answers
    return _prompt
python
{ "resource": "" }
q45250
memoize
train
def memoize(function):
    """Memoizing decorator.

    Caches return values keyed by the string form of the call's positional
    and keyword arguments.  Potentially not thread-safe, since results are
    shared across threads; make sure this is okay with callers.
    """
    results = {}

    @wraps(function)
    def cached(*args, **kwargs):
        key = '%s%s' % (args, kwargs)
        try:
            return results[key]
        except KeyError:
            value = function(*args, **kwargs)
            results[key] = value
            return value

    return cached
python
{ "resource": "" }
q45251
dwmAll
train
def dwmAll(data, db, configName='', config={}, udfNamespace=__name__, verbose=False):
    """
    Return list of dictionaries after cleaning rules have been applied;
    optionally with a history record ID appended.

    :param list data: list of dictionaries (records) to which cleaning rules should be applied
    :param MongoClient db: MongoDB connection
    :param string configName: name of configuration to use; will be queried from 'config' collection of MongoDB
    :param OrderedDict config: pre-queried config dict
    :param namespace udfNamespace: namespace of current working script; must be passed if using user-defined functions
    :param bool verbose: use tqdm to display progress of cleaning records
    """
    # Exactly one of configName / config must be supplied.
    if config=={} and configName=='':
        raise Exception("Please either specify configName or pass a config")
    if config!={} and configName!='':
        raise Exception("Please either specify configName or pass a config")
    if config=={}:
        configColl = db['config']
        config = configColl.find_one({"configName": configName})
        if not config:
            raise Exception("configName '" + configName + "' not found in collection 'config'")
    writeContactHistory = config["history"]["writeContactHistory"]
    returnHistoryId = config["history"]["returnHistoryId"]
    returnHistoryField = config["history"]["returnHistoryField"]
    histIdField = config["history"]["histIdField"]
    # Sort the derive and UDF sub-configs so rules apply in a deterministic order.
    for field in config["fields"]:
        config["fields"][field]["derive"] = OrderedDict(sorted(config["fields"][field]["derive"].items()))
    for position in config["userDefinedFunctions"]:
        config["userDefinedFunctions"][position] = OrderedDict(sorted(config["userDefinedFunctions"][position].items()))
    if verbose:
        # tqdm wraps the iterable to display a progress bar.
        for row in tqdm(data):
            row, historyId = dwmOne(data=row, db=db, config=config,
                                    writeContactHistory=writeContactHistory,
                                    returnHistoryId=returnHistoryId,
                                    histIdField=histIdField,
                                    udfNamespace=udfNamespace)
            if returnHistoryId and writeContactHistory:
                row[returnHistoryField] = historyId
    else:
        for row in data:
            row, historyId = dwmOne(data=row, db=db, config=config,
                                    writeContactHistory=writeContactHistory,
                                    returnHistoryId=returnHistoryId,
                                    histIdField=histIdField,
                                    udfNamespace=udfNamespace)
            if returnHistoryId and writeContactHistory:
                row[returnHistoryField] = historyId
    return data
python
{ "resource": "" }
q45252
Signal.wait
train
def wait(self):
    """
    PUT THREAD IN WAIT STATE UNTIL SIGNAL IS ACTIVATED

    Blocks the calling thread until the signal goes; returns True in all
    cases.  Fast-paths the already-signaled case without taking the lock.
    """
    if self._go:
        return True  # already signaled; no need to block

    with self.lock:
        if self._go:
            return True  # signaled while we were acquiring the lock
        # A raw lock, pre-acquired; the second acquire below blocks until
        # the signaling thread releases it.
        stopper = _allocate_lock()
        stopper.acquire()
        if not self.waiting_threads:
            self.waiting_threads = [stopper]
        else:
            self.waiting_threads.append(stopper)

    DEBUG and self._name and Log.note("wait for go {{name|quote}}", name=self.name)
    stopper.acquire()  # blocks here until the signal releases our stopper
    DEBUG and self._name and Log.note("GOing! {{name|quote}}", name=self.name)
    return True
python
{ "resource": "" }
q45253
Signal.on_go
train
def on_go(self, target):
    """
    RUN target WHEN SIGNALED

    If the signal has not yet gone, *target* is queued to run at signal
    time; if it already went, *target* is invoked immediately (outside
    the lock).
    """
    if not target:
        Log.error("expecting target")

    with self.lock:
        if not self._go:
            DEBUG and self._name and Log.note("Adding target to signal {{name|quote}}", name=self.name)
            if not self.job_queue:
                self.job_queue = [target]
            else:
                self.job_queue.append(target)
            return
    # Signal already triggered: run the callback right away.
    (DEBUG_SIGNAL) and Log.note("Signal {{name|quote}} already triggered, running job immediately", name=self.name)
    target()
python
{ "resource": "" }
q45254
Signal.remove_go
train
def remove_go(self, target):
    """Deregister a pending go-callback (memory saving).

    No-op if the signal already went or *target* was never registered.
    """
    with self.lock:
        if self._go:
            return  # already triggered; queue is no longer relevant
        try:
            self.job_queue.remove(target)
        except ValueError:
            pass  # target was not queued; nothing to remove
python
{ "resource": "" }
q45255
_jx_expression
train
def _jx_expression(expr, lang):
    """
    WRAP A JSON EXPRESSION WITH OBJECT REPRESENTATION

    Convert a raw JSON expression (literal, variable name, list, or
    {operator: args} mapping) into this *lang*'s expression objects.
    """
    if is_expression(expr):
        # CONVERT TO lang
        new_op = lang[expr.id]
        if not new_op:
            # CAN NOT BE FOUND, TRY SOME PARTIAL EVAL
            return language[expr.id].partial_eval()
        return expr
        # return new_op(expr.args)  # THIS CAN BE DONE, BUT IT NEEDS MORE CODING, AND I WOULD EXPECT IT TO BE SLOW

    if expr is None:
        return TRUE
    elif expr in (True, False, None) or expr == None or isinstance(expr, (float, int, Decimal, Date)):
        return Literal(expr)
    elif is_text(expr):
        return Variable(expr)
    elif is_sequence(expr):
        # Lists become a tuple of recursively-converted expressions.
        return lang[TupleOp([_jx_expression(e, lang) for e in expr])]

    # expr = wrap(expr)
    try:
        items = items_(expr)

        for op, term in items:
            # ONE OF THESE IS THE OPERATOR
            full_op = operators.get(op)
            if full_op:
                class_ = lang.ops[full_op.id]
                if class_:
                    return class_.define(expr)

                # THIS LANGUAGE DOES NOT SUPPORT THIS OPERATOR, GOTO BASE LANGUAGE AND GET THE MACRO
                class_ = language[op.id]
                output = class_.define(expr).partial_eval()
                return _jx_expression(output, lang)
        else:
            # No key of the mapping named a known operator.
            if not items:
                return NULL
            raise Log.error("{{instruction|json}} is not known", instruction=items)

    except Exception as e:
        Log.error("programmer error expr = {{value|quote}}", value=expr, cause=e)
python
{ "resource": "" }
q45256
ElasticsearchMultilingualSearchBackend.setup
train
def setup(self): """ Defers loading until needed. Compares the existing mapping for each language with the current codebase. If they differ, it automatically updates the index. """ # Get the existing mapping & cache it. We'll compare it # during the ``update`` & if it doesn't match, we'll put the new # mapping. for language in self.languages: self.index_name = self._index_name_for_language(language) try: self.existing_mapping[language] = self.conn.indices.get_mapping( index=self.index_name) except NotFoundError: pass except Exception: if not self.silently_fail: raise unified_index = haystack.connections[self.connection_alias].get_unified_index() self.content_field_name, field_mapping = self.build_schema( unified_index.all_searchfields(), language) current_mapping = { 'modelresult': { 'properties': field_mapping, '_boost': { 'name': 'boost', 'null_value': 1.0 } } } if current_mapping != self.existing_mapping[language]: try: # Make sure the index is there first. self.conn.indices.create( index=self.index_name, body=self.DEFAULT_SETTINGS, ignore=400) self.conn.indices.put_mapping( index=self.index_name, doc_type='modelresult', body=current_mapping ) self.existing_mapping[language] = current_mapping except Exception: if not self.silently_fail: raise self.setup_complete = True
python
{ "resource": "" }
q45257
get
train
def get():
    """Return the current version string without importing the package.

    Finds the first package in the source tree, reads its ``__init__.py``,
    and extracts the value of ``__version__`` (quotes stripped).

    :raises ValueError: if no packages are found, or ``__version__`` is
        not defined in the package's ``__init__.py``.
    """
    pkgnames = find_packages()
    if len(pkgnames) == 0:
        raise ValueError("Can't find any packages")
    pkgname = pkgnames[0]
    # FIX: use a context manager so the file handle is closed
    # (the original leaked it via open(...).read()).
    with open(join(pkgname, '__init__.py')) as fh:
        content = fh.read()
    c = re.compile(r"__version__ *= *('[^']+'|\"[^\"]+\")")
    m = c.search(content)
    if m is None:
        raise ValueError("Can't find __version__ = ... in __init__.py")
    return m.groups()[0][1: -1]  # strip the surrounding quotes
python
{ "resource": "" }
q45258
store_integers
train
def store_integers(items, allow_zero=True):
    """Store integers from the given list in a storage.

    This is an example function to show autodoc style.  Returns a
    :class:`Storage` instance holding the :class:`int` elements of *items*
    (everything else is ignored).

    Examples::

        >>> storage = store_integers([1, 'foo', 2, 'bar', 0])
        >>> storage.items
        [1, 2, 0]
        >>> storage = store_integers([1, 'foo', 2, 'bar', 0], allow_zero=False)
        >>> storage.items
        [1, 2]

    :param items: List of objects of any type; only :class:`int` instances
        will be stored.
    :param allow_zero: Boolean -- if ``False``, ``0`` integers will be
        skipped. Defaults to ``True``.
    """
    kept = []
    for candidate in items:
        if not isinstance(candidate, int):
            continue
        if not allow_zero and candidate == 0:
            continue
        kept.append(candidate)
    return Storage(kept)
python
{ "resource": "" }
q45259
Storage.add_item
train
def add_item(self, item):
    """Append *item* to :attr:`items` and refresh the update timestamp.

    :attr:`last_updated` is set to :py:meth:`datetime.datetime.now`.

    :param item: Something to append to :attr:`items`.
    """
    self.items.append(item)
    now = datetime.datetime.now()
    self.last_updated = now
python
{ "resource": "" }
q45260
HttpError.map_http_status_to_exception
train
def map_http_status_to_exception(http_code):
    """
    Bind a HTTP status to an HttpError.

    Scans the direct subclasses of HttpError for one whose
    ``HTTP_STATUSES`` (an int or an iterable of ints) contains
    *http_code*.

    :param http_code: The HTTP code
    :type http_code: int
    :return: The HttpError subclass that fits http_code, or HttpError.
    :rtype: Any subclass of HttpError or HttpError
    """
    for candidate in HttpError.__subclasses__():
        statuses = candidate.HTTP_STATUSES
        if isinstance(statuses, int):
            statuses = [candidate.HTTP_STATUSES]
        try:
            if http_code in statuses:
                return candidate
        except TypeError:
            # Pass if statuses is not iterable (≈ None)
            pass
    return HttpError
python
{ "resource": "" }
q45261
Ident.to_xml_string
train
def to_xml_string(self):
    """
    Exports the element in XML format.

    :returns: element in XML format.
    :rtype: str
    """
    # Sync the lxml element with the current object state before serializing.
    self.update_xml_element()
    return etree.tostring(self.xml_element, pretty_print=True).decode('utf-8')
python
{ "resource": "" }
q45262
Draft.fetch
train
def fetch(self):
    """Fetch this draft's content from the server and store it in ``self.data``.

    Requires ``self.message_id`` to be set; returns ``self`` for chaining.
    """
    if self.message_id is None:
        raise Exception(".message_id not set.")
    payload = self.session.request("find:Message.content", [ self.message_id ])
    if payload == None:
        raise Exception("Message not found.")
    self.data = payload
    return self
python
{ "resource": "" }
q45263
Draft.save
train
def save(self):
    """Persist the current draft state, refreshing ``data`` and ``message_id``.

    Returns ``self`` for chaining.
    """
    saved = self.session.request("save:Message", [ self.data ])
    self.data = saved
    self.message_id = saved["id"]
    return self
python
{ "resource": "" }
q45264
Draft.send_preview
train
def send_preview(self): # pragma: no cover
    """Queue a preview send of this draft; returns ``self`` for chaining."""
    result = self.session.request("method:queuePreview", [ self.data ])
    self.data = result
    return self
python
{ "resource": "" }
q45265
Draft.send
train
def send(self): # pragma: no cover
    """Queue this draft for sending; returns ``self`` for chaining."""
    result = self.session.request("method:queue", [ self.data ])
    self.data = result
    return self
python
{ "resource": "" }
q45266
Draft.delete
train
def delete(self):
    """Delete this draft on the server; returns ``self`` for chaining."""
    result = self.session.request("delete:Message", [ self.message_id ])
    self.data = result
    return self
python
{ "resource": "" }
q45267
_get_query_sets_for_object
train
def _get_query_sets_for_object(o):
    """
    Determines the correct query set based on the object.

    If the object is a literal, it will return a query set over
    LiteralStatements. If the object is a URIRef or BNode, it will return
    a query set over Statements. If the object is unknown, it will return
    both the LiteralStatement and Statement query sets.

    This method always returns a list of size at least one.
    """
    if o:
        if isinstance(o, Literal):
            query_sets = [models.LiteralStatement.objects]
        else:
            # URIRef / BNode objects live in the URIStatement table.
            query_sets = [models.URIStatement.objects]
    else:
        # Object unspecified: callers must search both tables.
        query_sets = [models.URIStatement.objects, models.LiteralStatement.objects]
    return query_sets
python
{ "resource": "" }
q45268
_get_named_graph
train
def _get_named_graph(context): """ Returns the named graph for this context. """ if context is None: return None return models.NamedGraph.objects.get_or_create(identifier=context.identifier)[0]
python
{ "resource": "" }
q45269
DjangoStore.destroy
train
def destroy(self, configuration=None):
    """
    Completely destroys a store and all the contexts and triples in the store.

    >>> store = DjangoStore()
    >>> g = rdflib.Graph(store=store)
    >>> g.open(configuration=None, create=True) == rdflib.store.VALID_STORE
    True
    >>> g.open(configuration=None, create=False) == rdflib.store.VALID_STORE
    True
    >>> g.destroy(configuration=None)
    >>> g.open(configuration=None, create=False) == rdflib.store.VALID_STORE
    True
    """
    # Wipe every backing table: graphs first is not required since Django
    # issues independent DELETEs per model here.
    models.NamedGraph.objects.all().delete()
    models.URIStatement.objects.all().delete()
    models.LiteralStatement.objects.all().delete()
python
{ "resource": "" }
q45270
DjangoStore.add
train
def add(self, (s, p, o), context, quoted=False):
    """
    Adds a triple to the store.

    >>> from rdflib.term import URIRef
    >>> from rdflib.namespace import RDF
    >>> subject = URIRef('http://zoowizard.org/resource/Artis')
    >>> object = URIRef('http://schema.org/Zoo')
    >>> g = rdflib.Graph('Django')
    >>> g.add((subject, RDF.type, object))
    >>> len(g)
    1
    """
    assert isinstance(s, Identifier)
    assert isinstance(p, Identifier)
    assert isinstance(o, Identifier)
    assert not quoted  # quoted (formula) statements are not supported
    named_graph = _get_named_graph(context)
    # The object's type (Literal vs URI/BNode) picks the backing table;
    # first query set is the specific one for this object.
    query_set = _get_query_sets_for_object(o)[0]
    # get_or_create keeps the store free of duplicate triples.
    query_set.get_or_create(
        subject=s,
        predicate=p,
        object=o,
        context=named_graph,
    )
python
{ "resource": "" }
q45271
DjangoStore.remove
train
def remove(self, (s, p, o), context=None):
    """
    Removes a triple from the store.

    Any of s/p/o may be None to act as a wildcard; the deletion is run
    against both statement tables when the object type is unknown.
    """
    named_graph = _get_named_graph(context)
    query_sets = _get_query_sets_for_object(o)
    filter_parameters = dict()
    if named_graph is not None:
        filter_parameters['context_id'] = named_graph.id
    if s:
        filter_parameters['subject'] = s
    if p:
        filter_parameters['predicate'] = p
    if o:
        filter_parameters['object'] = o
    query_sets = [qs.filter(**filter_parameters) for qs in query_sets]  # pylint: disable=W0142
    for qs in query_sets:
        qs.delete()
python
{ "resource": "" }
q45272
DjangoStore.triples
train
def triples(self, (s, p, o), context=None):
    """
    Returns all triples in the current store.

    Generator of ((s, p, o), context) pairs matching the given pattern;
    None components act as wildcards.
    """
    named_graph = _get_named_graph(context)
    query_sets = _get_query_sets_for_object(o)
    filter_parameters = dict()
    if named_graph is not None:
        filter_parameters['context_id'] = named_graph.id
    if s:
        filter_parameters['subject'] = s
    if p:
        filter_parameters['predicate'] = p
    if o:
        filter_parameters['object'] = o
    query_sets = [qs.filter(**filter_parameters) for qs in query_sets]  # pylint: disable=W0142
    for qs in query_sets:
        for statement in qs:
            triple = statement.as_triple()
            yield triple, context
python
{ "resource": "" }
q45273
Handler.load
train
def load(self):
    """Reload Handler.ALL_VERS_DATA from the on-disk JSON version file."""
    pkg_dir = os.path.dirname(os.path.abspath(__file__))
    json_path = os.sep.join([pkg_dir, c.FOLDER_JSON, c.FILE_GAME_VERSIONS])
    Handler.ALL_VERS_DATA = {}  # drop stale records; do not retain defunct information
    with open(json_path, "r") as handle:
        records = json.load(handle)
    self.update(records)
    self._updated = False  # freshly loaded data has nothing pending to save
python
{ "resource": "" }
q45274
Handler.save
train
def save(self, new=None, timeout=2):
    """write ALL_VERS_DATA to disk in 'pretty' format

    Optionally merges *new* records first, backs up the existing file
    (timestamped), then writes the sorted records.  Retries for up to
    *timeout* seconds if another process holds the file.
    """
    if new:
        self.update(new)  # allow two operations (update + save) with a single command
    if not self._updated:
        return  # nothing to do
    thisPkg = os.path.dirname(__file__)
    filename = os.path.join(thisPkg, c.FOLDER_JSON, c.FILE_GAME_VERSIONS)
    fParts = c.FILE_GAME_VERSIONS.split('.')
    # Timestamped backup name, e.g. versions_<date>.json
    newFile = os.path.join(thisPkg, c.FOLDER_JSON, "%s_%s.%s"%(fParts[0], dateFormat.now(), fParts[1]))
    if not os.path.isfile(newFile):
        #fParts = c.FILE_GAME_VERSIONS.split('.')
        #newFile = "%s%s%s_%s.%s"%(c.FOLDER_JSON, os.sep, fParts[0], dateFormat.now(), fParts[1])
        #if not os.path.isfile(newFile):
        #print(filename)
        #print(newFile)
        os.rename(filename, newFile)  # backup existing version file
    recordKeys = [(record["version"], record) for record in Handler.ALL_VERS_DATA.values()]
    data = [r for k,r in sorted(recordKeys)]  # i.e. get values sorted by version key
    start = time.time()
    while time.time()-start < timeout:  # allow multiple retries if multiple processes fight over the version file
        try:
            with open(filename, "wb") as f:
                f.write(str.encode(json.dumps(data, indent=4, sort_keys=True)))  # python3 requires encoding str => bytes to write to file
            self._updated = False
            return
        except IOError:
            pass  # continue waiting for file to be available
    # NOTE(review): bare `raise` here is outside any except block, so after a
    # timeout it raises RuntimeError ("No active exception") rather than the
    # prior IOError — confirm intent.
    raise  # after timeout, prior exception is what matters
python
{ "resource": "" }
q45275
Handler.update
train
def update(self, data):
    """Merge newly provided version records into Handler.ALL_VERS_DATA.

    Each record must carry a 'label' key, which indexes it in the master
    dict.  Integer-looking values are coerced to int.  ``self._updated``
    is set whenever anything actually changed, so ``save()`` knows a
    write to disk is needed.

    :param data: a single record dict, or a list of record dicts
    :raises ValueError: if a record lacks the required 'label' key
    """
    if not isinstance(data, list):
        data = [data]  # otherwise no conversion is necessary
    master = Handler.ALL_VERS_DATA
    for record in data:
        for k, v in iteritems(record):  # ensure record contents are typed appropriately
            try:
                record[k] = int(v)
            except ValueError:
                record[k] = v
        try:
            label = record["label"]  # verify this record has the required 'label' key
        except KeyError:
            # BUG FIX: original formatted `kwargs`, a name not defined in this
            # scope (NameError at runtime); report the offending record instead.
            raise ValueError("Must provide a valid label argument. Given:%s%s" % (
                os.linesep,
                ("%s    " % (os.linesep)).join(
                    ["%15s:%s" % (k, v) for k, v in iteritems(record)])))
        try:
            masterLabel = master[label]  # existing record matching this label, if any
        except KeyError:
            # Brand-new label: store the record and mark dirty.
            master[label] = record
            self._updated = True
            continue
        for k, v in iteritems(record):  # determine whether master needs to be updated
            try:
                if masterLabel[k] == v:
                    continue  # value unchanged
            except KeyError:
                pass  # k is a new key, so the record must be updated
            self._updated = True
        # master[label] is known to exist at this point, so merge directly.
        # (The original wrapped this in a dead `except KeyError: break` which,
        # had it ever fired, would have silently skipped remaining records.)
        master[label].update(record)
python
{ "resource": "" }
q45276
getLocalIPaddress
train
def getLocalIPaddress():
    """Return this machine's LAN-visible IP address, or None on failure.

    Opens a UDP socket toward an external host purely to let the OS pick
    the outbound interface; no packets are actually sent.
    """
    s = None
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(('google.com', 0))  # UDP connect: just selects the route/interface
        my_local_ip = s.getsockname()[0]  # takes ~0.005s
        # from netifaces import interfaces, ifaddresses, AF_INET would be the
        # full solution if multiple NICs must be enumerated.
    except Exception:
        my_local_ip = None
    finally:
        # FIX: the original leaked the socket; always close it.
        if s is not None:
            s.close()
    return my_local_ip
python
{ "resource": "" }
q45277
getPublicIPaddress
train
def getPublicIPaddress(timeout=c.DEFAULT_TIMEOUT):
    """Return this machine's public (internet-visible) IP address.

    Tries several 'what is my IP' web services in order, retrying until
    *timeout* seconds have elapsed.  Each failure is printed with the
    service URL; the last failure is raised if no address was obtained.
    """
    start = time.time()
    my_public_ip = None
    err = Exception  # last failure seen; raised when all attempts fail
    while my_public_ip == None:
        if time.time() - start > timeout:
            break
        try:
            # httpbin.org -- site is useful to test scripts / applications.
            my_public_ip = json.load(urlopen('http://httpbin.org/ip'))['origin']  # takes ~0.14s as ipv4
            if my_public_ip:
                break
        except Exception as exc:
            err = exc  # FIX: py3 unbinds the `as` name after the handler exits
            print(type(exc), exc, "http://httpbin.org/ip")
        try:
            # jsonip.com -- sole purpose of this domain is to return IP address in JSON.
            my_public_ip = json.load(urlopen('http://jsonip.com'))['ip']  # takes ~0.24s as ipv6
            if my_public_ip:
                break
        except Exception as exc:
            err = exc
            print(type(exc), exc, "http://jsonip.com")
        try:
            # ipify.org -- no rate limiting, works for both IPv4 and IPv6.
            # BUG FIX: the original called bare `load` (NameError); use json.load.
            my_public_ip = json.load(urlopen('https://api.ipify.org/?format=json'))['ip']  # takes ~0.33s
            if my_public_ip:
                break
        except Exception as exc:
            err = exc
            print(type(exc), exc, "https://api.ipify.org/")
        try:
            # ip.42.pl -- returns the raw address; no JSON parsing needed.
            my_public_ip = urlopen('http://ip.42.pl/raw').read()  # takes ~0.35s
            if my_public_ip:
                break
        except Exception as exc:
            err = exc
            print(type(exc), exc, "http://ip.42.pl/raw")
    if not my_public_ip:
        raise err
    return my_public_ip
python
{ "resource": "" }
q45278
bits_to_dict
train
def bits_to_dict(bits):
    """Convert a Django template tag's kwargs into a dictionary of Python types.

    The only necessary types are number, boolean, list, and string.
    http://pygments.org/docs/formatters/#HtmlFormatter

    from: ["style='monokai'", "boolean='true',", 'num=0,']
    to:   {'style': 'monokai', 'boolean': True, 'num': 0}

    :param bits: list of ``key=value`` strings, possibly comma-terminated
    :returns: dict of keys to evaluated Python values
    """
    options = {}
    for bit in bits:
        # Strip any trailing comma separating template-tag kwargs.
        if bit.endswith(','):
            bit = bit[:-1]
        # Split on the first '=' only, so values may themselves contain '='.
        key, value = bit.split('=', 1)
        if value in ("'true'", "'false'"):
            # BUG FIX: the original title-cased the *quoted* string, so
            # literal_eval produced the string 'True'/'False' instead of a
            # bool (contradicting this function's documented behavior).
            # Strip the quotes first so literal_eval yields a real boolean.
            value = value.strip("'").title()
        options[key] = ast.literal_eval(value)
    return options
python
{ "resource": "" }
q45279
pygmentify
train
def pygmentify(value, **kwargs):
    """Return a highlighted code block with Pygments.

    Parses *value* as HTML, and for every ``<pre>`` block picks a lexer
    from the block's CSS classes (falling back to guessing from content),
    then replaces the block with Pygments-highlighted markup.  ``kwargs``
    are passed to :class:`HtmlFormatter` and must include ``cssclass``.
    """
    soup = BeautifulSoup(value, 'html.parser')
    for pre in soup.find_all('pre'):
        # Get code
        code = ''.join([to_string(item) for item in pre.contents])
        # Undo HTML entity escaping so the lexer sees the raw source text.
        code = code.replace('&lt;', '<')
        code = code.replace('&gt;', '>')
        code = code.replace('&#39;', "'")
        code = code.replace('&quot;', '"')
        code = code.replace('&amp;', '&')
        # Get lexer by language
        class_list = pre.get('class', [])
        lexers = []
        options = {
            'stripall': True
        }
        # Collect all found lexers
        for c in class_list:
            try:
                lexers.append(get_lexer_by_name(c, **options))
            except ClassNotFound:
                pass
        # Get first lexer match or none
        try:
            lexer = lexers[0]
        except IndexError:
            lexer = None
        # If no lexer, try guessing
        if lexer is None:
            try:
                lexer = guess_lexer(pre.text, **options)
                # Record the guessed language's aliases as classes too.
                class_list += [alias for alias in lexer.aliases]
            except ClassNotFound:
                pass
        if lexer is not None:
            # Get formatter
            formatter = HtmlFormatter(**kwargs)
            # Highlight code
            highlighted = highlight(code, lexer, formatter)
            class_string = ' '.join([c for c in class_list])
            # Re-attach the original (plus detected) classes to the inner <pre>.
            highlighted = highlighted.replace(
                '<div class="%s"><pre>' % kwargs['cssclass'],
                '<div class="%s"><pre class="%s">' % (kwargs['cssclass'], class_string)
            )
            pre.replace_with(highlighted)
    return soup.decode(formatter=None).strip()
python
{ "resource": "" }
q45280
_encode_multipart_formdata
train
def _encode_multipart_formdata(fields, files): """ Create a multipart encoded form for use in PUTing and POSTing. fields is a sequence of (name, value) elements for regular form fields. files is a sequence of (name, filename, value) elements for data to be uploaded as files Return (content_type, body) ready for httplib.HTTP instance """ BOUNDARY = '----------A_vEry_UnlikelY_bouNdary_$' CRLF = '\r\n' L = [] for (key, value) in fields: L.append('--' + BOUNDARY) L.append(str('Content-Disposition: form-data; name="%s"' % key)) L.append('') L.append(value) for (key, filename, value) in files: L.append('--' + BOUNDARY) L.append(str('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))) L.append('Content-Type: %s' % get_content_type(filename)) L.append('') L.append(value) L.append('--' + BOUNDARY + '--') L.append('') body = CRLF.join(L) content_type = 'multipart/form-data; boundary=%s' % BOUNDARY return content_type, body
python
{ "resource": "" }
q45281
_generate_read_callable
train
def _generate_read_callable(name, display_name, arguments, regex, doc, supported): """ Returns a callable which conjures the URL for the resource and GETs a response """ def f(self, *args, **kwargs): url = self._generate_url(regex, args) if 'params' in kwargs: url += "?" + urllib.urlencode(kwargs['params']) return self._get_data(url, accept=(kwargs.get('accept'))) f.__name__ = str('read_%s' % name) f.__doc__ = doc f._resource_uri = regex f._get_args = arguments f._put_or_post_args = None f.resource_name = display_name f.is_api_call = True f.is_supported_api = supported return f
python
{ "resource": "" }
q45282
_generate_create_callable
train
def _generate_create_callable(name, display_name, arguments, regex, doc, supported, post_arguments, is_action): """ Returns a callable which conjures the URL for the resource and POSTs data """ def f(self, *args, **kwargs): for key, value in args[-1].items(): if type(value) == file: return self._put_or_post_multipart('POST', self._generate_url(regex, args[:-1]), args[-1]) return self._put_or_post_json('POST', self._generate_url(regex, args[:-1]), args[-1]) if is_action: f.__name__ = str(name) else: f.__name__ = str('create_%s' % name) f.__doc__ = doc f._resource_uri = regex f._get_args = arguments f._put_or_post_args = post_arguments f.resource_name = display_name f.is_api_call = True f.is_supported_api = supported return f
python
{ "resource": "" }
q45283
Client.print_help
train
def print_help(self):
    """
    Prints the api method info to stdout for debugging.

    Generated API callables are grouped by resource (display name plus
    description) and each one is listed with its calling signature and
    the HTTP call it maps to. (Python 2 ``print`` statement syntax.)
    """
    # Group key: (resource display name, stripped resource description).
    keyfunc = lambda x: (x.resource_name, x.__doc__.strip())
    # Collect every generated, supported API callable on this client.
    # sorted() must use the same key as groupby() so that groupby yields
    # exactly one group per resource.
    resources = groupby(sorted(filter(lambda x: (hasattr(x, 'is_api_call') and x.is_api_call and x.is_supported_api), [getattr(self, resource) for resource in dir(self)]), key=keyfunc), key=keyfunc)
    for resource_desc, resource_methods in resources:
        print resource_desc[0]
        print '\t', resource_desc[1]
        print
        print '\t', 'Available methods:'
        for r in resource_methods:
            # Build e.g. "create_widget(id,{title,body})".
            method_header = r.__name__ + '('
            if r._get_args:
                method_header += ','.join(r._get_args)
            if r._put_or_post_args:
                # Flatten the grouped payload argument descriptions into
                # a single list of field names.
                put_or_post_args = [arg['name'] for arg in reduce(lambda x, y: x+y, r._put_or_post_args.values())]
                method_header += ',{' + ','.join(put_or_post_args) + '}'
            method_header += ')'
            method_desc = ""
            # The method-name prefix determines the HTTP verb used.
            if r.__name__.startswith('create'):
                method_desc = 'Corresponding API call: POST to ' + r._resource_uri
            if r.__name__.startswith('update'):
                method_desc = 'Corresponding API call: PUT to ' + r._resource_uri
            if r.__name__.startswith('read'):
                method_desc = 'Corresponding API call: GET to ' + r._resource_uri
            if r.__name__.startswith('delete'):
                method_desc = 'Corresponding API call: DELETE to ' + r._resource_uri
            print '\t\t', method_header, ' - ', method_desc
        print
python
{ "resource": "" }
q45284
Client._construct_request
train
def _construct_request(self):
    """
    Build an HTTP(S) connection to the API endpoint together with the
    default request headers.

    :returns: A ``(connection, headers)`` tuple.
    """
    # Pick the connection class matching the endpoint's scheme.
    if self.parsed_endpoint.scheme == 'https':
        connection_class = httplib.HTTPSConnection
    else:
        connection_class = httplib.HTTPConnection
    conn = connection_class(self.parsed_endpoint.netloc)
    head = {
        "Accept": "application/json",
        "User-Agent": USER_AGENT,
        API_TOKEN_HEADER_NAME: self.api_token,
    }
    # Only these two API versions get an explicit version header.
    if self.api_version in ['0.1', '0.01a']:
        head[API_VERSION_HEADER_NAME] = self.api_version
    return conn, head
python
{ "resource": "" }
q45285
Client._delete_resource
train
def _delete_resource(self, url): """ DELETEs the resource at url """ conn, head = self._construct_request() conn.request("DELETE", url, "", head) resp = conn.getresponse() self._handle_response_errors('DELETE', url, resp)
python
{ "resource": "" }
q45286
Client._get_data
train
def _get_data(self, url, accept=None):
    """
    GET the resource at *url* and return the response body.

    :param url: The resource path to fetch.
    :param accept: Optional value for the Accept header; when omitted no
        Accept header is sent at all.
    :returns: The decoded object when the response is JSON, otherwise the
        raw response bytes.
    """
    # NOTE: this builds its own connection (rather than using
    # _construct_request) so that the Accept header stays optional.
    if self.parsed_endpoint.scheme == 'https':
        connection = httplib.HTTPSConnection(self.parsed_endpoint.netloc)
    else:
        connection = httplib.HTTPConnection(self.parsed_endpoint.netloc)
    headers = {
        "User-Agent": USER_AGENT,
        API_TOKEN_HEADER_NAME: self.api_token,
    }
    # Only these two API versions get an explicit version header.
    if self.api_version in ['0.1', '0.01a']:
        headers[API_VERSION_HEADER_NAME] = self.api_version
    if accept:
        headers['Accept'] = accept
    connection.request("GET", url, "", headers)
    response = connection.getresponse()
    self._handle_response_errors('GET', url, response)
    content_type = response.getheader('content-type')
    if 'application/json' in content_type:
        return json.loads(response.read())
    return response.read()
python
{ "resource": "" }
q45287
Client._put_or_post_multipart
train
def _put_or_post_multipart(self, method, url, data):
    """
    Encode *data* as a multipart form and PUT or POST it to *url*.

    Values that are open file objects are uploaded as file parts; all
    other values are sent as regular form fields.

    :param method: The HTTP verb, 'PUT' or 'POST'.
    :param url: The resource path to send the request to.
    :param data: A dict mapping field names to values (strings or open
        file objects).
    :returns: The JSON-decoded response body.
    :raises IOError: If the response status is not 200 or 202.
    """
    fields = []
    files = []
    # Split the payload into plain fields and file uploads.
    for key, value in data.items():
        # ``file`` is the Python 2 built-in file type.
        if type(value) == file:
            files.append((key, value.name, value.read()))
        else:
            fields.append((key, value))
    content_type, body = _encode_multipart_formdata(fields, files)
    # httplib.HTTP/HTTPS is the legacy Python 2 interface: headers are
    # sent one at a time with putheader() and must precede endheaders().
    if self.parsed_endpoint.scheme == 'https':
        h = httplib.HTTPS(self.parsed_endpoint.netloc)
    else:
        h = httplib.HTTP(self.parsed_endpoint.netloc)
    h.putrequest(method, url)
    h.putheader('Content-Type', content_type)
    h.putheader('Content-Length', str(len(body)))
    h.putheader('Accept', 'application/json')
    h.putheader('User-Agent', USER_AGENT)
    h.putheader(API_TOKEN_HEADER_NAME, self.api_token)
    # Only these two API versions get an explicit version header.
    if self.api_version in ['0.1', '0.01a']:
        h.putheader(API_VERSION_HEADER_NAME, self.api_version)
    h.endheaders()
    h.send(body)
    errcode, errmsg, headers = h.getreply()
    # 200 (OK) and 202 (Accepted) are the only success statuses.
    if errcode not in [200, 202]:
        raise IOError('Response to %s to URL %s was status code %s: %s' % (method, url, errcode, h.file.read()))
    return json.loads(h.file.read())
python
{ "resource": "" }
q45288
Client._put_or_post_json
train
def _put_or_post_json(self, method, url, data):
    """
    Send *data* as a JSON body via PUT or POST and decode the JSON reply.

    :param method: The HTTP verb, 'PUT' or 'POST'.
    :param url: The resource path to send the request to.
    :param data: The payload; it is serialized with ``json.dumps``.
    :returns: The JSON-decoded response body.
    """
    if self.parsed_endpoint.scheme == 'https':
        connection = httplib.HTTPSConnection(self.parsed_endpoint.netloc)
    else:
        connection = httplib.HTTPConnection(self.parsed_endpoint.netloc)
    headers = {
        "Content-Type": "application/json",
        "Accept": "application/json",
        "User-Agent": USER_AGENT,
        API_TOKEN_HEADER_NAME: self.api_token,
    }
    # Only these two API versions get an explicit version header.
    if self.api_version in ['0.1', '0.01a']:
        headers[API_VERSION_HEADER_NAME] = self.api_version
    connection.request(method, url, json.dumps(data), headers)
    response = connection.getresponse()
    self._handle_response_errors(method, url, response)
    return json.loads(response.read())
python
{ "resource": "" }
q45289
find_importer_frame
train
def find_importer_frame():
    """Returns the outer frame importing this "end" module.

    If this module is being imported by other means than import statement,
    None is returned.

    Returns:
        A frame object or None.
    """
    # Bytecode indexing yields 1-char strings on Python 2 and ints on
    # Python 3, so normalize both to ints. (Bare ``end`` statements below
    # are this package's own block-closing markers.)
    byte = lambda ch: ord(ch) if PY2 else ch
    frame = inspect.currentframe()
    try:
        # Walk outwards until a frame is stopped on an IMPORT_NAME opcode
        # whose argument names 'end' — that frame is the importer.
        while frame:
            code = frame.f_code
            lasti = frame.f_lasti
            if byte(code.co_code[lasti]) == dis.opmap['IMPORT_NAME']:
                # FIXME: Support EXTENDED_ARG.
                # Decode the little-endian two-byte oparg following the
                # opcode.
                arg = (
                    byte(code.co_code[lasti + 1]) +
                    byte(code.co_code[lasti + 2]) * 256)
                name = code.co_names[arg]
                if name == 'end':
                    break
                end
            end
            frame = frame.f_back
        end
        return frame
    finally:
        # Break the reference cycle frames create with their locals.
        del frame
    end
python
{ "resource": "" }
q45290
is_end_node
train
def is_end_node(node):
    """Tell whether *node* is a bare ``end`` expression statement.

    Args:
        node: AST node.

    Returns:
        True if the node is the "end" keyword, otherwise False.
    """
    if not isinstance(node, ast.Expr):
        return False
    value = node.value
    return isinstance(value, ast.Name) and value.id == 'end'
python
{ "resource": "" }
q45291
get_compound_bodies
train
def get_compound_bodies(node):
    """Returns a list of bodies of a compound statement node.

    Args:
        node: AST node.

    Returns:
        A list of bodies of the node. If the given node does not represent
        a compound statement, an empty list is returned.
    """
    # NOTE: the PY2/PY3 guards must stay first in their conditions so the
    # version-specific ast classes are only touched on the matching
    # interpreter. The bare ``end`` below is this package's own
    # block-closing marker.
    if isinstance(node, (ast.Module, ast.FunctionDef, ast.ClassDef, ast.With)):
        return [node.body]
    elif isinstance(node, (ast.If, ast.While, ast.For)):
        return [node.body, node.orelse]
    elif PY2 and isinstance(node, ast.TryFinally):
        return [node.body, node.finalbody]
    elif PY2 and isinstance(node, ast.TryExcept):
        return [node.body, node.orelse] + [h.body for h in node.handlers]
    elif PY3 and isinstance(node, ast.Try):
        return ([node.body, node.orelse, node.finalbody] +
                [h.body for h in node.handlers])
    end
    return []
python
{ "resource": "" }
q45292
check_end_blocks
train
def check_end_blocks(frame):
    """Performs end-block check.

    Args:
        frame: A frame object of the module to be checked.

    Raises:
        SyntaxError: If check failed.
    """
    try:
        # Resolve the importing module's name and source; if either is
        # unavailable, warn and skip the check rather than fail.
        try:
            module_name = frame.f_globals['__name__']
        except KeyError:
            warnings.warn(
                'Can not get the source of an uknown module. '
                'End-of-block syntax check is skipped.',
                EndSyntaxWarning)
            return
        end
        filename = frame.f_globals.get('__file__', '<unknown>')
        try:
            source = inspect.getsource(sys.modules[module_name])
        except Exception:
            warnings.warn(
                'Can not get the source of module "%s". '
                'End-of-block syntax check is skipped.' % (module_name,),
                EndSyntaxWarning)
            return
        end
    finally:
        # Break the reference cycle with the caller's frame.
        del frame
    end
    root = ast.parse(source)
    for node in ast.walk(root):
        bodies = get_compound_bodies(node)
        if not bodies:
            continue
        end
        # FIXME: This is an inaccurate hack to handle if-elif-else.
        if (isinstance(node, ast.If) and
                len(node.orelse) == 1 and
                isinstance(node.orelse[0], ast.If)):
            continue
        end
        # FIXME: This is an inaccurate hack to handle try-except-finally
        # statement which is parsed as ast.TryExcept in ast.TryFinally in
        # Python 2.
        if (PY2 and isinstance(node, ast.TryFinally) and
                len(node.body) == 1 and
                isinstance(node.body[0], ast.TryExcept)):
            continue
        end
        for body in bodies:
            # Scan each statement list: every compound statement must be
            # immediately followed by a bare "end", and "end" may appear
            # nowhere else.
            skip_next = False
            for i, child in enumerate(body):
                if skip_next:
                    # This "end" was consumed by the preceding block.
                    skip_next = False
                elif is_end_node(child):
                    # An "end" with no block right before it.
                    raise SyntaxError(
                        '"end" does not close a block.',
                        [filename, child.lineno, child.col_offset,
                         source.splitlines()[child.lineno - 1] + '\n'])
                elif get_compound_bodies(child):
                    # A compound statement: the next sibling must be
                    # "end" (IndexError means it was the last statement).
                    try:
                        ok = is_end_node(body[i + 1])
                    except IndexError:
                        ok = False
                    end
                    if not ok:
                        raise SyntaxError(
                            'This block is not closed with "end".',
                            [filename, child.lineno, child.col_offset,
                             source.splitlines()[child.lineno - 1] + '\n'])
                    end
                    skip_next = True
                end
            end
        end
    end
python
{ "resource": "" }
q45293
metadata_to_buffers
train
def metadata_to_buffers(metadata):
    """
    Serialize a metadata dict into a flat list of wire-format buffers.

    Each entry contributes four buffers: a 1-byte name length, the name,
    a big-endian 4-byte value length, and the value.

    :param metadata: The metadata, as a dict.
    :returns: A list of buffers.
    """
    buffers = []
    for name, data in metadata.items():
        # Wire-format limits: the name length must fit in one byte and
        # the value length in four bytes.
        assert len(name) < 256
        assert len(data) < 2 ** 32
        buffers.append(struct.pack('!B', len(name)))
        buffers.append(name)
        buffers.append(struct.pack('!I', len(data)))
        buffers.append(data)
    return buffers
python
{ "resource": "" }
q45294
buffer_to_metadata
train
def buffer_to_metadata(buffer):
    """
    Transform a buffer to a metadata dictionary.

    Each entry in the buffer is a 1-byte name length, the name, a
    big-endian 4-byte value length, and the value.

    :param buffer: The buffer, as received in a READY command.
    :returns: A metadata dictionary, with its keys normalized (in
        lowercase).
    :raises ProtocolError: If an entry's declared name or value length
        runs past the end of the buffer.
    """
    offset = 0
    size = len(buffer)
    metadata = {}
    while offset < size:
        name_size = struct.unpack_from('B', buffer, offset)[0]
        offset += 1
        # The name plus its 4-byte value-size field must fit in the bytes
        # remaining from the current offset (not the total buffer size,
        # which would let truncated multi-entry buffers through).
        if offset + name_size + 4 > size:
            raise ProtocolError(
                "Invalid name size in metadata",
                fatal=True,
            )
        name = buffer[offset:offset + name_size]
        offset += name_size
        value_size = struct.unpack_from('!I', buffer, offset)[0]
        offset += 4
        # Likewise the value must fit in the remaining bytes.
        if offset + value_size > size:
            raise ProtocolError(
                "Invalid value size in metadata",
                fatal=True,
            )
        value = buffer[offset:offset + value_size]
        offset += value_size
        metadata[name.lower()] = value
    return metadata
python
{ "resource": "" }
q45295
Socket.generate_identity
train
def generate_identity(self):
    """
    Return a fresh peer identity frame.

    The identity is five bytes: a zero byte followed by a big-endian
    32-bit counter that wraps back to zero after 2**32 identities.
    """
    frame = struct.pack('!BI', 0, self._base_identity)
    # Advance the counter, wrapping at the 32-bit boundary.
    self._base_identity = (self._base_identity + 1) % (2 ** 32)
    return frame
python
{ "resource": "" }
q45296
Socket._wait_peers
train
async def _wait_peers(self): """ Blocks until at least one non-dead peer is available. """ # Make sure we remove dead peers. for p in self._peers[:]: if p.dead: self._peers.remove(p) while not self._peers: await self._peers.wait_not_empty()
python
{ "resource": "" }
q45297
Socket._fair_get_in_peer
train
async def _fair_get_in_peer(self):
    """
    Get the first inbound peer with a non-empty inbox, in a fair manner.

    :returns: A `Peer` whose inbox is guaranteed not to be empty (and
        thus can be read from without blocking).
    """
    peer = None
    while not peer:
        await self._wait_peers()
        # This rotates the list, implementing fair-queuing.
        peers = list(self._in_peers)
        # tasks[0] watches for changes to the peer list itself; the
        # remaining tasks wait, pairwise with ``peers``, for each peer's
        # inbox to become non-empty.
        tasks = [asyncio.ensure_future(self._in_peers.wait_change())]
        tasks.extend([
            asyncio.ensure_future(
                p.inbox.wait_not_empty(),
                loop=self.loop,
            )
            for p in peers
        ])
        try:
            done, pending = await asyncio.wait(
                tasks,
                return_when=asyncio.FIRST_COMPLETED,
                loop=self.loop,
            )
        finally:
            # Always cancel every watcher so no task leaks, even if the
            # wait itself is interrupted.
            for task in tasks:
                task.cancel()
        tasks.pop(0)  # pop the wait_change task.
        # Pick the first peer whose inbox-wait completed before the
        # cancellation above. If only the peer list changed, peer stays
        # None and the outer loop retries with the new list.
        peer = next(
            (
                p
                for task, p in zip(tasks, peers)
                if task in done and not task.cancelled()
            ),
            None,
        )
    return peer
python
{ "resource": "" }
q45298
Socket._fair_recv
train
async def _fair_recv(self):
    """
    Receive from all the existing peers, rotating the list of peers every
    time.

    :returns: The frames.
    """
    # The read lock serializes concurrent receivers so each message is
    # delivered to exactly one caller.
    # NOTE(review): ``with await lock`` is the legacy asyncio lock idiom
    # (pre ``async with``) — confirm the supported interpreter range, as
    # recent Python versions reject it.
    with await self._read_lock:
        # _fair_get_in_peer only returns a peer with a non-empty inbox,
        # so read_nowait() cannot underflow here.
        peer = await self._fair_get_in_peer()
        result = peer.inbox.read_nowait()

    return result
python
{ "resource": "" }
q45299
Socket._fair_get_out_peer
train
async def _fair_get_out_peer(self):
    """
    Get the first available peer, with non-blocking inbox or wait until
    one meets the condition.

    :returns: The peer whose outbox is ready to be written to.
    """
    peer = None
    while not peer:
        await self._wait_peers()
        # This rotates the list, implementing fair-queuing.
        peers = list(self._out_peers)
        # tasks[0] watches for changes to the peer list itself; the
        # remaining tasks wait, pairwise with ``peers``, for each peer's
        # outbox to have room.
        tasks = [asyncio.ensure_future(self._out_peers.wait_change())]
        tasks.extend([
            asyncio.ensure_future(
                p.outbox.wait_not_full(),
                loop=self.loop,
            )
            for p in peers
        ])
        try:
            done, pending = await asyncio.wait(
                tasks,
                return_when=asyncio.FIRST_COMPLETED,
                loop=self.loop,
            )
        finally:
            # Always cancel every watcher so no task leaks, even if the
            # wait itself is interrupted.
            for task in tasks:
                task.cancel()
        tasks.pop(0)  # pop the wait_change task.
        # Pick the first peer whose wait completed and whose outbox still
        # has room (re-checked, as it may have refilled). If only the
        # peer list changed, peer stays None and the outer loop retries.
        peer = next(  # pragma: no cover
            (
                p
                for task, p in zip(tasks, peers)
                if task in done and not p.outbox.full()
            ),
            None,
        )
    return peer
python
{ "resource": "" }