_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q43600
NamespaceLoader2.load_module
train
def load_module(self, name):
    """Load a namespace module as if coming from an empty file.

    Follows the loader protocol: if the module is already present in
    ``sys.modules`` it is reused (so ``reload()`` works); otherwise a fresh
    namespace module is built from a custom ``ModuleSpec`` and executed.

    :param name: fully qualified dotted module name
    :return: the module object stored in ``sys.modules[name]``
    """
    _verbose_message('namespace module loaded with path {!r}', self.path)
    # Adjusting code from LoaderBasics
    if name in sys.modules:
        mod = sys.modules[name]
        self.exec_module(mod)
        # In this case we do not want to remove the module in case of error
        # Ref : https://docs.python.org/3/reference/import.html#loaders
    else:
        try:
            # Building custom spec and loading as in _LoaderBasics...
            spec = ModuleSpec(name, self, origin='namespace', is_package=True)
            spec.submodule_search_locations = self.path
            # this will call create_module and also initialize the module properly (like for py3)
            mod = module_from_spec(spec)
            # as per https://docs.python.org/3/reference/import.html#loaders
            # NOTE(review): module_from_spec itself does not register the
            # module in sys.modules; this assert presumably relies on
            # create_module/exec hooks doing the registration -- confirm
            assert mod.__name__ in sys.modules
            self.exec_module(mod)
            # We don't ensure that the import-related module attributes get
            # set in the sys.modules replacement case. Such modules are on
            # their own.
        except:
            # as per https://docs.python.org/3/reference/import.html#loaders
            if name in sys.modules:
                del sys.modules[name]
            raise
    return sys.modules[name]
python
{ "resource": "" }
q43601
SourceLoader.get_source
train
def get_source(self, name):
    """Concrete implementation of InspectLoader.get_source."""
    path = self.get_filename(name)
    try:
        source_bytes = self.get_data(path)
    except OSError as exc:
        # surface the underlying OS failure as the import error's cause
        raise _ImportError('source not available through get_data()',
                           name=name) from exc
    return decode_source(source_bytes)
python
{ "resource": "" }
q43602
ImpFileLoader2.exec_module
train
def exec_module(self, module):
    """Execute the module using the old imp."""
    # file should have been resolved before (module creation)
    path = [os.path.dirname(module.__file__)]
    file = None
    try:
        file, pathname, description = imp.find_module(module.__name__.rpartition('.')[-1], path)
        # NOTE(review): this rebinds the *local* name ``module`` to the
        # object returned by imp.load_module; the caller's module object is
        # not replaced by this assignment -- confirm imp.load_module reuses
        # the sys.modules entry as documented
        module = imp.load_module(module.__name__, file, pathname, description)
    finally:
        # imp.find_module returns an open file handle for regular modules;
        # close it whether or not loading succeeded
        if file:
            file.close()
python
{ "resource": "" }
q43603
ImpFileLoader2.load_module
train
def load_module(self, name):
    """Load a module from a file.

    Walks every package level of the dotted ``name`` and loads any level
    not already present in ``sys.modules`` via the legacy ``imp`` API.

    :param name: fully qualified dotted module name
    :return: the module object stored in ``sys.modules[name]``
    :raises ImportError: if a parent module is not a package
    """
    # Implementation inspired from pytest.rewrite and importlib
    # If there is an existing module object named 'name' in
    # sys.modules, the loader must use that existing module. (Otherwise,
    # the reload() builtin will not work correctly.)
    if name in sys.modules:
        return sys.modules[name]
    try:
        # we have already done the search, and gone through package layers
        # so we directly feed the latest module and correct path
        # to reuse the logic for choosing the proper loading behavior
        # TODO : double check maybe we do not need the loop here, already handled by finders in dir hierarchy
        # TODO : use exec_module (recent, more tested API) from here
        parts = name.split('.')
        for depth in range(1, len(parts) + 1):
            pkgname = ".".join(parts[:depth])
            if pkgname in sys.modules:
                continue
            if '.' in pkgname:
                # parent has to be in sys.modules. make sure it is a package, else fails
                parent = pkgname.rpartition('.')[0]
                if '__path__' in vars(sys.modules[parent]):
                    path = sys.modules[parent].__path__
                else:
                    raise ImportError("{0} is not a package (no __path__ detected)".format(parent))
            else:
                # top-level name: pkgname is known absent from sys.modules
                # at this point (guarded above), so the original conditional
                # always produced None here -- imp.find_module then falls
                # back to the default search path
                path = None
            # BUG FIX: initialise before the try; previously, if
            # imp.find_module raised, `file` was unbound and the `finally`
            # clause raised NameError, masking the real error
            file = None
            try:
                file, pathname, description = imp.find_module(pkgname.rpartition('.')[-1], path)
                sys.modules[pkgname] = imp.load_module(pkgname, file, pathname, description)
            finally:
                if file:
                    file.close()
    except:
        # dont pollute the interpreter environment if we dont know what we are doing
        if name in sys.modules:
            del sys.modules[name]
        raise
    return sys.modules[name]
python
{ "resource": "" }
q43604
analyze_xml
train
def analyze_xml(xml):
    """Analyzes `file` against packtools' XMLValidator. """
    def _parse_failure(dtd_errors):
        # summary reported when the document could not even be parsed
        return {
            'dtd_is_valid': False,
            'sps_is_valid': False,
            'is_valid': False,
            'parsing_error': True,
            'dtd_errors': dtd_errors,
            'sps_errors': [],
        }

    f = StringIO(xml)
    try:
        xml = packtools.XMLValidator.parse(f, sps_version='sps-1.4')
    except packtools.exceptions.PacktoolsError as e:
        logger.exception(e)
        return _parse_failure([])
    except XMLSyntaxError as e:
        logger.exception(e)
        return _parse_failure([e.message])
    return summarize(xml)
python
{ "resource": "" }
q43605
ServerBase.install
train
def install(self):
    """ install the server

    Dispatches to a dynamic server importer when a server was requested on
    the command line, otherwise falls back to the generic server command.
    """
    # BUG FIX: removed `except Exception as e: raise e` -- re-raising the
    # caught exception was a no-op that only rewrote the traceback origin,
    # hiding where the failure actually happened
    if self.args.server is not None:
        server = ServerLists(self.server_type)
        DynamicImporter(
            'ezhost', server.name,
            args=self.args,
            configure=self.configure
        )
    else:
        ServerCommand(self.args)
python
{ "resource": "" }
q43606
filehandles
train
def filehandles(path, openers_list=openers, pattern='', verbose=False):
    """Main function that iterates over list of openers and decides which opener to use.

    :param str path: Path.
    :param list openers_list: List of openers.
    :param str pattern: Regular expression pattern.
    :param verbose: Print additional information.
    :type verbose: :py:obj:`True` or :py:obj:`False`
    :return: Filehandle(s).
    """
    # NOTE(review): the default binds the module-level ``openers`` list at
    # definition time; later mutation of that list is visible here
    if not verbose:
        # NOTE(review): VERBOSE is presumably a custom logging level
        # registered elsewhere in this package -- confirm
        logging.disable(logging.VERBOSE)
    for opener in openers_list:
        try:
            for filehandle in opener(path=path, pattern=pattern, verbose=verbose):
                # closing() guarantees each yielded handle is closed when
                # the consumer moves on
                with closing(filehandle):
                    yield filehandle
            break  # use the first successful opener function
        except (zipfile.BadZipfile, tarfile.ReadError, GZValidationError,
                BZ2ValidationError, IOError, NotADirectoryError):
            # this opener cannot handle the path; try the next one
            continue
    else:
        # for/else: no opener broke out of the loop, i.e. none succeeded
        logger.verbose('No opener found for path: "{}"'.format(path))
        yield None
python
{ "resource": "" }
q43607
directory_opener
train
def directory_opener(path, pattern='', verbose=False):
    """Directory opener.

    Walks ``path`` recursively and delegates every file to ``filehandles``
    using all openers except directory openers (to avoid recursion).

    :param str path: Path.
    :param str pattern: Regular expression pattern.
    :return: Filehandle(s).
    :raises NotADirectoryError: if ``path`` is not a directory.
    """
    if not os.path.isdir(path):
        raise NotADirectoryError
    # remove directory opener(s) so we don't recurse into ourselves
    openers_list = [opener for opener in openers
                    if not opener.__name__.startswith('directory')]
    for root, dirlist, filelist in os.walk(path):
        for filename in filelist:
            filename_path = os.path.abspath(os.path.join(root, filename))
            if pattern and not re.match(pattern, filename):
                # BUG FIX: log the file's real location; the original used
                # os.path.abspath(filename), which resolves the bare name
                # against the CWD instead of the directory being walked
                logger.verbose('Skipping file: {}, did not match regex pattern "{}"'.format(filename_path, pattern))
                continue
            for filehandle in filehandles(filename_path, openers_list=openers_list,
                                          pattern=pattern, verbose=verbose):
                yield filehandle
python
{ "resource": "" }
q43608
ziparchive_opener
train
def ziparchive_opener(path, pattern='', verbose=False):
    """Opener that opens files from zip archive..

    :param str path: Path.
    :param str pattern: Regular expression pattern.
    :return: Filehandle(s).
    """
    # remote archives are fetched fully into memory first
    if is_url(path):
        archive = zipfile.ZipFile(io.BytesIO(urlopen(path).read()), 'r')
    else:
        archive = zipfile.ZipFile(path, 'r')
    with archive as ziparchive:
        for zipinfo in ziparchive.infolist():
            if zipinfo.filename.endswith('/'):
                continue  # directory entry, nothing to open
            source = os.path.join(path, zipinfo.filename)
            if pattern and not re.match(pattern, zipinfo.filename):
                logger.verbose('Skipping file: {}, did not match regex pattern "{}"'.format(os.path.abspath(zipinfo.filename), pattern))
                continue
            logger.verbose('Processing file: {}'.format(source))
            yield ziparchive.open(zipinfo)
python
{ "resource": "" }
q43609
tararchive_opener
train
def tararchive_opener(path, pattern='', verbose=False):
    """Opener that opens files from tar archive.

    :param str path: Path.
    :param str pattern: Regular expression pattern.
    :return: Filehandle(s).
    """
    # remote archives are fetched fully into memory first
    if is_url(path):
        archive = tarfile.open(fileobj=io.BytesIO(urlopen(path).read()))
    else:
        archive = tarfile.open(path)
    with archive as tararchive:
        for tarinfo in tararchive:
            if not tarinfo.isfile():
                continue  # skip directories, links, devices, ...
            source = os.path.join(path, tarinfo.name)
            if pattern and not re.match(pattern, tarinfo.name):
                logger.verbose('Skipping file: {}, did not match regex pattern "{}"'.format(os.path.abspath(tarinfo.name), pattern))
                continue
            logger.verbose('Processing file: {}'.format(source))
            yield tararchive.extractfile(tarinfo)
python
{ "resource": "" }
q43610
gzip_opener
train
def gzip_opener(path, pattern='', verbose=False):
    """Opener that opens single gzip compressed file.

    :param str path: Path.
    :param str pattern: Regular expression pattern.
    :return: Filehandle(s).
    :raises GZValidationError: if the stream is not valid gzip data.
    """
    source = path if is_url(path) else os.path.abspath(path)
    filename = os.path.basename(path)
    if pattern and not re.match(pattern, filename):
        logger.verbose('Skipping file: {}, did not match regex pattern "{}"'.format(os.path.abspath(filename), pattern))
        return
    try:
        if is_url(path):
            filehandle = gzip.GzipFile(fileobj=io.BytesIO(urlopen(path).read()))
        else:
            filehandle = gzip.open(path)
        filehandle.read(1)   # probe a byte: fails fast on non-gzip data
        filehandle.seek(0)   # rewind so the consumer sees the whole stream
        logger.verbose('Processing file: {}'.format(source))
        yield filehandle
    except (OSError, IOError):
        raise GZValidationError
python
{ "resource": "" }
q43611
bz2_opener
train
def bz2_opener(path, pattern='', verbose=False):
    """Opener that opens single bz2 compressed file.

    :param str path: Path.
    :param str pattern: Regular expression pattern.
    :return: Filehandle(s).
    :raises BZ2ValidationError: if the stream is not valid bz2 data.
    """
    source = path if is_url(path) else os.path.abspath(path)
    filename = os.path.basename(path)
    if pattern and not re.match(pattern, filename):
        logger.verbose('Skipping file: {}, did not match regex pattern "{}"'.format(os.path.abspath(path), pattern))
        return
    try:
        if is_url(path):
            filehandle = bz2.open(io.BytesIO(urlopen(path).read()))
        else:
            filehandle = bz2.open(path)
        filehandle.read(1)   # probe a byte: fails fast on non-bz2 data
        filehandle.seek(0)   # rewind so the consumer sees the whole stream
        logger.verbose('Processing file: {}'.format(source))
        yield filehandle
    except (OSError, IOError):
        raise BZ2ValidationError
python
{ "resource": "" }
q43612
text_opener
train
def text_opener(path, pattern='', verbose=False):
    """Opener that opens single text file.

    :param str path: Path.
    :param str pattern: Regular expression pattern.
    :return: Filehandle(s).
    """
    source = path if is_url(path) else os.path.abspath(path)
    filename = os.path.basename(path)
    if pattern and not re.match(pattern, filename):
        logger.verbose('Skipping file: {}, did not match regex pattern "{}"'.format(os.path.abspath(path), pattern))
        return
    if is_url(path):
        filehandle = urlopen(path)
    else:
        filehandle = open(path)
    logger.verbose('Processing file: {}'.format(source))
    yield filehandle
python
{ "resource": "" }
q43613
random_string
train
def random_string(length, charset):
    """ Return a random string of the given length from the given
    character set.

    :param int length: The length of string to return
    :param str charset: A string of characters to choose from
    :returns: A random string
    :rtype: str
    """
    pick = random.choice
    return ''.join(pick(charset) for _ in range(length))
python
{ "resource": "" }
q43614
random_alphanum
train
def random_alphanum(length):
    """ Return a random string of ASCII letters and digits.

    :param int length: The length of string to return
    :returns: A random string
    :rtype: str
    """
    return random_string(length, string.ascii_letters + string.digits)
python
{ "resource": "" }
q43615
random_hex
train
def random_hex(length):
    """ Return a random hex string.

    :param int length: The length of string to return
    :returns: A random string
    :rtype: str
    """
    # dedupe hexdigits ('0-9a-fA-F' lowercased) down to 16 unique chars
    hex_chars = ''.join(set(string.hexdigits.lower()))
    return random_string(length, hex_chars)
python
{ "resource": "" }
q43616
format_obj_keys
train
def format_obj_keys(obj, formatter):
    """
    Take a dictionary with string keys and recursively convert
    all keys from one form to another using the formatting function.

    The dictionary may contain lists as values, and any nested
    dictionaries within those lists will also be converted.

    :param object obj: The object to convert
    :param function formatter: The formatting function for keys, which
        takes and returns a string
    :returns: A new object with keys converted
    :rtype: object

    :Example:
    ::

        >>> obj = {
        ...     'dict-list': [
        ...         {'one-key': 123, 'two-key': 456},
        ...         {'threeKey': 789, 'four-key': 456},
        ...     ],
        ...     'some-other-key': 'some-unconverted-value'
        ... }
        >>> format_obj_keys(obj, lambda s: s.upper())
        {
            'DICT-LIST': [
                {'ONE-KEY': 123, 'TWO-KEY': 456},
                {'FOUR-KEY': 456, 'THREE-KEY': 789}
            ],
            'SOME-OTHER-KEY': 'some-unconverted-value'
        }
    """
    # GENERALIZATION: isinstance instead of `type(obj) ==` so list/dict
    # subclasses (e.g. OrderedDict) are converted too; behavior for exact
    # list/dict inputs is unchanged
    if isinstance(obj, list):
        return [format_obj_keys(item, formatter) for item in obj]
    if isinstance(obj, dict):
        return {formatter(k): format_obj_keys(v, formatter)
                for k, v in obj.items()}
    return obj
python
{ "resource": "" }
q43617
Graph.merge_nodes
train
def merge_nodes(self, keep_node, kill_node): """ Merge two nodes in the graph. Takes two nodes and merges them together, merging their links by combining the two link lists and summing the weights of links which point to the same node. All links in the graph pointing to ``kill_node`` will be merged into ``keep_node``. Links belonging to ``kill_node`` which point to targets not in ``self.node_list`` will not be merged into ``keep_node`` Args: keep_node (Node): node to be kept kill_node (Node): node to be deleted Returns: None Example: >>> from blur.markov.node import Node >>> node_1 = Node('One') >>> node_2 = Node('Two') >>> node_3 = Node('Three') >>> node_1.add_link(node_3, 7) >>> node_2.add_link(node_1, 1) >>> node_2.add_link(node_2, 3) >>> node_3.add_link(node_2, 5) >>> graph = Graph([node_1, node_2, node_3]) >>> print([node.value for node in graph.node_list]) ['One', 'Two', 'Three'] >>> graph.merge_nodes(node_2, node_3) >>> print([node.value for node in graph.node_list]) ['One', 'Two'] >>> for link in graph.node_list[1].link_list: ... print('{} {}'.format(link.target.value, link.weight)) One 1 Two 8 """ # Merge links from kill_node to keep_node for kill_link in kill_node.link_list: if kill_link.target in self.node_list: keep_node.add_link(kill_link.target, kill_link.weight) # Merge any links in the graph pointing to kill_node into links # pointing to keep_node for node in self.node_list: for link in node.link_list: if link.target == kill_node: node.add_link(keep_node, link.weight) break # Remove kill_node from the graph self.remove_node(kill_node)
python
{ "resource": "" }
q43618
Graph.add_nodes
train
def add_nodes(self, nodes):
    """
    Add a given node or list of nodes to self.node_list.

    Args:
        nodes (Node or list[Node]): the node or list of nodes
            to add to the graph

    Returns: None

    Examples:
        Adding one node: ::

            >>> from blur.markov.node import Node
            >>> graph = Graph()
            >>> node_1 = Node('One')
            >>> graph.add_nodes(node_1)
            >>> print([node.value for node in graph.node_list])
            ['One']

        Adding multiple nodes at a time in a list: ::

            >>> from blur.markov.node import Node
            >>> graph = Graph()
            >>> node_1 = Node('One')
            >>> node_2 = Node('Two')
            >>> graph.add_nodes([node_1, node_2])
            >>> print([node.value for node in graph.node_list])
            ['One', 'Two']
    """
    if isinstance(nodes, list):
        self.node_list.extend(nodes)
    else:
        self.node_list.append(nodes)
python
{ "resource": "" }
q43619
Graph.feather_links
train
def feather_links(self, factor=0.01, include_self=False):
    """
    Feather the links of connected nodes.

    Go through every node in the network and make it inherit the links
    of the other nodes it is connected to. Because the link weight sum
    for any given node can be very different within a graph, the
    weights of inherited links are made proportional to the sum weight
    of the parent nodes.

    Args:
        factor (float): multiplier of neighbor links
        include_self (bool): whether nodes can inherit links pointing
            to themselves

    Returns: None

    Example:
        >>> from blur.markov.node import Node
        >>> node_1 = Node('One')
        >>> node_2 = Node('Two')
        >>> node_1.add_link(node_2, 1)
        >>> node_2.add_link(node_1, 1)
        >>> graph = Graph([node_1, node_2])
        >>> for link in graph.node_list[0].link_list:
        ...     print('{} {}'.format(link.target.value, link.weight))
        Two 1
        >>> graph.feather_links(include_self=True)
        >>> for link in graph.node_list[0].link_list:
        ...     print('{} {}'.format(link.target.value, link.weight))
        Two 1
        One 0.01
    """
    def feather_node(node):
        # Inherit links from every neighbor of ``node``, scaled by how
        # strongly ``node`` points at that neighbor.
        node_weight_sum = sum(l.weight for l in node.link_list)
        # NOTE(review): a node with an empty link_list makes
        # node_weight_sum == 0 and the division below raise
        # ZeroDivisionError -- confirm isolated nodes never reach here
        # Iterate over a copy of the original link list since we will
        # need to refer to this while modifying node.link_list
        for original_link in node.link_list[:]:
            neighbor_node = original_link.target
            neighbor_weight = original_link.weight
            feather_weight = neighbor_weight / node_weight_sum
            neighbor_node_weight_sum = sum(l.weight
                                           for l in neighbor_node.link_list)
            # Iterate over the links belonging to the neighbor_node,
            # copying its links to ``node`` with proportional weights
            for neighbor_link in neighbor_node.link_list:
                if (not include_self) and (neighbor_link.target == node):
                    continue
                relative_link_weight = (neighbor_link.weight /
                                        neighbor_node_weight_sum)
                feathered_link_weight = round((relative_link_weight *
                                               feather_weight * factor), 2)
                node.add_link(neighbor_link.target, feathered_link_weight)
    for n in self.node_list:
        feather_node(n)
python
{ "resource": "" }
q43620
Graph.apply_noise
train
def apply_noise(self, noise_weights=None, uniform_amount=0.1):
    """
    Add noise to every link in the network.

    Can use either a ``uniform_amount`` or a ``noise_weights`` weight
    profile. If ``noise_weights`` is set, ``uniform_amount`` is ignored.

    Args:
        noise_weights (list): a list of weight tuples of form
            ``(float, float)`` corresponding to ``(amount, weight)``
            describing the noise to be added to each link in the graph
        uniform_amount (float): the maximum amount of uniform noise
            to be applied if ``noise_weights`` is not set

    Returns: None

    Example:
        >>> from blur.markov.node import Node
        >>> node_1 = Node('One')
        >>> node_2 = Node('Two')
        >>> node_1.add_link(node_1, 3)
        >>> node_1.add_link(node_2, 5)
        >>> node_2.add_link(node_1, 1)
        >>> graph = Graph([node_1, node_2])
        >>> graph.apply_noise()
        >>> for link in graph.node_list[0].link_list:
        ...     print('{} {}'.format(
        ...         link.target.value, link.weight)) # doctest: +SKIP
        One 3.154
        Two 5.321
    """
    use_profile = noise_weights is not None
    for node in self.node_list:
        for link in node.link_list:
            if use_profile:
                noise = round(weighted_rand(noise_weights), 3)
            else:
                noise = round(random.uniform(0, link.weight * uniform_amount), 3)
            link.weight += noise
python
{ "resource": "" }
q43621
Graph.find_node_by_value
train
def find_node_by_value(self, value):
    """
    Find and return a node in self.node_list with the value ``value``.

    If multiple nodes exist with the value ``value``, return the first
    one found. If no such node exists, this returns ``None``.

    Args:
        value (Any): The value of the node to find

    Returns:
        Node: A node with value ``value`` if it was found
        None: If no node exists with value ``value``

    Example:
        >>> from blur.markov.node import Node
        >>> node_1 = Node('One')
        >>> graph = Graph([node_1])
        >>> found_node = graph.find_node_by_value('One')
        >>> found_node == node_1
        True
    """
    for node in self.node_list:
        if node.value == value:
            return node
    return None
python
{ "resource": "" }
q43622
Graph.remove_node
train
def remove_node(self, node):
    """
    Remove a node from ``self.node_list`` and links pointing to it.

    If ``node`` is not in the graph, do nothing.

    Args:
        node (Node): The node to be removed

    Returns: None

    Example:
        >>> from blur.markov.node import Node
        >>> node_1 = Node('One')
        >>> graph = Graph([node_1])
        >>> graph.remove_node(node_1)
        >>> len(graph.node_list)
        0
    """
    try:
        self.node_list.remove(node)
    except ValueError:
        return  # not in the graph; nothing to do
    # Remove links pointing to the deleted node
    for remaining in self.node_list:
        remaining.link_list = [link for link in remaining.link_list
                               if link.target != node]
python
{ "resource": "" }
q43623
Graph.remove_node_by_value
train
def remove_node_by_value(self, value):
    """
    Delete all nodes in ``self.node_list`` with the value ``value``.

    Args:
        value (Any): The value to find and delete owners of.

    Returns: None

    Example:
        >>> from blur.markov.node import Node
        >>> node_1 = Node('One')
        >>> graph = Graph([node_1])
        >>> graph.remove_node_by_value('One')
        >>> len(graph.node_list)
        0
    """
    survivors = [node for node in self.node_list if node.value != value]
    self.node_list = survivors
    # Remove links pointing to the deleted node(s)
    for node in survivors:
        node.link_list = [link for link in node.link_list
                          if link.target.value != value]
python
{ "resource": "" }
q43624
Graph.has_node_with_value
train
def has_node_with_value(self, value):
    """
    Whether any node in ``self.node_list`` has the value ``value``.

    Args:
        value (Any): The value to find in ``self.node_list``

    Returns: bool

    Example:
        >>> from blur.markov.node import Node
        >>> node_1 = Node('One')
        >>> graph = Graph([node_1])
        >>> graph.has_node_with_value('One')
        True
        >>> graph.has_node_with_value('Foo')
        False
    """
    # Rewritten with any(): the original for/else with `return False` in the
    # else clause is trivially misread as an if/else (which would wrongly
    # return after the first node); any() is unambiguous and equivalent.
    return any(node.value == value for node in self.node_list)
python
{ "resource": "" }
q43625
Graph.pick
train
def pick(self, starting_node=None):
    """
    Pick a node on the graph based on the links in a starting node.

    Additionally, set ``self.current_node`` to the newly picked node.

    * if ``starting_node`` is specified, start from there
    * if ``starting_node`` is ``None``, start from ``self.current_node``
    * if ``starting_node`` is ``None`` and ``self.current_node`` is
      ``None``, pick a uniformally random node in ``self.node_list``

    Args:
        starting_node (Node): ``Node`` to pick from.

    Returns: Node

    Example:
        >>> from blur.markov.node import Node
        >>> node_1 = Node('One')
        >>> node_2 = Node('Two')
        >>> node_1.add_link(node_1, 5)
        >>> node_1.add_link(node_2, 2)
        >>> node_2.add_link(node_1, 1)
        >>> graph = Graph([node_1, node_2])
        >>> [graph.pick().get_value() for i in range(5)] # doctest: +SKIP
        ['One', 'One', 'Two', 'One', 'One']
    """
    if starting_node is None and self.current_node is None:
        # no context at all: uniform random start
        chosen = random.choice(self.node_list)
        self.current_node = chosen
        return chosen
    if starting_node is None:
        starting_node = self.current_node
    # weighted pick among the starting node's outgoing links
    candidates = [(link.target, link.weight)
                  for link in starting_node.link_list]
    self.current_node = weighted_choice(candidates)
    return self.current_node
python
{ "resource": "" }
q43626
Graph.from_string
train
def from_string(cls, source, distance_weights=None, merge_same_words=False,
                group_marker_opening='<<', group_marker_closing='>>'):
    """
    Read a string and derive a ``Graph`` from it.

    Words and punctuation marks are made into nodes.

    Punctuation marks are split into separate nodes unless they fall
    between other non-punctuation marks. ``'hello, world'`` is split
    into ``'hello'``, ``','``, and ``'world'``, while ``'who's there?'``
    is split into ``"who's"``, ``'there'``, and ``'?'``.

    To group arbitrary characters together into a single node (e.g. to
    make ``'hello, world!'``), surround the text in question with
    ``group_marker_opening`` and ``group_marker_closing``. With the
    default value, this would look like ``'<<hello, world!>>'``. It is
    recommended that the group markers not appear anywhere in the source
    text where they aren't meant to act as such to prevent unexpected
    behavior.

    The exact regex for extracting nodes is defined by: ::

        expression = r'{0}(.+){1}|([^\w\s]+)\B|([\S]+\b)'.format(
            ''.join('\\' + c for c in group_marker_opening),
            ''.join('\\' + c for c in group_marker_closing)
        )

    Args:
        source (str): the string to derive the graph from
        distance_weights (dict): dict of relative indices corresponding
            with word weights. For example, if a dict entry is ``1: 1000``
            this means that every word is linked to the word which
            follows it with a weight of 1000. ``-4: 350`` would mean that
            every word is linked to the 4th word behind it with a weight
            of 350. A key of ``0`` refers to the weight words get
            pointing to themselves. Keys pointing beyond the edge of the
            word list will wrap around the list.

            The default value for ``distance_weights`` is ``{1: 1}``.
            This means that each word gets equal weight to whatever word
            follows it. Consequently, if this default value is used and
            ``merge_same_words`` is ``False``, the resulting graph
            behavior will simply move linearly through the source,
            wrapping at the end to the beginning.
        merge_same_words (bool): if nodes which have the same value
            should be merged or not.
        group_marker_opening (str): The string used to mark the beginning
            of word groups.
        group_marker_closing (str): The string used to mark the end of
            word groups. It is strongly recommended that this be
            different than ``group_marker_opening`` to prevent unexpected
            behavior with the regex pattern.

    Returns: Graph

    Example:
        >>> graph = Graph.from_string('i have nothing to say and '
        ...                           'i am saying it and that is poetry.')
        >>> ' '.join(graph.pick().value for i in range(8)) # doctest: +SKIP
        'using chance algorithmic in algorithmic art easier blur'
    """
    if distance_weights is None:
        distance_weights = {1: 1}
    # Convert distance_weights to a sorted list of tuples
    # To make output node list order more predictable
    sorted_weights_list = sorted(distance_weights.items(),
                                 key=lambda i: i[0])
    # regex that matches:
    #   * Anything surrounded by
    #     group_marker_opening and group_marker_closing,
    #   * Groups of punctuation marks followed by whitespace
    #   * Any continuous group of non-whitespace characters
    #     followed by whitespace
    expression = r'{0}(.+){1}|([^\w\s]+)\B|([\S]+\b)'.format(
        ''.join('\\' + c for c in group_marker_opening),
        ''.join('\\' + c for c in group_marker_closing)
    )
    matches = re.findall(expression, source)
    # Un-tuple matches since we are only using groups to strip brackets
    # Is there a better way to do this?
    words = [next(t for t in match if t) for match in matches]
    if merge_same_words:
        # Ensure a 1:1 correspondence between words and nodes,
        # and that all links point to these nodes as well
        # Create nodes for every unique word
        temp_node_list = []
        for word in words:
            if word not in (n.value for n in temp_node_list):
                temp_node_list.append(Node(word))
        # Loop through words, attaching links to nodes which correspond
        # to the current word. Ensure links also point to valid
        # corresponding nodes in the node list.
        for i, word in enumerate(words):
            matching_node = next(
                (n for n in temp_node_list if n.value == word))
            for key, weight in sorted_weights_list:
                # Wrap the index of edge items
                wrapped_index = (key + i) % len(words)
                target_word = words[wrapped_index]
                matching_target_node = next(
                    (n for n in temp_node_list if n.value == target_word))
                matching_node.add_link(matching_target_node, weight)
    else:
        # Create one node for every (not necessarily unique) word.
        temp_node_list = [Node(word) for word in words]
        for i, node in enumerate(temp_node_list):
            for key, weight in sorted_weights_list:
                # Wrap the index of edge items
                wrapped_index = (key + i) % len(temp_node_list)
                node.add_link(temp_node_list[wrapped_index], weight)
    graph = cls()
    graph.add_nodes(temp_node_list)
    return graph
python
{ "resource": "" }
q43627
Graph.from_file
train
def from_file(cls, source, distance_weights=None, merge_same_words=False,
              group_marker_opening='<<', group_marker_closing='>>'):
    """
    Read a string from a file and derive a ``Graph`` from it.

    This is a convenience function for opening a file and passing its
    contents to ``Graph.from_string()`` (see that for more detail)

    Args:
        source (str): the file to read and derive the graph from
        distance_weights (dict): dict of relative indices corresponding
            with word weights. See ``Graph.from_string`` for more detail.
        merge_same_words (bool): whether nodes which have the same value
            should be merged or not.
        group_marker_opening (str): The string used to mark the beginning
            of word groups.
        group_marker_closing (str): The string used to mark the end
            of word groups.

    Returns: Graph

    Example:
        >>> graph = Graph.from_file('cage.txt') # doctest: +SKIP
        >>> ' '.join(graph.pick().value for i in range(8)) # doctest: +SKIP
        'poetry i have nothing to say and i'
    """
    # BUG FIX: close the file deterministically; the original
    # `open(source, 'r').read()` leaked the handle until GC
    with open(source, 'r') as source_file:
        source_string = source_file.read()
    return cls.from_string(source_string,
                           distance_weights,
                           merge_same_words,
                           group_marker_opening=group_marker_opening,
                           group_marker_closing=group_marker_closing)
python
{ "resource": "" }
q43628
ModelNotification.notify
train
def notify(self, force_notify=None, use_email=None, use_sms=None, **kwargs):
    """Overridden to only call `notify` if model matches.

    Returns the parent's result when the instance's model label matches
    ``self.model``; otherwise returns False without notifying.
    """
    instance = kwargs.get("instance")
    if instance._meta.label_lower != self.model:
        return False
    return super().notify(
        force_notify=force_notify,
        use_email=use_email,
        use_sms=use_sms,
        **kwargs,
    )
python
{ "resource": "" }
q43629
ZooBorg.getList
train
def getList(self, listtype):
    ''' listtype must be a Zooborg constant '''
    valid_types = (ZooConst.CLIENT, ZooConst.WORKER, ZooConst.BROKER)
    if listtype not in valid_types:
        raise Exception('Zooborg.getList: invalid type')
    self.initconn()
    return self.zk.get_children('/distark/' + listtype + '/list')
python
{ "resource": "" }
q43630
get_object_or_none
train
def get_object_or_none(model, *args, **kwargs):
    """
    Like get_object_or_404, but doesn't throw an exception.

    Allows querying for an object that might not exist without triggering
    an exception.
    """
    manager = model._default_manager
    try:
        return manager.get(*args, **kwargs)
    except model.DoesNotExist:
        return None
python
{ "resource": "" }
q43631
parse_response
train
async def parse_response(response: ClientResponse, schema: dict) -> Any:
    """
    Validate and parse the BMA answer

    :param response: Response of aiohttp request
    :param schema: The expected response structure
    :return: the json data
    :raises jsonschema.ValidationError: if the body is not parseable JSON,
        or (propagated from jsonschema.validate) if it does not match
        ``schema``
    """
    try:
        data = await response.json()
        # the response is closed before validation -- the body has already
        # been fully read by .json()
        response.close()
        if schema is not None:
            jsonschema.validate(data, schema)
        return data
    except (TypeError, json.decoder.JSONDecodeError) as e:
        # Only decode failures are re-wrapped here; schema validation
        # errors raised by jsonschema.validate propagate unchanged.
        raise jsonschema.ValidationError("Could not parse json : {0}".format(str(e)))
python
{ "resource": "" }
q43632
API.reverse_url
train
def reverse_url(self, scheme: str, path: str) -> str:
    """
    Reverses the url using scheme and path given in parameter.

    :param scheme: Scheme of the url
    :param path: Path of the url
    :return: the full url string
    """
    # remove starting slash in path if present
    path = path.lstrip('/')

    server, port = self.connection_handler.server, self.connection_handler.port
    if self.connection_handler.path:
        # BUG FIX: embed the connection handler's own sub-path in the base
        # URL. The original formatted the *request* path into the base and
        # then appended it again below, duplicating it in the final url.
        url = '{scheme}://{server}:{port}/{handler_path}/'.format(
            scheme=scheme,
            server=server,
            port=port,
            handler_path=self.connection_handler.path)
    else:
        url = '{scheme}://{server}:{port}/'.format(scheme=scheme,
                                                   server=server,
                                                   port=port)
    return url + path
python
{ "resource": "" }
q43633
API.requests_get
train
async def requests_get(self, path: str, **kwargs) -> ClientResponse:
    """
    Requests GET wrapper in order to use API parameters.

    :param path: the request path
    :return: the aiohttp response object (body not yet consumed)
    :raises DuniterError: if the node returned a parseable error payload
    :raises ValueError: for any other non-200 response
    """
    # NOTE(review): reverse_url is computed twice (once only for the log
    # line); harmless but redundant
    logging.debug("Request : {0}".format(self.reverse_url(self.connection_handler.http_scheme, path)))
    url = self.reverse_url(self.connection_handler.http_scheme, path)
    response = await self.connection_handler.session.get(url, params=kwargs, headers=self.headers,
                                                         proxy=self.connection_handler.proxy,
                                                         timeout=15)
    if response.status != 200:
        try:
            # try to surface the node's structured error first
            error_data = parse_error(await response.text())
            raise DuniterError(error_data)
        except (TypeError, jsonschema.ValidationError):
            # body was not a well-formed error payload; fall back to a
            # generic error (note: response.text() is awaited a second time)
            raise ValueError('status code != 200 => %d (%s)' % (response.status, (await response.text())))
    return response
python
{ "resource": "" }
q43634
API.requests_post
train
async def requests_post(self, path: str, **kwargs) -> ClientResponse:
    """
    Requests POST wrapper in order to use API parameters.

    :param path: the request path
    :return: the aiohttp response object
    """
    # 'self' is reserved in Python signatures, so callers pass 'self_'
    # and it is renamed back before being sent as form data
    if 'self_' in kwargs:
        kwargs['self'] = kwargs.pop('self_')

    logging.debug("POST : {0}".format(kwargs))
    handler = self.connection_handler
    url = self.reverse_url(handler.http_scheme, path)
    response = await handler.session.post(
        url,
        data=kwargs,
        headers=self.headers,
        proxy=handler.proxy,
        timeout=15
    )
    return response
python
{ "resource": "" }
q43635
Client.post
train
async def post(self, url_path: str, params: dict = None, rtype: str = RESPONSE_JSON, schema: dict = None) -> Any:
    """
    POST request on self.endpoint + url_path

    :param url_path: Url encoded path following the endpoint
    :param params: Url query string parameters dictionary
    :param rtype: Response type (aiohttp response, text or json)
    :param schema: Json Schema to validate response (optional, default None)
    :return: the response in the requested representation
    """
    if params is None:
        params = dict()
    client = API(self.endpoint.conn_handler(self.session, self.proxy))
    response = await client.requests_post(url_path, **params)
    if schema is not None:
        # Validation happens before the body is handed back to the caller.
        await parse_response(response, schema)
    if rtype == RESPONSE_AIOHTTP:
        return response
    if rtype == RESPONSE_TEXT:
        return await response.text()
    if rtype == RESPONSE_JSON:
        return await response.json()
python
{ "resource": "" }
q43636
quote_value
train
def quote_value(value):
    """Return the value ready to be used as a value in a SQL string.

    For example you can safely do this:
        cursor.execute('select * from table where key = %s' % quote_value(val))
    and you don't have to worry about possible SQL injections.
    """
    quoted = adapt(value)
    # Adapters expose getquoted() to produce the escaped SQL fragment;
    # plain values are returned as-is.
    getquoted = getattr(quoted, 'getquoted', None)
    if getquoted is not None:
        quoted = getquoted()
    return quoted
python
{ "resource": "" }
q43637
Analyze.load
train
def load(self,dset):
    '''load a dataset from given filename into the object

    Stores the filename, the nibabel image, its voxel data array and its
    header on the instance for later use.
    '''
    # Remember where the data came from.
    self.dset_filename = dset
    # nibabel image object for the file.
    self.dset = nib.load(dset)
    # get_data()/get_header() are the legacy nibabel accessors — presumably
    # this targets an older nibabel release (TODO confirm; newer releases
    # use get_fdata()/header).
    self.data = self.dset.get_data()
    self.header = self.dset.get_header()
python
{ "resource": "" }
q43638
Analyze.voxel_loop
train
def voxel_loop(self):
    '''iterator that loops through each voxel and yields the coords and time
    series as a tuple ((x, y, z), series)'''
    # Prob not the most efficient, but the best I can do for now.
    # range() instead of the Python-2-only xrange() keeps this working on
    # Python 3 (and is equivalent on Python 2 apart from materialization).
    for x in range(len(self.data)):
        for y in range(len(self.data[x])):
            for z in range(len(self.data[x][y])):
                yield ((x, y, z), self.data[x][y][z])
python
{ "resource": "" }
q43639
Client.payment
train
def payment(self, amount, **kwargs):
    """Get payment URL and new transaction ID.

    Usage::

        >>> import sofort
        >>> client = sofort.Client('123456', '123456', '123456',
        ...                        abort_url='https://mysite.com/abort')
        >>> t = client.pay(12, success_url='http://mysite.com?paid')
        >>> t.transaction
        123123-321231-56A3BE0E-ACAB
        >>> t.payment_url
        https://www.sofort.com/payment/go/136b2012718da216af4c20c2ec2f51100c90406e

    :raises ValueError: when a mandatory field is missing from the config.
    """
    params = self.config.clone().update({'amount': amount}).update(kwargs)
    # These fields must be present either in the client config or kwargs.
    for field in ['abort_url', 'reasons', 'success_url']:
        if not params.has(field):
            raise ValueError('Mandatory field "{}" is not specified'.format(field))
    params.reasons = [sofort.internals.strip_reason(reason)
                      for reason in params.reasons]
    return self._request(sofort.xml.multipay(params), params)
python
{ "resource": "" }
q43640
load_env
train
def load_env(print_vars=False):
    """Load environment variables from a .env file, if present.

    If an .env file is found in the working directory (or at the path named
    by the ENV_FILE environment variable), and the listed environment
    variables are not already set, they will be set according to the values
    listed in the file. Existing variables are never overwritten.

    Fix: the original opened the file with a bare ``open(...).read()`` and
    never closed the handle; a ``with`` block now guarantees closure.

    :param print_vars: when True, echo each processed key and its value.
    """
    env_file = os.environ.get('ENV_FILE', '.env')
    try:
        with open(env_file) as fh:
            variables = fh.read().splitlines()
    except IOError:
        # Missing/unreadable .env file is not an error.
        return
    for v in variables:
        if '=' not in v:
            continue
        key, value = v.split('=', 1)
        if key.startswith('#'):
            # Commented-out assignment.
            continue
        if key not in os.environ:
            if (value.startswith('"') and value.endswith('"')) or \
               (value.startswith("'") and value.endswith("'")):
                # Quoted values are unescaped via literal_eval.
                os.environ[key] = ast.literal_eval(value)
            else:
                os.environ[key] = value
        if print_vars:
            # NOTE(review): printed for every parsed key, including ones
            # that were already set — matches the original's apparent intent.
            print(key, os.environ[key])
python
{ "resource": "" }
q43641
get_config
train
def get_config(config_schema, env=None):
    """Parse config from the environment against a given schema.

    Args:
        config_schema: A dictionary mapping keys in the environment to
            envpy Schema objects describing the expected value.
        env: An optional dictionary used to override the environment
            rather than getting it from the os.

    Returns:
        A dictionary which maps the values pulled from the environment and
        parsed against the given schema.

    Raises:
        MissingConfigError: A value in the schema with no default could
            not be found in the environment.
        ParsingError: A value was found in the environment but could not
            be parsed into the given value type.
    """
    environment = os.environ if env is None else env
    return parser.parse_env(config_schema, environment)
python
{ "resource": "" }
q43642
get_line_matches
train
def get_line_matches(input_file: str,
                     pattern: str,
                     max_occurrencies: int = 0,
                     loose_matching: bool = True) -> dict:
    r"""Get the line numbers of matched patterns.

    :parameter input_file: the file that needs to be read.
    :parameter pattern: the pattern that needs to be searched.
    :parameter max_occurrencies: the maximum number of expected occurrencies.
         Defaults to ``0`` which means that all occurrencies will be matched.
    :parameter loose_matching: ignore leading and trailing whitespace
         characters for both pattern and matched strings. Defaults to ``True``.
    :returns: a dictionary mapping the n-th occurrence (starting from 1) to
         the line number it was found on. Occurrences past the limit are not
         recorded.
    :raises: a built-in exception.

    .. note:: Line numbers start from ``1``.
    """
    assert max_occurrencies >= 0
    # 0 means "no limit"; float('inf') compares correctly against ints.
    if max_occurrencies == 0:
        max_occurrencies = float('inf')
    if loose_matching:
        pattern = pattern.strip()
    # Fix: use a plain int counter instead of the original float counter
    # with int() casts for every dictionary key.
    occurrences = 0
    matches = dict()
    line_number = 1
    with open(input_file, 'r') as f:
        line = f.readline()
        while line and occurrences < max_occurrencies:
            if loose_matching:
                line = line.strip()
            if line == pattern:
                occurrences += 1
                matches[occurrences] = line_number
            line = f.readline()
            line_number += 1
    return matches
python
{ "resource": "" }
q43643
insert_string_at_line
train
def insert_string_at_line(input_file: str,
                          string_to_be_inserted: str,
                          put_at_line_number: int,
                          output_file: str,
                          append: bool = True,
                          newline_character: str = '\n'):
    r"""Write a string at the specified line.

    :parameter input_file: the file that needs to be read.
    :parameter string_to_be_inserted: the string that needs to be added.
    :parameter put_at_line_number: the line number on which to place the
         string.
    :parameter output_file: the file that needs to be written with the new
         content.
    :parameter append: decides whether to append or prepend the string at the
         selected line. Defaults to ``True``.
    :parameter newline_character: character used to fill the file in case
         put_at_line_number is greater than the number of lines of
         input_file. Defaults to ``\n``.
    :returns: None
    :raises: a built-in exception.

    .. note:: Line numbers start from ``1``.
    """
    assert put_at_line_number >= 1
    with open(input_file, 'r') as f:
        lines = f.readlines()
    # If the target line lies beyond the end of the input, pad with bare
    # newline characters up to (but not including) the target line, then add
    # an empty "line" for the insertion point itself.
    if put_at_line_number > len(lines):
        lines.extend(newline_character
                     for _ in range(put_at_line_number - len(lines) - 1))
        lines.append('')
    target = put_at_line_number - 1
    if append:
        # If the original line ends with a newline the string lands on the
        # next visual line; otherwise it is appended on the same line.
        lines[target] = lines[target] + string_to_be_inserted
    else:
        lines[target] = string_to_be_inserted + lines[target]
    with atomic_write(output_file, overwrite=True) as f:
        f.write(''.join(lines))
python
{ "resource": "" }
q43644
remove_line_interval
train
def remove_line_interval(input_file: str,
                         delete_line_from: int,
                         delete_line_to: int,
                         output_file: str):
    r"""Remove a line interval.

    :parameter input_file: the file that needs to be read.
    :parameter delete_line_from: the line number from which start deleting.
    :parameter delete_line_to: the line number to which stop deleting.
    :parameter output_file: the file that needs to be written without the
         selected lines.
    :returns: None
    :raises: NegativeLineRangeError, LineOutOfFileBoundsError or a built-in
         exception.

    .. note:: Line numbers start from ``1``.

    .. note:: A single line is removed when delete_line_from equals
         delete_line_to.
    """
    assert delete_line_from >= 1
    assert delete_line_to >= 1
    with open(input_file, 'r') as f:
        all_lines = f.readlines()
    # An inverted interval is invalid; an equal pair means "one line".
    if delete_line_to - delete_line_from < 0:
        raise NegativeLineRangeError
    if delete_line_from > len(all_lines) or delete_line_to > len(all_lines):
        raise LineOutOfFileBoundsError
    with atomic_write(output_file, overwrite=True) as f:
        for number, content in enumerate(all_lines, start=1):
            # Copy every line that falls outside the deletion interval.
            if not (delete_line_from <= number <= delete_line_to):
                f.write(content)
python
{ "resource": "" }
q43645
upload_dataset
train
def upload_dataset(
        dataset_name, file_path, task=None, dataset_attributes=None,
        **kwargs):
    """Uploads the given file to dataset store.

    Fix: removed a leftover debug ``print(blob_name)`` (the sibling
    ``download_dataset`` has the equivalent print commented out).

    Parameters
    ----------
    dataset_name : str
        The name of the dataset to upload.
    file_path : str
        The full path to the file to upload
    task : str, optional
        The task for which the given dataset is used for. If not given, a
        path for the corresponding task-agnostic directory is used.
    dataset_attributes : dict, optional
        Additional attributes of the datasets. Used to generate additional
        sub-folders on the blob "path". For example, providing 'lang=en'
        will results in a path such as '/lang_en/mydataset.csv'. Hierarchy
        always matches lexicographical order of keyword argument names.
    **kwargs : extra keyword arguments
        Forwarded to azure.storage.blob.BlockBlobService.create_blob_from_path.
    """
    fname = ntpath.basename(file_path)
    blob_name = _blob_name(
        dataset_name=dataset_name,
        file_name=fname,
        task=task,
        dataset_attributes=dataset_attributes,
    )
    _blob_service().create_blob_from_path(
        container_name=BARN_CFG['azure']['container_name'],
        blob_name=blob_name,
        file_path=file_path,
        **kwargs,
    )
python
{ "resource": "" }
q43646
download_dataset
train
def download_dataset(
        dataset_name, file_path, task=None, dataset_attributes=None,
        **kwargs):
    """Downloads the given dataset from dataset store.

    Parameters
    ----------
    dataset_name : str
        The name of the dataset to download.
    file_path : str
        The full path to download the file to.
    task : str, optional
        The task for which the given dataset is used for. If not given, a
        path for the corresponding task-agnostic directory is used.
    dataset_attributes : dict, optional
        Additional attributes of the datasets. Used to generate additional
        sub-folders on the blob "path".
    **kwargs : extra keyword arguments
        Forwarded to azure.storage.blob.BlockBlobService.get_blob_to_path.

    Raises
    ------
    MissingDatasetError
        When the blob cannot be retrieved; any partially written local file
        is removed first.
    """
    blob_name = _blob_name(
        dataset_name=dataset_name,
        file_name=ntpath.basename(file_path),
        task=task,
        dataset_attributes=dataset_attributes,
    )
    try:
        _blob_service().get_blob_to_path(
            container_name=BARN_CFG['azure']['container_name'],
            blob_name=blob_name,
            file_path=file_path,
            **kwargs,
        )
    except Exception as e:
        # Don't leave a half-downloaded file behind.
        if os.path.isfile(file_path):
            os.remove(file_path)
        raise MissingDatasetError("With blob {}.".format(blob_name)) from e
python
{ "resource": "" }
q43647
send_work
train
def send_work(baseurl, work_id=None, filename=None, command="make"):
    """Ask user for a file to send to a work.

    Fixes: corrected the user-facing typo "Uplodaed" -> "Uploaded", and the
    filename is now re-prompted inside the retry loop — previously a
    FileNotFoundError reset ``filename`` to None but the inner loop retried
    with that None value.

    :param baseurl: base url of the submission server.
    :param work_id: id of the work to submit to; prompted for when missing.
    :param filename: archive to upload; prompted for when missing.
    :param command: compile command run to sanity-check the archive
        (empty string skips the check).
    """
    while 1:
        if not work_id:
            try:
                work_id = input("id? ")
            except KeyboardInterrupt:
                exit(0)
        work = get_work(work_id)
        if not work:
            print("id '{0}' not found".format(work_id))
            work_id = None
            continue
        # Verify the work still accepts submissions.
        if not work.is_open:
            print('"It\'s too late for {0} baby..." (Arnold Schwarzenegger)'.format(work.title))
            work_id = None
            continue
        while 1:
            if not filename:
                try:
                    filename = input("filename? ")
                except KeyboardInterrupt:
                    exit(0)
            try:
                if command:
                    if not archive_compile(filename, command):
                        print("Compilation failed")
                        try:
                            send = input("Send anyway [y/N] ")
                        except KeyboardInterrupt:
                            exit(0)
                        if send != "y":
                            exit(1)
                work.upload(baseurl, filename)
                print("Uploaded, but should verify it on the website")
                return
            except FileNotFoundError:
                print("{0} not found in current dir".format(filename))
                # Force a fresh prompt on the next inner iteration.
                filename = None
python
{ "resource": "" }
q43648
activate
train
def activate():
    """Install the path-based import components.

    Appends the custom path hook and meta path finder to the interpreter's
    import machinery and returns the indices where they were inserted.
    """
    global PathFinder, FileFinder, ff_path_hook
    hook_index = len(sys.path_hooks)
    sys.path_hooks.append(ff_path_hook)
    # Reset sys.path_importer_cache so an implicit package inside an already
    # loaded package picks up the new hook instead of the cached default
    # importer.
    sys.path_importer_cache.clear()
    # Change the package-finding logic through the meta path.
    finder_index = len(sys.meta_path)
    sys.meta_path.append(PathFinder)
    return hook_index, finder_index
python
{ "resource": "" }
q43649
BroadcastMessageBuilder.send
train
def send(self):
    """Sends the broadcast message.

    Builds the App.net message payload (text/annotations/entities) from the
    builder's attributes, uploading the photo and attachment files first
    when present, then creates the message on the configured channel.

    :returns: tuple of (:class:`adnpy.models.Message`, :class:`adnpy.models.APIMeta`)
    """
    # Markdown links imply plain link parsing as well.
    parse_links = self.parse_links or self.parse_markdown_links
    message = {
        'annotations': [],
        'entities': {
            'parse_links': parse_links,
            'parse_markdown_links': self.parse_markdown_links,
        }
    }
    if self.photo:
        # Upload the photo and embed it as a core oembed annotation.
        photo, photo_meta = _upload_file(self.api, self.photo)
        message['annotations'].append({
            'type': 'net.app.core.oembed',
            'value': {
                '+net.app.core.file': {
                    'file_id': photo.id,
                    'file_token': photo.file_token,
                    'format': 'oembed',
                }
            }
        })
    if self.attachment:
        # Upload the attachment and reference it through the core
        # attachments annotation (metadata format).
        attachment, attachment_meta = _upload_file(self.api, self.attachment)
        message['annotations'].append({
            'type': 'net.app.core.attachments',
            'value': {
                '+net.app.core.file_list': [
                    {
                        'file_id': attachment.id,
                        'file_token': attachment.file_token,
                        'format': 'metadata',
                    }
                ]
            }
        })
    if self.text:
        message['text'] = self.text
    else:
        # Without body text the message is flagged as machine-only.
        message['machine_only'] = True
    if self.headline:
        message['annotations'].append({
            'type': 'net.app.core.broadcast.message.metadata',
            'value': {
                'subject': self.headline,
            },
        })
    if self.read_more_link:
        message['annotations'].append({
            'type': 'net.app.core.crosspost',
            'value': {
                'canonical_url': self.read_more_link,
            }
        })
    return self.api.create_message(self.channel_id, data=message)
python
{ "resource": "" }
q43650
archive_compile
train
def archive_compile(filename, command="make"):
    """
    Returns if the given archive properly compile.

    Extract it in a temporary directory, run the given command, and return
    True if its exit status is 0. An empty command skips the check.

    Fix: runs the command via ``subprocess.run(..., cwd=tmpdir)`` instead of
    mutating the process-wide working directory with os.chdir + os.system.
    """
    import subprocess

    if not tarfile.is_tarfile(filename):
        print("Cannot extract archive")
        return False
    if command == "":
        return True
    with tempfile.TemporaryDirectory(suffix="prof") as tmpdir:
        with tarfile.open(filename) as tararchive:
            # NOTE(review): extractall on an untrusted archive can write
            # outside tmpdir (path traversal) — consider validating members.
            tararchive.extractall(tmpdir)
        print("Running {} in {} for file {}".format(command, tmpdir, filename))
        # shell=True keeps the original os.system semantics for the
        # user-supplied command string.
        result = subprocess.run(command, shell=True, cwd=tmpdir)
        if result.returncode == 0:
            print("Successfully compiled")
            return True
    return False
python
{ "resource": "" }
q43651
Request.raw
train
def raw(self):
    """Make request to url and return the raw response object.

    :raises RequestError: when the HTTP error body is valid json carrying a
        'message' property.
    :raises StatbankError: when the error body is not valid json (likely a
        server-side failure).
    """
    try:
        return urlopen(str(self.url))
    except HTTPError as error:
        try:
            parsed = self._parsejson(error)
        except ValueError:
            # Body was not json at all — blame the server.
            raise StatbankError() from None
        # 'from None' suppresses the chained HTTPError context, matching
        # the original's explicit __cause__ = None.
        raise RequestError(parsed['message']) from None
python
{ "resource": "" }
q43652
Request.csv
train
def csv(self):
    """Parse raw response as csv and yield one dict per data row.

    The first csv line supplies the dictionary keys.
    """
    rows = self._parsecsv(self.raw)
    # Header line comes first and provides the keys for every record.
    header = next(rows)
    for row in rows:
        yield dict(zip(header, row))
python
{ "resource": "" }
q43653
Request._parsecsv
train
def _parsecsv(x):
    """Deserialize file-like object containing csv to a Python generator.

    Each raw byte line is decoded as utf-8, whitespace-stripped and split on
    the configured delimiter.
    """
    for raw_line in x:
        yield raw_line.decode('utf-8').strip().split(config.DELIMITER)
python
{ "resource": "" }
q43654
construct_exc_class
train
def construct_exc_class(cls):
    """Constructs proxy class for the exception.

    Returns a subclass of *cls* that wraps an original exception instance
    and forwards attribute access / repr / str to it, while emulating
    PEP 3134 ``with_traceback`` semantics on Python 2-style exceptions.
    """
    class ProxyException(cls, BaseException):
        # Marker so other code can recognize these synthetic proxies.
        __pep3134__ = True

        @property
        def __traceback__(self):
            # A traceback pinned via with_traceback() wins; otherwise fall
            # back to the live traceback if this proxy is the exception
            # currently being handled. Returns None when neither applies.
            if self.__fixed_traceback__:
                return self.__fixed_traceback__
            current_exc, current_tb = sys.exc_info()[1:]
            if current_exc is self:
                return current_tb

        def __init__(self, instance=None):  # pylint: disable=W0231
            # Intentionally does not call super().__init__ — the proxy only
            # delegates to the wrapped original exception.
            self.__original_exception__ = instance
            self.__fixed_traceback__ = None

        def __getattr__(self, item):
            # Delegate everything not found on the proxy to the original.
            return getattr(self.__original_exception__, item)

        def __repr__(self):
            return repr(self.__original_exception__)

        def __str__(self):
            return str(self.__original_exception__)

        def with_traceback(self, traceback):
            # PEP 3134: return a copy carrying the supplied traceback.
            instance = copy.copy(self)
            instance.__fixed_traceback__ = traceback
            return instance

    # Masquerade as the wrapped class for nicer error messages.
    ProxyException.__name__ = cls.__name__
    return ProxyException
python
{ "resource": "" }
q43655
BaseBuild.from_url
train
def from_url(cls, url, **kwargs):
    """
    Downloads a zipped app source code from an url.

    Fixes: the download is now written to the ``path`` keyword argument
    (previously it was always written to the hard-coded '/tmp/app.zip' while
    ``from_zip`` read from ``path``), and the basic-auth credentials are
    encoded to bytes before base64-encoding (``b'%s' % str`` raises
    TypeError on Python 3).

    :param url: url to download the app source from
    :keyword username/password: optional basic-auth credentials
    :keyword headers: extra request headers
    :keyword path: where to store the downloaded zip (default /tmp/app.zip)
    :keyword dest: where to extract it (default /app)
    :returns: A project instance.
    :raises errors.DownloadError: when the server does not answer 200.
    """
    username = kwargs.get('username')
    password = kwargs.get('password')
    headers = kwargs.get('headers', {})
    path = kwargs.get('path', '/tmp/app.zip')
    dest = kwargs.get('dest', '/app')
    if username and password:
        credentials = '{}:{}'.format(username, password).encode('utf-8')
        auth = base64.b64encode(credentials)
        headers['Authorization'] = 'Basic %s' % auth.decode('utf8')
    r = request.get(url, headers=headers, stream=True)
    if r.status_code != 200:
        err_msg = 'Could not download resource from url (%s): %s'
        raise errors.DownloadError(err_msg % (r.status_code, url))
    with open(path, 'wb+') as f:
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
    return cls.from_zip(path, dest)
python
{ "resource": "" }
q43656
BaseBuild.from_path
train
def from_path(cls, path):
    """
    Instantiates a project class from a given path.

    :param path: app folder path source code
    :returns: A project instance.
    :raises errors.InvalidPathError: when the path does not exist.
    """
    if not os.path.exists(path):
        raise errors.InvalidPathError(path)
    return cls(path=path)
python
{ "resource": "" }
q43657
BaseBuild.from_zip
train
def from_zip(cls, src='/tmp/app.zip', dest='/app'):
    """
    Unzips a zipped app project file and instantiates it.

    Fixes: replaced the side-effect list comprehension over ``namelist()``
    with ``extractall`` and used a context manager so the archive is closed
    even when extraction raises.

    :param src: zipfile path
    :param dest: destination folder to extract the zipfile content
    :returns: A project instance.
    :raises errors.InvalidPathError: when the zip file is missing.
    :raises errors.InvalidZipFileError: when the file is not a valid zip.
    """
    try:
        with zipfile.ZipFile(src, 'r') as zf:
            zf.extractall(dest)
    except FileNotFoundError:
        raise errors.InvalidPathError(src)
    except zipfile.BadZipFile:
        raise errors.InvalidZipFileError(src)
    return cls.from_path(dest)
python
{ "resource": "" }
q43658
BaseBuild.inspect
train
def inspect(self, tab_width=2, ident_char='-'):
    """
    Inspects a project file structure based on the instance folder property.

    Fix: replaced the side-effect list comprehension over ``files`` with a
    plain loop (comprehensions are for building values, not for side
    effects).

    :param tab_width: width size for subfolders and files.
    :param ident_char: char to be used to show identation level
    :returns: A string containing the project structure.
    """
    startpath = self.path
    output = []
    for root, dirs, files in os.walk(startpath):
        # Nesting depth relative to the project root.
        level = root.replace(startpath, '').count(os.sep)
        indent = ident_char * tab_width * level
        if level == 0:
            output.append('{}{}/'.format(indent, os.path.basename(root)))
        else:
            output.append('|{}{}/'.format(indent, os.path.basename(root)))
        subindent = ident_char * tab_width * (level + 1)
        for f in files:
            output.append('|{}{}'.format(subindent, f))
    return '\n'.join(output)
python
{ "resource": "" }
q43659
BaseBuild.log
train
def log(self, ctx='all'):
    """
    Gets the build log output.

    :param ctx: specifies which log message to show, it can be 'validate',
        'build' or 'all'.
    """
    # A context-specific log file wins when it exists.
    ctx_log = '%s/%s.log' % (self.path, ctx)
    if os.path.exists(ctx_log):
        with open(ctx_log, 'r') as f:
            print(f.read())
        return
    # Otherwise concatenate the validate and build logs, in that order.
    merged = []
    for name in ('validate', 'build'):
        with open('%s/%s.log' % (self.path, name)) as f:
            merged.extend(f.readlines())
    print(''.join(merged))
python
{ "resource": "" }
q43660
second_order_diff
train
def second_order_diff(arr, x):
    """Compute second order difference of an array.

    A 2nd order forward difference is used for the first point, 2nd order
    central difference for interior points, and 2nd order backward
    difference for the last point, returning an array the same length as
    the input array.
    """
    # Convert to array, so this will work with pandas Series too.
    values = np.array(arr)
    # Effective spacings for the endpoint and interior stencils.
    dx_first = (x[2] - x[0]) / 2
    dx_last = (x[-1] - x[-3]) / 2
    dx_mid = (x[2:] - x[:-2]) / 2
    # 2nd order one-sided stencils at the boundaries.
    first = (-3 * values[0] + 4 * values[1] - values[2]) / (2 * dx_first)
    last = (3 * values[-1] - 4 * values[-2] + values[-3]) / (2 * dx_last)
    # 2nd order central difference everywhere else.
    interior = (values[2:] - values[:-2]) / (2 * dx_mid)
    return np.concatenate(([first], interior, [last]))
python
{ "resource": "" }
q43661
_process_json
train
def _process_json(response_body):
    """Build a UwPassword object from the service's json response body.

    Mandatory fields are passed to the constructor; optional fields are
    filled in afterwards only when present in the payload.
    """
    data = json.loads(response_body)
    uwpassword = UwPassword(
        uwnetid=data["uwNetID"],
        kerb_status=data["kerbStatus"],
        interval=None,
        last_change=None,
        last_change_med=None,
        expires_med=None,
        interval_med=None,
        minimum_length=int(data["minimumLength"]),
        time_stamp=parse(data["timeStamp"]),
    )
    if "lastChange" in data:
        uwpassword.last_change = parse(data["lastChange"])
    if "interval" in data:
        uwpassword.interval = timeparse(data["interval"])
    if "lastChangeMed" in data:
        uwpassword.last_change_med = parse(data["lastChangeMed"])
    if "expiresMed" in data:
        uwpassword.expires_med = parse(data["expiresMed"])
    if "intervalMed" in data:
        uwpassword.interval_med = timeparse(data["intervalMed"])
    if "netidStatus" in data:
        uwpassword.netid_status = list(data["netidStatus"])
    return uwpassword
python
{ "resource": "" }
q43662
create_next_tag
train
def create_next_tag():
    """Creates a tag based on the date and previous tags.

    The base form is ``YYYY.M.D``; when a tag for today already exists a
    fourth revision component is appended or incremented.
    """
    now = datetime.utcnow()
    date_tag = '{}.{}.{}'.format(now.year, now.month, now.day)
    if date_tag in latest_tag():
        # There was already an update today: bump the revision component.
        parts = latest_tag().split('.')
        if len(parts) == 4:
            parts[-1] = str(int(parts[-1]) + 1)
        else:
            # First revision of the day.
            parts.append('1')
        date_tag = '.'.join(parts)
    return date_tag
python
{ "resource": "" }
q43663
sync_readmes
train
def sync_readmes():
    """Just copies README.md into README for pypi documentation."""
    print("syncing README")
    with open("README.md", 'r') as source:
        contents = source.read()
    with open("README", 'w') as target:
        target.write(contents)
python
{ "resource": "" }
q43664
Number.similarity
train
def similarity(self, other):
    """Get similarity as a ratio of the two numbers."""
    smaller, larger = sorted((self.value, other.value))
    if larger == 0:
        # Division would fail: two zeros are identical, otherwise no match.
        ratio = 0.0 if smaller else 1.0
    else:
        ratio = float(smaller) / larger
    return self.Similarity(ratio)
python
{ "resource": "" }
q43665
Text.similarity
train
def similarity(self, other):
    """Get similarity as a ratio of the two texts."""
    matcher = SequenceMatcher(a=self.value, b=other.value)
    return self.Similarity(matcher.ratio())
python
{ "resource": "" }
q43666
TextTitle.similarity
train
def similarity(self, other):
    """Get similarity as a ratio of the stripped text."""
    logging.debug("comparing %r and %r...", self.stripped, other.stripped)
    ratio = SequenceMatcher(a=self.stripped, b=other.stripped).ratio()
    return self.Similarity(ratio)
python
{ "resource": "" }
q43667
skull_strip
train
def skull_strip(dset,suffix='_ns',prefix=None,unifize=True): ''' use bet to strip skull from given anatomy ''' # should add options to use betsurf and T1/T2 in the future # Since BET fails on weirdly distributed datasets, I added 3dUnifize in... I realize this makes this dependent on AFNI. Sorry, :) if prefix==None: prefix = nl.suffix(dset,suffix) unifize_dset = nl.suffix(dset,'_u') cmd = bet2 if bet2 else 'bet2' if unifize: info = nl.dset_info(dset) if info==None: nl.notify('Error: could not read info for dset %s' % dset,level=nl.level.error) return False cmd = os.path.join(fsl_dir,cmd) if fsl_dir else cmd cutoff_value = nl.max(dset) * 0.05 nl.run(['3dUnifize','-prefix',unifize_dset,nl.calc(dset,'step(a-%f)*a' % cutoff_value)],products=unifize_dset) else: unifize_dset = dset nl.run([cmd,unifize_dset,prefix,'-w',0.5],products=prefix)
python
{ "resource": "" }
q43668
query_sum
train
def query_sum(queryset, field):
    """
    Let the DBMS perform a sum on a queryset.

    Coalesce guarantees an integer 0 (rather than None) for empty querysets.
    """
    aggregated = queryset.aggregate(
        s=models.functions.Coalesce(models.Sum(field), 0))
    return aggregated['s']
python
{ "resource": "" }
q43669
get_env
train
def get_env(env_file='.env'):
    """
    Set default environment variables from a .env file.

    Lines without '=' are ignored; existing environment variables are never
    overwritten (setdefault semantics). A missing file is silently skipped.
    """
    try:
        with open(env_file) as f:
            for line in f:
                try:
                    key, val = line.split('=', maxsplit=1)
                except ValueError:
                    # No '=' on this line — skip it.
                    continue
                os.environ.setdefault(key.strip(), val.strip())
    except FileNotFoundError:
        pass
python
{ "resource": "" }
q43670
to_dict_formatter
train
def to_dict_formatter(row, cursor):
    """
    Take a row and use the column names from cursor to turn the row into a
    dictionary.

    Note: column names are used exactly as reported by the cursor — they are
    NOT lower-cased (the previous docstring incorrectly claimed otherwise).

    :param row: one database row, sequence of column values
    :type row: (value, ...)
    :param cursor: the cursor which was used to make the query
    :type cursor: DB-API cursor object
    :raises RuntimeError: when no cursor or cursor description is available
    """
    # Empty/falsy row? Return it unchanged.
    if not row:
        return row
    if cursor is None or cursor.description is None:
        raise RuntimeError("No DB-API cursor or description available.")
    # Index 0 of each description entry is the column name.
    column_names = (d[0] for d in cursor.description)
    return {name: value for value, name in zip(row, column_names)}
python
{ "resource": "" }
q43671
Query.show
train
def show(self, *args, **kwds):
    """
    Show how the SQL looks like when executed by the DB.

    This might not be supported by all connection types. For example:
    PostgreSQL does support it, SQLite does not.

    :rtype: str
    """
    # As in __call__, positional arguments win over keyword arguments.
    arg = args or kwds
    return self._db.show(self._sql, arg)
python
{ "resource": "" }
q43672
Select._produce_return
train
def _produce_return(self, cursor): """ Get the rows from the cursor and apply the row formatter. :return: sequence of rows, or a generator if a row formatter has to be applied """ results = cursor.fetchall() # Format rows within a generator? if self._row_formatter is not None: return (self._row_formatter(r, cursor) for r in results) return results
python
{ "resource": "" }
q43673
SelectOne._produce_return
train
def _produce_return(self, cursor): """ Return the one result. """ results = cursor.fetchmany(2) if len(results) != 1: return None # Return the one row, or the one column. row = results[0] if self._row_formatter is not None: row = self._row_formatter(row, cursor) elif len(row) == 1: row = row[0] return row
python
{ "resource": "" }
q43674
SelectIterator._row_generator
train
def _row_generator(self, cursor): """ Yields individual rows until no more rows exist in query result. Applies row formatter if such exists. """ rowset = cursor.fetchmany(self._arraysize) while rowset: if self._row_formatter is not None: rowset = (self._row_formatter(r, cursor) for r in rowset) for row in rowset: yield row rowset = cursor.fetchmany(self._arraysize)
python
{ "resource": "" }
q43675
Manipulation._produce_return
train
def _produce_return(self, cursor): """ Return the rowcount property from the used cursor. Checks the count first, if a count was given. :raise ManipulationCheckError: if a row count was set but does not match """ rowcount = cursor.rowcount # Check the row count? if self._rowcount is not None and self._rowcount != rowcount: raise ManipulationCheckError( "Count was {}, expected {}.".format(rowcount, self._rowcount)) return rowcount
python
{ "resource": "" }
q43676
LinterRunner.get_results
train
def get_results(self):
    """Run the linter, parse, and return result list.

    If a linter specified by the user is not found, return an error message
    as result; linters not chosen by the user fail silently.
    """
    try:
        stdout, stderr = self._lint()
        # A generator can't cross a subprocess boundary — materialize it.
        return list(stdout), stderr or []
    except FileNotFoundError as exception:
        if self._linter.name in self.config.user_linters:
            # The user asked for this linter explicitly: surface the error.
            message = 'Could not find {}. Did you install it? ' \
                      'Got exception: {}'.format(self._linter.name, exception)
            return [[], [message]]
        return [[], []]
python
{ "resource": "" }
q43677
LinterRunner._get_command
train
def _get_command(self): """Return command with options and targets, ready for execution.""" targets = ' '.join(self.targets) cmd_str = self._linter.command_with_options + ' ' + targets cmd_shlex = shlex.split(cmd_str) return list(cmd_shlex)
python
{ "resource": "" }
q43678
LinterRunner._lint
train
def _lint(self):
    """Run the linter in a subprocess and return its parsed output."""
    cmd = self._get_command()
    # nosec: the command comes from the configured linter, not user input.
    completed = subprocess.run(cmd, stdout=subprocess.PIPE,  # nosec
                               stderr=subprocess.PIPE)
    LOG.info('Finished %s', ' '.join(cmd))
    out_lines, err_lines = self._get_output_lines(completed)
    return self._linter.parse(out_lines), self._parse_stderr(err_lines)
python
{ "resource": "" }
q43679
Main.lint
train
def lint(self, targets):
    """Run linters in parallel and sort all results.

    Args:
        targets (list): List of files and folders to lint.
    """
    LinterRunner.targets = targets
    with Pool() as pool:
        results = pool.map(LinterRunner.run,
                           self._config.get_linter_classes())
    # Drop linters that produced nothing, then split stdout/stderr streams.
    pairs = [pair for pair in results if pair is not None]
    stdout, stderr = zip(*pairs)
    return sorted(chain.from_iterable(stdout)), chain.from_iterable(stderr)
python
{ "resource": "" }
q43680
Main.run_from_cli
train
def run_from_cli(self, args):
    """Dispatch based on parsed CLI arguments and print the outcome.

    Args:
        args (dict): Arguments parsed by docopt.
    """
    if args['--dump-config']:
        self._config.print_config()
        return
    out, err = self.lint(args['<path>'])
    self.print_results(out, err)
python
{ "resource": "" }
q43681
Main.print_results
train
def print_results(cls, stdout, stderr):
    """Print stderr lines, then either the results or a success note."""
    for err_line in stderr:
        print(err_line, file=sys.stderr)
    if not stdout:
        print(':) No issues found.')
        return
    if stderr:
        # Separate the two streams with a blank line.
        print(file=sys.stderr)
    cls._print_stdout(stdout)
python
{ "resource": "" }
q43682
notification_on_post_create_historical_record
train
def notification_on_post_create_historical_record(
    instance, history_date, history_user, history_change_reason, **kwargs
):
    """Checks and processes any notifications for this model.

    Only fires when the model's ``label_lower`` is registered in
    ``site_notifications.models``. Note: this runs on creation of the
    historical record, not on the model instance itself.
    """
    if not site_notifications.loaded:
        return
    if instance._meta.label_lower not in site_notifications.models:
        return
    site_notifications.notify(
        instance=instance,
        user=instance.user_modified or instance.user_created,
        history_date=history_date,
        history_user=history_user,
        history_change_reason=history_change_reason,
        fail_silently=True,
        **kwargs
    )
python
{ "resource": "" }
q43683
manage_mailists_on_userprofile_m2m_changed
train
def manage_mailists_on_userprofile_m2m_changed(
    action, instance, pk_set, sender, **kwargs
):
    """Updates the mail server mailing lists based on the selections
    in the UserProfile model.
    """
    # Instances without the email_notifications attribute are ignored.
    try:
        instance.email_notifications
    except AttributeError:
        return
    common_kwargs = dict(
        sender=sender, userprofile=instance, pk_set=pk_set, verbose=True)
    if action == "post_remove":
        update_mailing_lists_in_m2m(unsubscribe=True, **common_kwargs)
    elif action == "post_add":
        update_mailing_lists_in_m2m(subscribe=True, **common_kwargs)
python
{ "resource": "" }
q43684
parse
train
def parse(file_contents, file_name):
    '''
    Try to parse the contents of a jinja2 template file.

    Args:
        file_contents (str): File contents of a jinja file
        file_name (str): Name of the file, used in the error report

    Returns:
        str: An error report if parsing failed, otherwise an empty
        string. (The previous docstring claimed an exception was
        raised; the exception is in fact caught and reported.)
    '''
    env = Environment()
    result = ""
    try:
        env.parse(file_contents)
    except Exception as exc:
        # Bind the exception directly instead of the dated
        # sys.exc_info() dance; repr() output is unchanged.
        result += "ERROR: Jinja2 Template File: {0}".format(file_name)
        result += repr(exc) + '\n'
    return result
python
{ "resource": "" }
q43685
BaseHttpStreamReader.read_until
train
async def read_until(
        self, separator: bytes=b"\n", *,
        keep_separator: bool=True) -> bytes:
    """
    Read until the separator has been found.

    :param separator: the byte sequence to scan for (default: newline)
    :param keep_separator: whether the returned data includes the
        separator; either way the separator is consumed from the buffer

    When the max size of the buffer has been reached,
    and the separator is not found, this method will raise
    a :class:`MaxBufferLengthReachedError`.

    Similarly, if the end has been reached before found the separator,
    it will raise a :class:`SeparatorNotFoundError`.

    When :method:`.finished()` is `True`, this method will raise
    any errors occurred during the read or a
    :class:`ReadFinishedError`.
    """
    async with self._read_lock:  # serialize concurrent readers
        self._raise_exc_if_finished()

        start_pos = 0

        while True:
            separator_pos = self._buf.find(separator, start_pos)

            if separator_pos != -1:
                break  # separator located; fall through to extraction

            if len(self) > self.max_buf_len:
                raise MaxBufferLengthReachedError

            try:
                await self._wait_for_data()

            except asyncio.CancelledError:  # pragma: no cover
                raise

            except Exception as e:
                # The stream ended (or errored) before the separator
                # appeared. Report SeparatorNotFoundError only when data
                # remains buffered; otherwise propagate the original error.
                if len(self) > 0:
                    raise SeparatorNotFoundError from e

                else:
                    raise

            # Resume scanning just before the previous end of the buffer so
            # a separator straddling the old/new data boundary is still
            # found, while avoiding a full rescan of already-searched bytes.
            new_start_pos = len(self) - len(separator)

            if new_start_pos > 0:
                start_pos = new_start_pos

        full_pos = separator_pos + len(separator)

        if keep_separator:
            data_pos = full_pos

        else:
            data_pos = separator_pos

        data = bytes(self._buf[0:data_pos])
        # The separator is always removed from the buffer, even when it is
        # not returned to the caller.
        del self._buf[0:full_pos]

        return data
python
{ "resource": "" }
q43686
HttpRequestReader.write_response
train
def write_response(
        self, status_code: Union[int, constants.HttpStatusCode], *,
        headers: Optional[_HeaderType]=None
        ) -> "writers.HttpResponseWriter":
    """
    Write a response to the client.

    :param status_code: the HTTP status code for the response
    :param headers: optional headers to send along
    :return: the writer used to send the response
    """
    code = constants.HttpStatusCode(status_code)
    writer = self.__delegate.write_response(code, headers=headers)
    self._writer = writer
    return writer
python
{ "resource": "" }
q43687
createDbusProxyObject
train
def createDbusProxyObject(bus_name, object_path, bus=None):
    '''Create a dbus proxy object for *object_path* on *bus*.'''
    # Fall back to the session bus when no (truthy) bus was supplied.
    target_bus = bus or dbus.SessionBus.get_session()
    return target_bus.get_object(bus_name, object_path)
python
{ "resource": "" }
q43688
translate
train
def translate(text, target_lang='en', source_lang=None):
    """
    Translate *text* using the Google v2 API.

    The API key must be set on this function (``translate.API_key``)
    before calling it.
    """
    endpoint = 'https://www.googleapis.com/language/translate/v2'
    query = {'key': translate.API_key, 'q': text, 'target': target_lang}
    if source_lang:
        query['source'] = source_lang
    response = requests.get(endpoint, params=query)
    response.raise_for_status()
    payload = response.json()
    return payload['data']['translations'][0]['translatedText']
python
{ "resource": "" }
q43689
SigningKey.from_credentials
train
def from_credentials(cls: Type[SigningKeyType], salt: Union[str, bytes], password: Union[str, bytes],
                     scrypt_params: Optional[ScryptParams] = None) -> SigningKeyType:
    """
    Create a SigningKey object from credentials

    :param salt: Secret salt passphrase credential
    :param password: Secret password credential
    :param scrypt_params: ScryptParams instance (defaults when omitted)
    """
    params = scrypt_params if scrypt_params is not None else ScryptParams()
    seed = scrypt(ensure_bytes(password), ensure_bytes(salt),
                  params.N, params.r, params.p, params.seed_length)
    return cls(seed)
python
{ "resource": "" }
q43690
SigningKey.save_seedhex_file
train
def save_seedhex_file(self, path: str) -> None:
    """
    Write this key's seed to *path* as a hexadecimal string.

    :param path: Authentication file path
    """
    with open(path, 'w') as out_file:
        out_file.write(convert_seed_to_seedhex(self.seed))
python
{ "resource": "" }
q43691
SigningKey.from_seedhex_file
train
def from_seedhex_file(path: str) -> SigningKeyType:
    """
    Return SigningKey instance built from a hexadecimal seed file.

    :param str path: Hexadecimal seed file path
    """
    with open(path, 'r') as in_file:
        content = in_file.read()
    return SigningKey.from_seedhex(content)
python
{ "resource": "" }
q43692
SigningKey.from_seedhex
train
def from_seedhex(cls: Type[SigningKeyType], seedhex: str) -> SigningKeyType:
    """
    Return SigningKey instance from a hexadecimal seed string.

    :param str seedhex: Hexadecimal seed string
    :raise Exception: if no 64-char hexadecimal run is found
    """
    match = search(compile("([0-9a-fA-F]{64})"), seedhex)
    if match is None:
        raise Exception('Error: Bad seed hexadecimal format')
    return cls(convert_seedhex_to_seed(match.groups()[0]))
python
{ "resource": "" }
q43693
SigningKey.from_private_key
train
def from_private_key(path: str) -> SigningKeyType:
    """
    Load a SigningKey from an authentication file and attach its
    base58-encoded public key as the ``pubkey`` attribute.

    :param path: Authentication file path
    """
    signing_key = load_key(path)
    signing_key.pubkey = Base58Encoder.encode(signing_key.vk)
    return signing_key
python
{ "resource": "" }
q43694
SigningKey.decrypt_seal
train
def decrypt_seal(self, data: bytes) -> bytes:
    """
    Decrypt *data* with a curve25519 conversion of this ed25519 key pair.

    :param data: Encrypted data
    :return: the decrypted bytes
    """
    public_curve = libnacl.crypto_sign_ed25519_pk_to_curve25519(self.vk)
    secret_curve = libnacl.crypto_sign_ed25519_sk_to_curve25519(self.sk)
    return libnacl.crypto_box_seal_open(data, public_curve, secret_curve)
python
{ "resource": "" }
q43695
SigningKey.from_wif_or_ewif_file
train
def from_wif_or_ewif_file(path: str, password: Optional[str] = None) -> SigningKeyType:
    """
    Return SigningKey instance from a Duniter WIF or EWIF file.

    :param path: Path to WIF or EWIF file
    :param password: Password needed for EWIF file
    :raise Exception: if the "Data:" field is missing
    """
    with open(path, 'r') as key_file:
        content = key_file.read()
    # The base58 key payload lives in the "Data:" field of the file.
    data_match = search(compile('Data: ([1-9A-HJ-NP-Za-km-z]+)', MULTILINE), content)
    if data_match is None:
        raise Exception('Error: Bad format WIF or EWIF v1 file')
    return SigningKey.from_wif_or_ewif_hex(data_match.groups()[0], password)
python
{ "resource": "" }
q43696
SigningKey.from_wif_or_ewif_hex
train
def from_wif_or_ewif_hex(wif_hex: str, password: Optional[str] = None) -> SigningKeyType:
    """
    Return SigningKey instance from a Duniter WIF or EWIF key string,
    dispatching on the leading format byte after base58 decoding.

    :param wif_hex: WIF or EWIF key string
    :param password: Password of EWIF encrypted seed
    :raise Exception: if the format byte is neither WIF nor EWIF
    """
    raw = Base58Encoder.decode(wif_hex)
    format_byte = raw[0:1]
    if format_byte == b"\x01":
        return SigningKey.from_wif_hex(wif_hex)
    if format_byte == b"\x02" and password is not None:
        return SigningKey.from_ewif_hex(wif_hex, password)
    raise Exception("Error: Bad format: not WIF nor EWIF")
python
{ "resource": "" }
q43697
SigningKey.from_wif_hex
train
def from_wif_hex(cls: Type[SigningKeyType], wif_hex: str) -> SigningKeyType:
    """
    Return SigningKey instance from a Duniter WIF key string.

    :param wif_hex: WIF key string (base58-decoded internally)
    :raise Exception: on wrong size, format byte or checksum
    """
    raw = Base58Encoder.decode(wif_hex)
    if len(raw) != 35:
        raise Exception("Error: the size of WIF is invalid")

    # Layout: 1 format byte + 32 seed bytes + 2 checksum bytes.
    format_byte = raw[0:1]
    seed = raw[1:-2]
    checksum_given = raw[-2:]
    payload = raw[0:-2]

    if format_byte != b"\x01":
        raise Exception("Error: bad format version, not WIF")

    # Checksum is the first 2 bytes of double SHA-256 over the payload.
    checksum_computed = libnacl.crypto_hash_sha256(
        libnacl.crypto_hash_sha256(payload))[0:2]
    if checksum_given != checksum_computed:
        raise Exception("Error: bad checksum of the WIF")

    return cls(seed)
python
{ "resource": "" }
q43698
SigningKey.from_ewif_file
train
def from_ewif_file(path: str, password: str) -> SigningKeyType:
    """
    Return SigningKey instance from a Duniter EWIF file.

    :param path: Path to EWIF file
    :param password: Password of the encrypted seed
    :raise Exception: if the "Data:" field is missing
    """
    with open(path, 'r') as key_file:
        content = key_file.read()
    # The base58 key payload lives in the "Data:" field of the file.
    data_match = search(compile('Data: ([1-9A-HJ-NP-Za-km-z]+)', MULTILINE), content)
    if data_match is None:
        raise Exception('Error: Bad format EWIF v1 file')
    return SigningKey.from_ewif_hex(data_match.groups()[0], password)
python
{ "resource": "" }
q43699
SigningKey.from_ewif_hex
train
def from_ewif_hex(cls: Type[SigningKeyType], ewif_hex: str, password: str) -> SigningKeyType:
    """
    Return SigningKey instance from Duniter EWIF in hexadecimal format

    :param ewif_hex: EWIF string in hexadecimal format
    :param password: Password of the encrypted seed
    :raise Exception: on wrong payload size, format byte, checksum, or
        password
    """
    ewif_bytes = Base58Encoder.decode(ewif_hex)
    # An EWIF payload is exactly 39 bytes:
    # 1 format byte + 4 salt bytes + 2x16-byte encrypted halves + 2 checksum bytes.
    if len(ewif_bytes) != 39:
        raise Exception("Error: the size of EWIF is invalid")

    # extract data
    fi = ewif_bytes[0:1]
    checksum_from_ewif = ewif_bytes[-2:]
    ewif_no_checksum = ewif_bytes[0:-2]
    salt = ewif_bytes[1:5]
    encryptedhalf1 = ewif_bytes[5:21]
    encryptedhalf2 = ewif_bytes[21:37]

    # check format flag (0x02 marks the encrypted EWIF variant)
    if fi != b"\x02":
        raise Exception("Error: bad format version, not EWIF")

    # checksum control: first 2 bytes of double SHA-256 over everything
    # except the checksum itself
    checksum = libnacl.crypto_hash_sha256(libnacl.crypto_hash_sha256(ewif_no_checksum))[0:2]
    if checksum_from_ewif != checksum:
        raise Exception("Error: bad checksum of the EWIF")

    # SCRYPT: derive 64 bytes of key material from password and salt
    # (N=16384, r=8, p=8)
    password_bytes = password.encode("utf-8")
    scrypt_seed = scrypt(password_bytes, salt, 16384, 8, 8, 64)
    derivedhalf1 = scrypt_seed[0:32]
    derivedhalf2 = scrypt_seed[32:64]

    # AES: decrypt both 16-byte halves in ECB mode, keyed with the
    # second derived half
    aes = pyaes.AESModeOfOperationECB(derivedhalf2)
    decryptedhalf1 = aes.decrypt(encryptedhalf1)
    decryptedhalf2 = aes.decrypt(encryptedhalf2)

    # XOR the decrypted halves with the first derived half to recover
    # the 32-byte seed
    seed1 = xor_bytes(decryptedhalf1, derivedhalf1[0:16])
    seed2 = xor_bytes(decryptedhalf2, derivedhalf1[16:32])
    seed = bytes(seed1 + seed2)

    # Password Control: the stored salt must equal the first 4 bytes of
    # the double SHA-256 of the recovered public key; a mismatch means
    # the password was wrong
    signer = SigningKey(seed)
    salt_from_seed = libnacl.crypto_hash_sha256(
        libnacl.crypto_hash_sha256(
            Base58Encoder.decode(signer.pubkey)))[0:4]
    if salt_from_seed != salt:
        raise Exception("Error: bad Password of EWIF address")

    return cls(seed)
python
{ "resource": "" }