Dataset schema:

id: int32, range 0 to 252k
repo: string, length 7 to 55
path: string, length 4 to 127
func_name: string, length 1 to 88
original_string: string, length 75 to 19.8k (identical to code in every row shown)
language: string, 1 distinct value (python)
code: string, length 75 to 19.8k
code_tokens: list
docstring: string, length 3 to 17.3k
docstring_tokens: list
sha: string, length 40 (fixed)
url: string, length 87 to 242
240,900
dillonhicks/rekt
rekt/utils.py
load_config
python
def load_config(path):
    """
    Loads a yaml configuration.

    :param path: a pathlib Path object pointing to the configuration
    """
    with path.open('rb') as fi:
        file_bytes = fi.read()
        config = yaml.load(file_bytes.decode('utf-8'))
    return config
[ "def", "load_config", "(", "path", ")", ":", "with", "path", ".", "open", "(", "'rb'", ")", "as", "fi", ":", "file_bytes", "=", "fi", ".", "read", "(", ")", "config", "=", "yaml", ".", "load", "(", "file_bytes", ".", "decode", "(", "'utf-8'", ")", ")", "return", "config" ]
Loads a yaml configuration. :param path: a pathlib Path object pointing to the configuration
[ "Loads", "a", "yaml", "configuration", "." ]
3848b272726c78214cb96b906f9b9f289497f27e
https://github.com/dillonhicks/rekt/blob/3848b272726c78214cb96b906f9b9f289497f27e/rekt/utils.py#L56-L66
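A note on this snippet: PyYAML's yaml.load without an explicit Loader argument is deprecated (and unsafe on untrusted input); yaml.safe_load is the usual modern substitute. A minimal usage sketch, assuming the function is imported and a config file exists at the hypothetical path:

from pathlib import Path

# load_config expects a pathlib.Path, not a str; 'config.yaml' is a
# placeholder file name.
config = load_config(Path('config.yaml'))
print(config)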
240,901
EwilDawe/typy
typy/mouse.py
drag
python
def drag(*args, function=move):
    """
    Drags the mouse along a specified path

    :param args: list of arguments passed to function
    :param function: path to traverse
    :return: None
    """
    x, y = win32api.GetCursorPos()
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, x, y, 0, 0)
    function(*args)
    x, y = win32api.GetCursorPos()
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x, y, 0, 0)
[ "def", "drag", "(", "*", "args", ",", "function", "=", "move", ")", ":", "x", ",", "y", "=", "win32api", ".", "GetCursorPos", "(", ")", "win32api", ".", "mouse_event", "(", "win32con", ".", "MOUSEEVENTF_LEFTDOWN", ",", "x", ",", "y", ",", "0", ",", "0", ")", "function", "(", "*", "args", ")", "x", ",", "y", "=", "win32api", ".", "GetCursorPos", "(", ")", "win32api", ".", "mouse_event", "(", "win32con", ".", "MOUSEEVENTF_LEFTUP", ",", "x", ",", "y", ",", "0", ",", "0", ")" ]
Drags the mouse along a specified path :param args: list of arguments passed to function :param function: path to traverse :return: None
[ "Drags", "the", "mouse", "along", "a", "specified", "path" ]
0349e7176567a4dbef318e75d9b3d6868950a1a9
https://github.com/EwilDawe/typy/blob/0349e7176567a4dbef318e75d9b3d6868950a1a9/typy/mouse.py#L72-L85
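A hedged usage sketch for drag: the default path function, move, is defined elsewhere in typy/mouse.py and its signature is not shown in this excerpt, so this example assumes a move(x, y) that traverses the cursor to absolute coordinates. Windows-only, since the implementation relies on pywin32.

# Assumed: move(x, y) walks the cursor to (x, y). drag() presses the left
# button, runs the traversal, then releases the button wherever it ends up.
drag(400, 300)                  # drag along the default move path
drag(400, 300, function=move)   # same call, path function made explicit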
240,902
mikerhodes/actionqueues
actionqueues/actionqueue.py
ActionQueue.add
python
def add(self, action):
    """Add an action to the execution queue."""
    self._state_machine.transition_to_add()
    self._actions.append(action)
[ "def", "add", "(", "self", ",", "action", ")", ":", "self", ".", "_state_machine", ".", "transition_to_add", "(", ")", "self", ".", "_actions", ".", "append", "(", "action", ")" ]
Add an action to the execution queue.
[ "Add", "an", "action", "to", "the", "execution", "queue", "." ]
a7a78ab116abe88af95b5315dc9f34d40ce81eb2
https://github.com/mikerhodes/actionqueues/blob/a7a78ab116abe88af95b5315dc9f34d40ce81eb2/actionqueues/actionqueue.py#L22-L25
240,903
mikerhodes/actionqueues
actionqueues/actionqueue.py
ActionQueue.execute
python
def execute(self):
    """Execute all actions, throwing an ExecutionException on failure.

    Catch the ExecutionException and call rollback() to rollback.
    """
    self._state_machine.transition_to_execute()
    for action in self._actions:
        self._executed_actions.append(action)
        self.execute_with_retries(action, lambda a: a.execute())
    self._state_machine.transition_to_execute_complete()
[ "def", "execute", "(", "self", ")", ":", "self", ".", "_state_machine", ".", "transition_to_execute", "(", ")", "for", "action", "in", "self", ".", "_actions", ":", "self", ".", "_executed_actions", ".", "append", "(", "action", ")", "self", ".", "execute_with_retries", "(", "action", ",", "lambda", "a", ":", "a", ".", "execute", "(", ")", ")", "self", ".", "_state_machine", ".", "transition_to_execute_complete", "(", ")" ]
Execute all actions, throwing an ExecutionException on failure. Catch the ExecutionException and call rollback() to rollback.
[ "Execute", "all", "actions", "throwing", "an", "ExecutionException", "on", "failure", "." ]
a7a78ab116abe88af95b5315dc9f34d40ce81eb2
https://github.com/mikerhodes/actionqueues/blob/a7a78ab116abe88af95b5315dc9f34d40ce81eb2/actionqueues/actionqueue.py#L27-L36
240,904
mikerhodes/actionqueues
actionqueues/actionqueue.py
ActionQueue.rollback
python
def rollback(self):
    """Call rollback on executed actions."""
    self._state_machine.transition_to_rollback()
    for action in reversed(self._executed_actions):
        try:
            self.execute_with_retries(action, lambda a: a.rollback())
        except:  # pylint: disable=bare-except
            pass  # on exception, carry on with rollback of other steps
    self._state_machine.transition_to_rollback_complete()
[ "def", "rollback", "(", "self", ")", ":", "self", ".", "_state_machine", ".", "transition_to_rollback", "(", ")", "for", "action", "in", "reversed", "(", "self", ".", "_executed_actions", ")", ":", "try", ":", "self", ".", "execute_with_retries", "(", "action", ",", "lambda", "a", ":", "a", ".", "rollback", "(", ")", ")", "except", ":", "# pylint: disable=bare-except", "pass", "# on exception, carry on with rollback of other steps", "self", ".", "_state_machine", ".", "transition_to_rollback_complete", "(", ")" ]
Call rollback on executed actions.
[ "Call", "rollback", "on", "executed", "actions", "." ]
a7a78ab116abe88af95b5315dc9f34d40ce81eb2
https://github.com/mikerhodes/actionqueues/blob/a7a78ab116abe88af95b5315dc9f34d40ce81eb2/actionqueues/actionqueue.py#L38-L46
240,905
mikerhodes/actionqueues
actionqueues/actionqueue.py
ActionQueue.execute_with_retries
python
def execute_with_retries(self, action, f):
    """Execute function f with single argument action.
    Retry if ActionRetryException is raised.
    """
    # Run action until either it succeeds or throws an exception
    # that's not an ActionRetryException
    retry = True
    while retry:
        retry = False
        try:
            f(action)
        except ActionRetryException as ex:  # other exceptions should bubble out
            retry = True
            time.sleep(ex.ms_backoff / 1000.0)
[ "def", "execute_with_retries", "(", "self", ",", "action", ",", "f", ")", ":", "# Run action until either it succeeds or throws an exception", "# that's not an ActionRetryException", "retry", "=", "True", "while", "retry", ":", "retry", "=", "False", "try", ":", "f", "(", "action", ")", "except", "ActionRetryException", "as", "ex", ":", "# other exceptions should bubble out", "retry", "=", "True", "time", ".", "sleep", "(", "ex", ".", "ms_backoff", "/", "1000.0", ")" ]
Execute function f with single argument action. Retry if ActionRetryException is raised.
[ "Execute", "function", "f", "with", "single", "argument", "action", ".", "Retry", "if", "ActionRetryException", "is", "raised", "." ]
a7a78ab116abe88af95b5315dc9f34d40ce81eb2
https://github.com/mikerhodes/actionqueues/blob/a7a78ab116abe88af95b5315dc9f34d40ce81eb2/actionqueues/actionqueue.py#L48-L61
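Taking the four actionqueues methods together, a sketch of how an action might plug into ActionQueue. The CreateRecord class below is illustrative, not the library's base class, and the ActionRetryException constructor argument is an assumption inferred from the ex.ms_backoff attribute read in execute_with_retries; the execute/retry/rollback flow itself follows the code above.

class CreateRecord(object):
    """Illustrative action: asks for two retries, then succeeds."""
    def __init__(self):
        self.attempts = 0

    def execute(self):
        self.attempts += 1
        if self.attempts < 3:
            # Assumed constructor; execute_with_retries reads ex.ms_backoff.
            raise ActionRetryException(ms_backoff=100)

    def rollback(self):
        print("undoing record creation")

q = ActionQueue()
q.add(CreateRecord())
try:
    q.execute()
except ExecutionException:
    q.rollback()  # rolls back executed actions in reverse order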
240,906
luismasuelli/python-cantrips
cantrips/console.py
input_option
python
def input_option(message, options="yn", error_message=None):
    """
    Reads an option from the screen, with a specified prompt.
    Keeps asking until a valid option is sent by the user.
    """
    def _valid(character):
        if character not in options:
            print(error_message % character)
    return input("%s [%s]" % (message, options), _valid, True, lambda a: a.lower())
[ "def", "input_option", "(", "message", ",", "options", "=", "\"yn\"", ",", "error_message", "=", "None", ")", ":", "def", "_valid", "(", "character", ")", ":", "if", "character", "not", "in", "options", ":", "print", "(", "error_message", "%", "character", ")", "return", "input", "(", "\"%s [%s]\"", "%", "(", "message", ",", "options", ")", ",", "_valid", ",", "True", ",", "lambda", "a", ":", "a", ".", "lower", "(", ")", ")" ]
Reads an option from the screen, with a specified prompt. Keeps asking until a valid option is sent by the user.
[ "Reads", "an", "option", "from", "the", "screen", "with", "a", "specified", "prompt", ".", "Keeps", "asking", "until", "a", "valid", "option", "is", "sent", "by", "the", "user", "." ]
dba2742c1d1a60863bb65f4a291464f6e68eb2ee
https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/console.py#L66-L74
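The input called on the last line is cantrips' own four-argument reader (prompt, validator, retry flag, transform), not the builtin. For readers without the library, a self-contained sketch of the same validate-retry-lowercase pattern using only builtins:

def ask_option(message, options="yn", error_message="invalid option: %s"):
    # Keep prompting until the lowercased answer is one of the options.
    while True:
        answer = input("%s [%s]" % (message, options)).lower()
        if answer in options:
            return answer
        print(error_message % answer)

# e.g.: proceed = ask_option("Continue?")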
240,907
aquatix/python-utilkit
utilkit/stringutil.py
safe_unicode
python
def safe_unicode(obj, *args):
    """ return the unicode representation of obj """
    try:
        return unicode(obj, *args)  # noqa for undefined-variable
    except UnicodeDecodeError:
        # obj is byte string
        ascii_text = str(obj).encode('string_escape')
        try:
            return unicode(ascii_text)  # noqa for undefined-variable
        except NameError:
            # This is Python 3, just return the obj as it's already unicode
            return obj
    except NameError:
        # This is Python 3, just return the obj as it's already unicode
        return obj
[ "def", "safe_unicode", "(", "obj", ",", "*", "args", ")", ":", "try", ":", "return", "unicode", "(", "obj", ",", "*", "args", ")", "# noqa for undefined-variable", "except", "UnicodeDecodeError", ":", "# obj is byte string", "ascii_text", "=", "str", "(", "obj", ")", ".", "encode", "(", "'string_escape'", ")", "try", ":", "return", "unicode", "(", "ascii_text", ")", "# noqa for undefined-variable", "except", "NameError", ":", "# This is Python 3, just return the obj as it's already unicode", "return", "obj", "except", "NameError", ":", "# This is Python 3, just return the obj as it's already unicode", "return", "obj" ]
return the unicode representation of obj
[ "return", "the", "unicode", "representation", "of", "obj" ]
1b4a4175381d2175592208619315f399610f915c
https://github.com/aquatix/python-utilkit/blob/1b4a4175381d2175592208619315f399610f915c/utilkit/stringutil.py#L6-L20
240,908
aquatix/python-utilkit
utilkit/stringutil.py
safe_str
python
def safe_str(obj):
    """ return the byte string representation of obj """
    try:
        return str(obj)
    except UnicodeEncodeError:
        # obj is unicode
        try:
            return unicode(obj).encode('unicode_escape')  # noqa for undefined-variable
        except NameError:
            # This is Python 3, just return the obj as it's already unicode
            return obj
[ "def", "safe_str", "(", "obj", ")", ":", "try", ":", "return", "str", "(", "obj", ")", "except", "UnicodeEncodeError", ":", "# obj is unicode", "try", ":", "return", "unicode", "(", "obj", ")", ".", "encode", "(", "'unicode_escape'", ")", "# noqa for undefined-variable", "except", "NameError", ":", "# This is Python 3, just return the obj as it's already unicode", "return", "obj" ]
return the byte string representation of obj
[ "return", "the", "byte", "string", "representation", "of", "obj" ]
1b4a4175381d2175592208619315f399610f915c
https://github.com/aquatix/python-utilkit/blob/1b4a4175381d2175592208619315f399610f915c/utilkit/stringutil.py#L23-L33
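Both helpers are Python 2/3 compatibility shims: on Python 3 the unicode() call raises NameError and the object is returned untouched. A quick behavioral sketch (Python 3):

# Python 3: safe_str's str() call simply succeeds, and safe_unicode falls
# into the NameError branch; either way the value comes back unchanged.
assert safe_unicode('héllo') == 'héllo'
assert safe_str('héllo') == 'héllo'
# On Python 2, safe_str would instead return a byte string, escaping
# non-ASCII text via 'unicode_escape'.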
240,909
kathyxchen/crosstalk-correction
crosstalk_correction/crosstalk_correction.py
crosstalk_correction
python
def crosstalk_correction(pathway_definitions,
                         random_seed=2015,
                         gene_set=set(),
                         all_genes=True,
                         max_iters=1000):
    """A wrapper function around the maximum impact estimation algorithm.

    Parameters
    -----------
    pathway_definitions : dict(str -> set(str))
        The original pathway definitions.
        A pathway (key) is defined by a set of genes (value).
    random_seed : int (default=2015)
        Sets the numpy random seed
    gene_set : set(str) (default=set())
        Donato et al. (2013) uses this algorithm to remove crosstalk from
        definitions in case-control studies. Here, `gene_set` is equivalent
        to the DE (differentially expressed) genes they refer to in their
        paper. Because crosstalk removal is a preprocessing step applicable
        to pathway analyses in general, we keep the variable name
        nonspecific.
    all_genes : bool (default=True)
        This value is checked if `gene_set` is not empty.
        If False, crosstalk correction is only applied to annotations in
        the `gene_set`.
    max_iters : int (default=1000)
        The maximum number of expectation-maximization steps to take in the
        maximum impact estimation algorithm.

    Returns
    ----------
    dict(str -> tup(set(str), set(str))), where the (str) keys are the
    pathway names.
    tup[0] : crosstalk-correction applied to genes in the pathway
             definition that are also in `gene_set`
    tup[1] : - `all_genes` is True. Correction is applied to genes
               outside of `gene_set`.
             - `all_genes` is False. The second element in the tuple is
               all genes remaining in the original definition
               (definition - `gene_set`).
    """
    np.random.seed(seed=random_seed)
    genes_in_pathway_definitions = set.union(*pathway_definitions.values())
    pathway_column_names = index_element_map(pathway_definitions.keys())
    corrected_pathway_defns = {}

    if gene_set:
        gene_set = gene_set & genes_in_pathway_definitions
    if not gene_set and not all_genes:
        print("`gene_set` parameter was {0}, returning original "
              "pathway definitions".format(gene_set))
        for pathway, definition in pathway_definitions.items():
            corrected_pathway_defns[pathway] = (set(), definition)
        return pathway_definitions

    corrected_pathway_defns = _apply_correction_on_genes(
        gene_set, pathway_column_names, pathway_definitions)

    # crosstalk correction is _only_ applied to `gene_set`
    if not all_genes:
        for pathway, definition in pathway_definitions.items():
            if pathway not in corrected_pathway_defns:
                corrected_pathway_defns[pathway] = set()
            gene_set_defn = corrected_pathway_defns[pathway]
            remaining_defn = definition - gene_set
            corrected_pathway_defns[pathway] = (
                gene_set_defn, remaining_defn)
        return corrected_pathway_defns

    remaining_genes = genes_in_pathway_definitions - gene_set
    if not remaining_genes:
        for pathway, definition in corrected_pathway_defns.items():
            corrected_pathway_defns[pathway] = (definition, set())
        return corrected_pathway_defns

    pathway_remaining_defns = _apply_correction_on_genes(
        remaining_genes, pathway_column_names, pathway_definitions)

    for pathway, definitions in pathway_definitions.items():
        if pathway not in corrected_pathway_defns:
            corrected_pathway_defns[pathway] = set()
        if pathway not in pathway_remaining_defns:
            pathway_remaining_defns[pathway] = set()
        corrected_pathway_defns[pathway] = (
            corrected_pathway_defns[pathway],
            pathway_remaining_defns[pathway])
    return corrected_pathway_defns
[ "def", "crosstalk_correction", "(", "pathway_definitions", ",", "random_seed", "=", "2015", ",", "gene_set", "=", "set", "(", ")", ",", "all_genes", "=", "True", ",", "max_iters", "=", "1000", ")", ":", "np", ".", "random", ".", "seed", "(", "seed", "=", "random_seed", ")", "genes_in_pathway_definitions", "=", "set", ".", "union", "(", "*", "pathway_definitions", ".", "values", "(", ")", ")", "pathway_column_names", "=", "index_element_map", "(", "pathway_definitions", ".", "keys", "(", ")", ")", "corrected_pathway_defns", "=", "{", "}", "if", "gene_set", ":", "gene_set", "=", "gene_set", "&", "genes_in_pathway_definitions", "if", "not", "gene_set", "and", "not", "all_genes", ":", "print", "(", "\"`gene_set` parameter was {0}, returning original\"", "\"pathway definitions\"", ".", "format", "(", "gene_set", ")", ")", "for", "pathway", ",", "definition", "in", "pathway_definitions", ".", "items", "(", ")", ":", "corrected_pathway_defns", "[", "pathway", "]", "=", "(", "set", "(", ")", ",", "definition", ")", "return", "pathway_definitions", "corrected_pathway_defns", "=", "_apply_correction_on_genes", "(", "gene_set", ",", "pathway_column_names", ",", "pathway_definitions", ")", "# crosstalk correction is _only_ applied to `gene_set`", "if", "not", "all_genes", ":", "for", "pathway", ",", "definition", "in", "pathway_definitions", ".", "items", "(", ")", ":", "if", "pathway", "not", "in", "corrected_pathway_defns", ":", "corrected_pathway_defns", "[", "pathway", "]", "=", "set", "(", ")", "gene_set_defn", "=", "corrected_pathway_defns", "[", "pathway", "]", "remaining_defn", "=", "definition", "-", "gene_set", "corrected_pathway_defns", "[", "pathway", "]", "=", "(", "gene_set_defn", ",", "remaining_defn", ")", "return", "corrected_pathway_defns", "remaining_genes", "=", "genes_in_pathway_definitions", "-", "gene_set", "if", "not", "remaining_genes", ":", "for", "pathway", ",", "definition", "in", "corrected_pathway_defns", ".", "items", "(", ")", ":", "corrected_pathway_defns", "[", "pathway", "]", "=", "(", "definition", ",", "set", "(", ")", ")", "return", "corrected_pathway_defns", "pathway_remaining_defns", "=", "_apply_correction_on_genes", "(", "remaining_genes", ",", "pathway_column_names", ",", "pathway_definitions", ")", "for", "pathway", ",", "definitions", "in", "pathway_definitions", ".", "items", "(", ")", ":", "if", "pathway", "not", "in", "corrected_pathway_defns", ":", "corrected_pathway_defns", "[", "pathway", "]", "=", "set", "(", ")", "if", "pathway", "not", "in", "pathway_remaining_defns", ":", "pathway_remaining_defns", "[", "pathway", "]", "=", "set", "(", ")", "corrected_pathway_defns", "[", "pathway", "]", "=", "(", "corrected_pathway_defns", "[", "pathway", "]", ",", "pathway_remaining_defns", "[", "pathway", "]", ")", "return", "corrected_pathway_defns" ]
A wrapper function around the maximum impact estimation algorithm. Parameters ----------- pathway_definitions : dict(str -> set(str)) The original pathway definitions. A pathway (key) is defined by a set of genes (value). random_seed : int (default=2015) Sets the numpy random seed gene_set : set(str) (default=set()) Donato et al. (2013) uses this algorithm to remove crosstalk from definitions in case-control studies. Here, `gene_set` is equivalent to the DE (differentially expressed) genes they refer to in their paper. Because crosstalk removal is a preprocessing step applicable to pathway analyses in general, we keep the variable name nonspecific. all_genes : bool (default=True) This value is checked if `gene_set` is not empty. If False, crosstalk correction is only applied to annotations in the `gene_set`. max_iters : int (default=1000) The maximum number of expectation-maximization steps to take in the maximum impact estimation algorithm. Returns ---------- dict(str -> tup(set(str), set(str))), where the (str) keys are the pathway names. tup[0] : crosstalk-correction applied to genes in the pathway definition that are also in `gene_set` tup[1] : - `all_genes` is True. Correction is applied to genes outside of `gene_set`. - `all_genes` is False. The second element in the tuple is all genes remaining in the original definition (definition - `gene_set`).
[ "A", "wrapper", "function", "around", "the", "maximum", "impact", "estimation", "algorithm", "." ]
eac11d54b39ea7ec301b54f8e498adce91713223
https://github.com/kathyxchen/crosstalk-correction/blob/eac11d54b39ea7ec301b54f8e498adce91713223/crosstalk_correction/crosstalk_correction.py#L8-L98
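A minimal usage sketch with invented pathway data. Passing the full gene universe as gene_set keeps every gene on the corrected side of each tuple; because ties between overlapping pathways are broken with np.random.choice, the exact placement of shared genes depends on the seed.

pathways = {
    'pathway_A': {'g1', 'g2', 'g3'},
    'pathway_B': {'g2', 'g3', 'g4'},
}
corrected = crosstalk_correction(pathways, gene_set={'g1', 'g2', 'g3', 'g4'})
for name, (corrected_genes, rest) in corrected.items():
    # Each shared gene now appears in exactly one pathway's definition.
    print(name, sorted(corrected_genes))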
240,910
kathyxchen/crosstalk-correction
crosstalk_correction/crosstalk_correction.py
maximum_impact_estimation
python
def maximum_impact_estimation(membership_matrix, max_iters=1000):
    """An expectation maximization technique that produces pathway
    definitions devoid of crosstalk. That is, each gene is mapped to the
    pathway in which it has the greatest predicted impact; this removes
    any overlap between pathway definitions.

    Parameters
    -----------
    membership_matrix : numpy.array(float), shape = [n, k]
        The observed gene-to-pathway membership matrix, where n is the
        number of genes and k is the number of pathways we are interested in.
    max_iters : int (default=1000)
        The maximum number of expectation-maximization steps to take.

    Returns
    -----------
    dict(int -> set(int)), a dictionary mapping a pathway to a set of genes.
    These are the pathway definitions after the maximum impact estimation
    procedure has been applied to remove crosstalk.
    - The keys are ints corresponding to the pathway column indices in
      the membership matrix.
    - The values are sets of ints corresponding to gene row indices in
      the membership matrix.
    """
    # Initialize the probability vector as the sum of each column in the
    # membership matrix normalized by the sum of the entire membership matrix.
    # The probability at some index j in the vector represents the likelihood
    # that a pathway (column) j is defined by the current set of genes (rows)
    # in the membership matrix.
    pr_0 = np.sum(membership_matrix, axis=0) / np.sum(membership_matrix)
    pr_1 = _update_probabilities(pr_0, membership_matrix)

    epsilon = np.linalg.norm(pr_1 - pr_0) / 100.
    pr_old = pr_1
    check_for_convergence = epsilon
    count = 0
    while epsilon > NEAR_ZERO and check_for_convergence >= epsilon:
        count += 1
        if count > max_iters:
            print("Reached the maximum number of iterations {0}".format(
                max_iters))
            break
        pr_new = _update_probabilities(pr_old, membership_matrix)
        check_for_convergence = np.linalg.norm(pr_new - pr_old)
        pr_old = pr_new

    pr_final = pr_old  # renaming for readability
    corrected_pathway_definitions = {}
    n, k = membership_matrix.shape
    for gene_index in range(n):
        gene_membership = membership_matrix[gene_index]
        denominator = np.dot(gene_membership, pr_final)
        # Approximation is used to prevent divide by zero warning.
        # Since we are only looking for the _most_ probable pathway in which a
        # gene contributes its maximum impact, precision is not as important
        # as maintaining the relative differences between each
        # pathway's probability.
        if denominator < NEAR_ZERO:
            denominator = NEAR_ZERO
        # This is equivalent to one row in what Donato et al. (2013) refer
        # to as the underlying (latent) Z matrix.
        conditional_pathway_pr = (np.multiply(gene_membership, pr_final) /
                                  denominator)
        all_pathways_at_max = np.where(
            conditional_pathway_pr == conditional_pathway_pr.max())[0]
        gene_in_pathways = np.where(gene_membership == 1)[0]
        all_pathways_at_max = np.intersect1d(
            all_pathways_at_max, gene_in_pathways)
        pathway_index = np.random.choice(all_pathways_at_max)
        if pathway_index not in corrected_pathway_definitions:
            corrected_pathway_definitions[pathway_index] = set()
        corrected_pathway_definitions[pathway_index].add(gene_index)
    return corrected_pathway_definitions
[ "def", "maximum_impact_estimation", "(", "membership_matrix", ",", "max_iters", "=", "1000", ")", ":", "# Initialize the probability vector as the sum of each column in the", "# membership matrix normalized by the sum of the entire membership matrix.", "# The probability at some index j in the vector represents the likelihood", "# that a pathway (column) j is defined by the current set of genes (rows)", "# in the membership matrix.", "pr_0", "=", "np", ".", "sum", "(", "membership_matrix", ",", "axis", "=", "0", ")", "/", "np", ".", "sum", "(", "membership_matrix", ")", "pr_1", "=", "_update_probabilities", "(", "pr_0", ",", "membership_matrix", ")", "epsilon", "=", "np", ".", "linalg", ".", "norm", "(", "pr_1", "-", "pr_0", ")", "/", "100.", "pr_old", "=", "pr_1", "check_for_convergence", "=", "epsilon", "count", "=", "0", "while", "epsilon", ">", "NEAR_ZERO", "and", "check_for_convergence", ">=", "epsilon", ":", "count", "+=", "1", "if", "count", ">", "max_iters", ":", "print", "(", "\"Reached the maximum number of iterations {0}\"", ".", "format", "(", "max_iters", ")", ")", "break", "pr_new", "=", "_update_probabilities", "(", "pr_old", ",", "membership_matrix", ")", "check_for_convergence", "=", "np", ".", "linalg", ".", "norm", "(", "pr_new", "-", "pr_old", ")", "pr_old", "=", "pr_new", "pr_final", "=", "pr_old", "# renaming for readability", "corrected_pathway_definitions", "=", "{", "}", "n", ",", "k", "=", "membership_matrix", ".", "shape", "for", "gene_index", "in", "range", "(", "n", ")", ":", "gene_membership", "=", "membership_matrix", "[", "gene_index", "]", "denominator", "=", "np", ".", "dot", "(", "gene_membership", ",", "pr_final", ")", "# Approximation is used to prevent divide by zero warning.", "# Since we are only looking for the _most_ probable pathway in which a", "# gene contributes its maximum impact, precision is not as important", "# as maintaining the relative differences between each", "# pathway's probability.", "if", "denominator", "<", "NEAR_ZERO", ":", "denominator", "=", "NEAR_ZERO", "# This is equivalent to one row in what Donato et al. (2013) refer", "# to as the underlying (latent) Z matrix.", "conditional_pathway_pr", "=", "(", "np", ".", "multiply", "(", "gene_membership", ",", "pr_final", ")", "/", "denominator", ")", "all_pathways_at_max", "=", "np", ".", "where", "(", "conditional_pathway_pr", "==", "conditional_pathway_pr", ".", "max", "(", ")", ")", "[", "0", "]", "gene_in_pathways", "=", "np", ".", "where", "(", "gene_membership", "==", "1", ")", "[", "0", "]", "all_pathways_at_max", "=", "np", ".", "intersect1d", "(", "all_pathways_at_max", ",", "gene_in_pathways", ")", "pathway_index", "=", "np", ".", "random", ".", "choice", "(", "all_pathways_at_max", ")", "if", "pathway_index", "not", "in", "corrected_pathway_definitions", ":", "corrected_pathway_definitions", "[", "pathway_index", "]", "=", "set", "(", ")", "corrected_pathway_definitions", "[", "pathway_index", "]", ".", "add", "(", "gene_index", ")", "return", "corrected_pathway_definitions" ]
An expectation maximization technique that produces pathway definitions devoid of crosstalk. That is, each gene is mapped to the pathway in which it has the greatest predicted impact; this removes any overlap between pathway definitions. Parameters ----------- membership_matrix : numpy.array(float), shape = [n, k] The observed gene-to-pathway membership matrix, where n is the number of genes and k is the number of pathways we are interested in. max_iters : int (default=1000) The maximum number of expectation-maximization steps to take. Returns ----------- dict(int -> set(int)), a dictionary mapping a pathway to a set of genes. These are the pathway definitions after the maximum impact estimation procedure has been applied to remove crosstalk. - The keys are ints corresponding to the pathway column indices in the membership matrix. - The values are sets of ints corresponding to gene row indices in the membership matrix.
[ "An", "expectation", "maximization", "technique", "that", "produces", "pathway", "definitions", "devoid", "of", "crosstalk", ".", "That", "is", "each", "gene", "is", "mapped", "to", "the", "pathway", "in", "which", "it", "has", "the", "greatest", "predicted", "impact", ";", "this", "removes", "any", "overlap", "between", "pathway", "definitions", "." ]
eac11d54b39ea7ec301b54f8e498adce91713223
https://github.com/kathyxchen/crosstalk-correction/blob/eac11d54b39ea7ec301b54f8e498adce91713223/crosstalk_correction/crosstalk_correction.py#L101-L174
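A toy run on a 3-gene-by-2-pathway matrix (NEAR_ZERO is a module-level constant whose value is not shown in this excerpt). Gene 1 sits in both pathways and gets assigned to exactly one of them:

import numpy as np

membership = np.array([[1., 0.],    # gene 0: pathway 0 only
                       [1., 1.],    # gene 1: overlaps both pathways
                       [0., 1.]])   # gene 2: pathway 1 only
np.random.seed(0)
print(maximum_impact_estimation(membership))
# e.g. {0: {0, 1}, 1: {2}} -- gene 1 lands in one pathway, not both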
240,911
kathyxchen/crosstalk-correction
crosstalk_correction/crosstalk_correction.py
initialize_membership_matrix
python
def initialize_membership_matrix(gene_row_names, pathway_definitions):
    """Create the binary gene-to-pathway membership matrix that will
    be considered in the maximum impact estimation procedure.

    Parameters
    -----------
    gene_row_names : set(str)
        The genes for which we want to assess pathway membership
    pathway_definitions : dict(str -> set(str))
        Pathway definitions, pre-crosstalk-removal.
        A pathway (key) is defined by a set of genes (value).

    Returns
    ----------
    numpy.array, shape = [n, k], the membership matrix
    """
    membership = []
    for pathway, full_definition in pathway_definitions.items():
        pathway_genes = list(full_definition & gene_row_names)
        membership.append(np.in1d(list(gene_row_names), pathway_genes))
    membership = np.array(membership).astype("float").T
    return membership
[ "def", "initialize_membership_matrix", "(", "gene_row_names", ",", "pathway_definitions", ")", ":", "membership", "=", "[", "]", "for", "pathway", ",", "full_definition", "in", "pathway_definitions", ".", "items", "(", ")", ":", "pathway_genes", "=", "list", "(", "full_definition", "&", "gene_row_names", ")", "membership", ".", "append", "(", "np", ".", "in1d", "(", "list", "(", "gene_row_names", ")", ",", "pathway_genes", ")", ")", "membership", "=", "np", ".", "array", "(", "membership", ")", ".", "astype", "(", "\"float\"", ")", ".", "T", "return", "membership" ]
Create the binary gene-to-pathway membership matrix that will be considered in the maximum impact estimation procedure. Parameters ----------- gene_row_names : set(str) The genes for which we want to assess pathway membership pathway_definitions : dict(str -> set(str)) Pathway definitions, pre-crosstalk-removal. A pathway (key) is defined by a set of genes (value). Returns ----------- numpy.array, shape = [n, k], the membership matrix
[ "Create", "the", "binary", "gene", "-", "to", "-", "pathway", "membership", "matrix", "that", "will", "be", "considered", "in", "the", "maximum", "impact", "estimation", "procedure", "." ]
eac11d54b39ea7ec301b54f8e498adce91713223
https://github.com/kathyxchen/crosstalk-correction/blob/eac11d54b39ea7ec301b54f8e498adce91713223/crosstalk_correction/crosstalk_correction.py#L177-L198
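A small worked example. Row order follows set iteration order over gene_row_names and column order follows dict iteration order over pathway_definitions, which is why the caller keeps index_element_map lookups alongside the matrix; the counts below hold regardless of order.

genes = {'g1', 'g2', 'g3'}
pathways = {'A': {'g1', 'g2'}, 'B': {'g2', 'g3'}}
m = initialize_membership_matrix(genes, pathways)
print(m.shape)  # (3, 2): one row per gene, one column per pathway
print(m.sum())  # 4.0: four gene-to-pathway memberships in total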
240,912
kathyxchen/crosstalk-correction
crosstalk_correction/crosstalk_correction.py
index_element_map
python
def index_element_map(arr):
    """Map the indices of the array to the respective elements.

    Parameters
    -----------
    arr : list(a)
        The array to process, of generic type a

    Returns
    ----------
    dict(int -> a), a dictionary corresponding the index to the element
    """
    index_to_element = {}
    for index, element in enumerate(arr):
        index_to_element[index] = element
    return index_to_element
[ "def", "index_element_map", "(", "arr", ")", ":", "index_to_element", "=", "{", "}", "for", "index", ",", "element", "in", "enumerate", "(", "arr", ")", ":", "index_to_element", "[", "index", "]", "=", "element", "return", "index_to_element" ]
Map the indices of the array to the respective elements. Parameters ----------- arr : list(a) The array to process, of generic type a Returns ----------- dict(int -> a), a dictionary corresponding the index to the element
[ "Map", "the", "indices", "of", "the", "array", "to", "the", "respective", "elements", "." ]
eac11d54b39ea7ec301b54f8e498adce91713223
https://github.com/kathyxchen/crosstalk-correction/blob/eac11d54b39ea7ec301b54f8e498adce91713223/crosstalk_correction/crosstalk_correction.py#L201-L216
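For reference, this helper is equivalent to a dict built directly from enumerate:

assert index_element_map(['a', 'b', 'c']) == dict(enumerate(['a', 'b', 'c']))
# {0: 'a', 1: 'b', 2: 'c'}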
240,913
kathyxchen/crosstalk-correction
crosstalk_correction/crosstalk_correction.py
_apply_correction_on_genes
python
def _apply_correction_on_genes(genes,
                               pathway_column_names,
                               pathway_definitions):
    """Helper function to create the gene-to-pathway membership matrix
    and apply crosstalk correction on that matrix.
    Returns the crosstalk-corrected pathway definitions for the
    input `genes.`
    """
    gene_row_names = index_element_map(genes)
    membership_matrix = initialize_membership_matrix(
        genes, pathway_definitions)
    crosstalk_corrected_index_map = maximum_impact_estimation(
        membership_matrix)
    updated_pathway_definitions = _update_pathway_definitions(
        crosstalk_corrected_index_map, gene_row_names, pathway_column_names)
    return updated_pathway_definitions
[ "def", "_apply_correction_on_genes", "(", "genes", ",", "pathway_column_names", ",", "pathway_definitions", ")", ":", "gene_row_names", "=", "index_element_map", "(", "genes", ")", "membership_matrix", "=", "initialize_membership_matrix", "(", "genes", ",", "pathway_definitions", ")", "crosstalk_corrected_index_map", "=", "maximum_impact_estimation", "(", "membership_matrix", ")", "updated_pathway_definitions", "=", "_update_pathway_definitions", "(", "crosstalk_corrected_index_map", ",", "gene_row_names", ",", "pathway_column_names", ")", "return", "updated_pathway_definitions" ]
Helper function to create the gene-to-pathway membership matrix and apply crosstalk correction on that matrix. Returns the crosstalk-corrected pathway definitions for the input `genes.`
[ "Helper", "function", "to", "create", "the", "gene", "-", "to", "-", "pathway", "membership", "matrix", "and", "apply", "crosstalk", "correction", "on", "that", "matrix", ".", "Returns", "the", "crosstalk", "-", "corrected", "pathway", "definitions", "for", "the", "input", "genes", "." ]
eac11d54b39ea7ec301b54f8e498adce91713223
https://github.com/kathyxchen/crosstalk-correction/blob/eac11d54b39ea7ec301b54f8e498adce91713223/crosstalk_correction/crosstalk_correction.py#L219-L236
240,914
kathyxchen/crosstalk-correction
crosstalk_correction/crosstalk_correction.py
_update_probabilities
python
def _update_probabilities(pr, membership_matrix):
    """Updates the probability vector for each iteration of
    the expectation maximum algorithm in maximum impact estimation.

    Parameters
    -----------
    pr : numpy.array(float), shape = [k]
        The current vector of probabilities.
        An element at index j, where j is between 0 and k - 1,
        corresponds to the probability that, given a gene g_i, g_i has
        the greatest impact in pathway j.
    membership_matrix : numpy.array(float), shape = [n, k]
        The observed gene-to-pathway membership matrix, where n is the
        number of genes and k is the number of pathways we are interested in.

    Returns
    ----------
    numpy.array(float), shape = [k], a vector of updated probabilities
    """
    n, k = membership_matrix.shape
    pathway_col_sums = np.sum(membership_matrix, axis=0)
    weighted_pathway_col_sums = np.multiply(pathway_col_sums, pr)
    sum_of_col_sums = np.sum(weighted_pathway_col_sums)
    try:
        new_pr = weighted_pathway_col_sums / sum_of_col_sums
    except FloatingPointError:
        # In the event that we encounter underflow or overflow issues,
        # apply this approximation.
        cutoff = 1e-150 / k
        log_cutoff = np.log(cutoff)
        weighted_pathway_col_sums = _replace_zeros(
            weighted_pathway_col_sums, cutoff)
        log_weighted_col_sums = np.log(weighted_pathway_col_sums)
        log_weighted_col_sums -= np.max(log_weighted_col_sums)
        below_cutoff = log_weighted_col_sums < log_cutoff
        geq_cutoff = log_weighted_col_sums >= log_cutoff
        print("{1} adjustments made to a vector of length {0}"
              " containing the raw weight values"
              " in a call to 'update_probabilities'".format(
                  k, len(log_weighted_col_sums[below_cutoff])))
        new_pr = np.zeros(k)
        new_pr[below_cutoff] = cutoff
        col_sums_geq_cutoff = log_weighted_col_sums[geq_cutoff]
        new_pr[geq_cutoff] = np.exp(
            col_sums_geq_cutoff) / np.sum(np.exp(sorted(col_sums_geq_cutoff)))
    difference = np.abs(1. - np.sum(new_pr))
    assert difference < 1e-12, "Probabilities sum to {0}.".format(
        np.sum(new_pr))
    return new_pr
[ "def", "_update_probabilities", "(", "pr", ",", "membership_matrix", ")", ":", "n", ",", "k", "=", "membership_matrix", ".", "shape", "pathway_col_sums", "=", "np", ".", "sum", "(", "membership_matrix", ",", "axis", "=", "0", ")", "weighted_pathway_col_sums", "=", "np", ".", "multiply", "(", "pathway_col_sums", ",", "pr", ")", "sum_of_col_sums", "=", "np", ".", "sum", "(", "weighted_pathway_col_sums", ")", "try", ":", "new_pr", "=", "weighted_pathway_col_sums", "/", "sum_of_col_sums", "except", "FloatingPointError", ":", "# In the event that we encounter underflow or overflow issues,", "# apply this approximation.", "cutoff", "=", "1e-150", "/", "k", "log_cutoff", "=", "np", ".", "log", "(", "cutoff", ")", "weighted_pathway_col_sums", "=", "_replace_zeros", "(", "weighted_pathway_col_sums", ",", "cutoff", ")", "log_weighted_col_sums", "=", "np", ".", "log", "(", "weighted_pathway_col_sums", ")", "log_weighted_col_sums", "-=", "np", ".", "max", "(", "log_weighted_col_sums", ")", "below_cutoff", "=", "log_weighted_col_sums", "<", "log_cutoff", "geq_cutoff", "=", "log_weighted_col_sums", ">=", "log_cutoff", "print", "(", "\"{1} adjustments made to a vector of length {0}\"", "\" containing the raw weight values\"", "\" in a call to 'update_probabilities'\"", ".", "format", "(", "k", ",", "len", "(", "log_weighted_col_sums", "[", "below_cutoff", "]", ")", ")", ")", "new_pr", "=", "np", ".", "zeros", "(", "k", ")", "new_pr", "[", "below_cutoff", "]", "=", "cutoff", "col_sums_geq_cutoff", "=", "log_weighted_col_sums", "[", "geq_cutoff", "]", "new_pr", "[", "geq_cutoff", "]", "=", "np", ".", "exp", "(", "col_sums_geq_cutoff", ")", "/", "np", ".", "sum", "(", "np", ".", "exp", "(", "sorted", "(", "col_sums_geq_cutoff", ")", ")", ")", "difference", "=", "np", ".", "abs", "(", "1.", "-", "np", ".", "sum", "(", "new_pr", ")", ")", "assert", "difference", "<", "1e-12", ",", "\"Probabilities sum to {0}.\"", ".", "format", "(", "np", ".", "sum", "(", "new_pr", ")", ")", "return", "new_pr" ]
Updates the probability vector for each iteration of the expectation maximum algorithm in maximum impact estimation. Parameters ----------- pr : numpy.array(float), shape = [k] The current vector of probabilities. An element at index j, where j is between 0 and k - 1, corresponds to the probability that, given a gene g_i, g_i has the greatest impact in pathway j. membership_matrix : numpy.array(float), shape = [n, k] The observed gene-to-pathway membership matrix, where n is the number of genes and k is the number of pathways we are interested in. Returns ----------- numpy.array(float), shape = [k], a vector of updated probabilities
[ "Updates", "the", "probability", "vector", "for", "each", "iteration", "of", "the", "expectation", "maximum", "algorithm", "in", "maximum", "impact", "estimation", "." ]
eac11d54b39ea7ec301b54f8e498adce91713223
https://github.com/kathyxchen/crosstalk-correction/blob/eac11d54b39ea7ec301b54f8e498adce91713223/crosstalk_correction/crosstalk_correction.py#L254-L307
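On the happy path the update is a single weighted normalization: new_pr[j] = colsum[j] * pr[j] / sum over l of (colsum[l] * pr[l]). A hand-checked numeric sketch:

import numpy as np

membership = np.array([[1., 0.],
                       [1., 1.]])
pr = np.array([0.5, 0.5])
# Column sums are [2, 1]; weighting by pr gives [1.0, 0.5], which
# normalizes to [2/3, 1/3].
print(_update_probabilities(pr, membership))  # [0.66666667 0.33333333]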
240,915
kathyxchen/crosstalk-correction
crosstalk_correction/crosstalk_correction.py
_replace_zeros
python
def _replace_zeros(arr, default_min_value):
    """Substitute 0s in the list with a near-zero value.

    Parameters
    -----------
    arr : numpy.array(float)
    default_min_value : float
        If the smallest non-zero element in `arr` is greater than the
        default, use the default instead.

    Returns
    ----------
    numpy.array(float)
    """
    min_nonzero_value = min(default_min_value, np.min(arr[arr > 0]))
    closest_to_zero = np.nextafter(min_nonzero_value, min_nonzero_value - 1)
    arr[arr == 0] = closest_to_zero
    return arr
[ "def", "_replace_zeros", "(", "arr", ",", "default_min_value", ")", ":", "min_nonzero_value", "=", "min", "(", "default_min_value", ",", "np", ".", "min", "(", "arr", "[", "arr", ">", "0", "]", ")", ")", "closest_to_zero", "=", "np", ".", "nextafter", "(", "min_nonzero_value", ",", "min_nonzero_value", "-", "1", ")", "arr", "[", "arr", "==", "0", "]", "=", "closest_to_zero", "return", "arr" ]
Substitute 0s in the list with a near-zero value. Parameters ----------- arr : numpy.array(float) default_min_value : float If the smallest non-zero element in `arr` is greater than the default, use the default instead. Returns ----------- numpy.array(float)
[ "Substitute", "0s", "in", "the", "list", "with", "a", "near", "-", "zero", "value", "." ]
eac11d54b39ea7ec301b54f8e498adce91713223
https://github.com/kathyxchen/crosstalk-correction/blob/eac11d54b39ea7ec301b54f8e498adce91713223/crosstalk_correction/crosstalk_correction.py#L310-L327
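A small sketch of the substitution; note that arr is modified in place as well as returned:

import numpy as np

arr = np.array([0.0, 0.5, 1.0])
out = _replace_zeros(arr, default_min_value=0.25)
# The zero entry becomes the float just below min(0.25, 0.5) = 0.25.
print(0.0 < out[0] < 0.25)  # True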
240,916
noobermin/lspreader
bin/pmov.py
hdfoutput
python
def hdfoutput(outname, frames, dozip=False):
    '''Outputs the frames to an hdf file.'''
    with h5.File(outname, 'a') as f:
        for frame in frames:
            group = str(frame['step'])
            h5w(f, frame, group=group,
                compression='lzf' if dozip else None)
[ "def", "hdfoutput", "(", "outname", ",", "frames", ",", "dozip", "=", "False", ")", ":", "with", "h5", ".", "File", "(", "outname", ",", "'a'", ")", "as", "f", ":", "for", "frame", "in", "frames", ":", "group", "=", "str", "(", "frame", "[", "'step'", "]", ")", "h5w", "(", "f", ",", "frame", ",", "group", "=", "group", ",", "compression", "=", "'lzf'", "if", "dozip", "else", "None", ")" ]
Outputs the frames to an hdf file.
[ "Outputs", "the", "frames", "to", "an", "hdf", "file", "." ]
903b9d6427513b07986ffacf76cbca54e18d8be6
https://github.com/noobermin/lspreader/blob/903b9d6427513b07986ffacf76cbca54e18d8be6/bin/pmov.py#L42-L48
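A hedged usage sketch: h5 is lspreader's h5py import and h5w is its HDF5 writer, so this assumes the surrounding module. The frame contents below are invented; what the code above does require is a 'step' key per frame, which names its HDF5 group.

import numpy as np

# Hypothetical frames: one HDF5 group per time step.
frames = [
    {'step': 0, 'x': np.zeros(10), 'y': np.zeros(10)},
    {'step': 1, 'x': np.ones(10), 'y': np.ones(10)},
]
hdfoutput('pmov.h5', frames, dozip=True)  # dozip=True selects 'lzf' compression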
240,917
cdeboever3/cdpybio
cdpybio/bedtools.py
beds_to_boolean
python
def beds_to_boolean(beds, ref=None, beds_sorted=False, ref_sorted=False,
                    **kwargs):
    """
    Compare a list of bed files or BedTool objects to a reference bed file
    and create a boolean matrix where each row is an interval and each
    column is a 1 if that file has an interval that overlaps the row
    interval and a 0 otherwise. If no reference bed is provided, the
    provided bed files will be merged into a single bed and compared to
    that.

    Parameters
    ----------
    beds : list
        List of paths to bed files or BedTool objects.
    ref : str or BedTool
        Reference bed file to compare against. If no reference bed is
        provided, the provided bed files will be merged into a single bed
        and compared to that.
    beds_sorted : boolean
        Whether the bed files in beds are already sorted. If False, all bed
        files in beds will be sorted.
    ref_sorted : boolean
        Whether the reference bed file is sorted. If False, ref will be
        sorted.
    names : list of strings
        Names to use for columns of output files. Overrides
        define_sample_name if provided.
    define_sample_name : function that takes string as input
        Function mapping filename to sample name (or basename). For
        instance, you may have the basename in the path and use a regex to
        extract it. The basenames will be used as the column names. If this
        is not provided, the columns will be named as the input files.

    Returns
    -------
    out : pandas.DataFrame
        Boolean data frame indicating whether each bed file has an interval
        that overlaps each interval in the reference bed file.
    """
    beds = copy.deepcopy(beds)
    fns = []
    for i, v in enumerate(beds):
        if type(v) == str:
            fns.append(v)
            beds[i] = pbt.BedTool(v)
        else:
            fns.append(v.fn)
        if not beds_sorted:
            beds[i] = beds[i].sort()

    names = _sample_names(fns, kwargs)
    if ref:
        if type(ref) == str:
            ref = pbt.BedTool(ref)
        if not ref_sorted:
            ref = ref.sort()
    else:
        ref = combine(beds)

    ind = []
    for r in ref:
        ind.append('{}:{}-{}'.format(r.chrom, r.start, r.stop))
    bdf = pd.DataFrame(0, index=ind, columns=names)
    for i, bed in enumerate(beds):
        res = ref.intersect(bed, sorted=True, wa=True)
        ind = []
        for r in res:
            ind.append('{}:{}-{}'.format(r.chrom, r.start, r.stop))
        bdf.ix[ind, names[i]] = 1
    return bdf
[ "def", "beds_to_boolean", "(", "beds", ",", "ref", "=", "None", ",", "beds_sorted", "=", "False", ",", "ref_sorted", "=", "False", ",", "*", "*", "kwargs", ")", ":", "beds", "=", "copy", ".", "deepcopy", "(", "beds", ")", "fns", "=", "[", "]", "for", "i", ",", "v", "in", "enumerate", "(", "beds", ")", ":", "if", "type", "(", "v", ")", "==", "str", ":", "fns", ".", "append", "(", "v", ")", "beds", "[", "i", "]", "=", "pbt", ".", "BedTool", "(", "v", ")", "else", ":", "fns", ".", "append", "(", "v", ".", "fn", ")", "if", "not", "beds_sorted", ":", "beds", "[", "i", "]", "=", "beds", "[", "i", "]", ".", "sort", "(", ")", "names", "=", "_sample_names", "(", "fns", ",", "kwargs", ")", "if", "ref", ":", "if", "type", "(", "ref", ")", "==", "str", ":", "ref", "=", "pbt", ".", "BedTool", "(", "ref", ")", "if", "not", "ref_sorted", ":", "ref", "=", "ref", ".", "sort", "(", ")", "else", ":", "ref", "=", "combine", "(", "beds", ")", "ind", "=", "[", "]", "for", "r", "in", "ref", ":", "ind", ".", "append", "(", "'{}:{}-{}'", ".", "format", "(", "r", ".", "chrom", ",", "r", ".", "start", ",", "r", ".", "stop", ")", ")", "bdf", "=", "pd", ".", "DataFrame", "(", "0", ",", "index", "=", "ind", ",", "columns", "=", "names", ")", "for", "i", ",", "bed", "in", "enumerate", "(", "beds", ")", ":", "res", "=", "ref", ".", "intersect", "(", "bed", ",", "sorted", "=", "True", ",", "wa", "=", "True", ")", "ind", "=", "[", "]", "for", "r", "in", "res", ":", "ind", ".", "append", "(", "'{}:{}-{}'", ".", "format", "(", "r", ".", "chrom", ",", "r", ".", "start", ",", "r", ".", "stop", ")", ")", "bdf", ".", "ix", "[", "ind", ",", "names", "[", "i", "]", "]", "=", "1", "return", "bdf" ]
Compare a list of bed files or BedTool objects to a reference bed file and create a boolean matrix where each row is an interval and each column is a 1 if that file has an interval that overlaps the row interval and a 0 otherwise. If no reference bed is provided, the provided bed files will be merged into a single bed and compared to that. Parameters ---------- beds : list List of paths to bed files or BedTool objects. ref : str or BedTool Reference bed file to compare against. If no reference bed is provided, the provided bed files will be merged into a single bed and compared to that. beds_sorted : boolean Whether the bed files in beds are already sorted. If False, all bed files in beds will be sorted. ref_sorted : boolean Whether the reference bed file is sorted. If False, ref will be sorted. names : list of strings Names to use for columns of output files. Overrides define_sample_name if provided. define_sample_name : function that takes string as input Function mapping filename to sample name (or basename). For instance, you may have the basename in the path and use a regex to extract it. The basenames will be used as the column names. If this is not provided, the columns will be named as the input files. Returns ------- out : pandas.DataFrame Boolean data frame indicating whether each bed file has an interval that overlaps each interval in the reference bed file.
[ "Compare", "a", "list", "of", "bed", "files", "or", "BedTool", "objects", "to", "a", "reference", "bed", "file", "and", "create", "a", "boolean", "matrix", "where", "each", "row", "is", "an", "interval", "and", "each", "column", "is", "a", "1", "if", "that", "file", "has", "an", "interval", "that", "overlaps", "the", "row", "interval", "and", "a", "0", "otherwise", ".", "If", "no", "reference", "bed", "is", "provided", "the", "provided", "bed", "files", "will", "be", "merged", "into", "a", "single", "bed", "and", "compared", "to", "that", "." ]
38efdf0e11d01bc00a135921cb91a19c03db5d5c
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/bedtools.py#L271-L346
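A minimal usage sketch for beds_to_boolean; the import path and the bed file names are assumptions, not part of the record above. Note also that DataFrame.ix, used in the function body, was removed in later pandas releases, where .loc is the replacement.

# Usage sketch (assumed import path; sample_a.bed / sample_b.bed are hypothetical files).
from cdpybio.bedtools import beds_to_boolean

beds = ['sample_a.bed', 'sample_b.bed']
bdf = beds_to_boolean(beds, names=['a', 'b'])  # no ref: the beds are merged first
# Row labels look like 'chr1:100-200'; a 1 means that sample's bed file
# overlaps the row's interval.
print(bdf.head())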
240,918
cdeboever3/cdpybio
cdpybio/bedtools.py
combine
def combine(beds, beds_sorted=False, postmerge=True): """ Combine a list of bed files or BedTool objects into a single BedTool object. Parameters ---------- beds : list List of paths to bed files or BedTool objects. beds_sorted : boolean Whether the bed files in beds are already sorted. If False, all bed files in beds will be sorted. postmerge : boolean Whether to merge intervals after combining beds together. Returns ------- out : pybedtools.BedTool New sorted BedTool with intervals from all input beds. """ beds = copy.deepcopy(beds) for i,v in enumerate(beds): if type(v) == str: beds[i] = pbt.BedTool(v) if not beds_sorted: beds[i] = beds[i].sort() # For some reason, doing the merging in the reduce statement doesn't work. I # think this might be a pybedtools bug. In any fashion, I can merge # afterward although I think it makes a performance hit because the combined # bed file grows larger than it needs to. out = reduce(lambda x,y : x.cat(y, postmerge=False), beds) out = out.sort() if postmerge: out = out.merge() return out
python
def combine(beds, beds_sorted=False, postmerge=True): """ Combine a list of bed files or BedTool objects into a single BedTool object. Parameters ---------- beds : list List of paths to bed files or BedTool objects. beds_sorted : boolean Whether the bed files in beds are already sorted. If False, all bed files in beds will be sorted. postmerge : boolean Whether to merge intervals after combining beds together. Returns ------- out : pybedtools.BedTool New sorted BedTool with intervals from all input beds. """ beds = copy.deepcopy(beds) for i,v in enumerate(beds): if type(v) == str: beds[i] = pbt.BedTool(v) if not beds_sorted: beds[i] = beds[i].sort() # For some reason, doing the merging in the reduce statement doesn't work. I # think this might be a pybedtools bug. In any fashion, I can merge # afterward although I think it makes a performance hit because the combined # bed file grows larger than it needs to. out = reduce(lambda x,y : x.cat(y, postmerge=False), beds) out = out.sort() if postmerge: out = out.merge() return out
[ "def", "combine", "(", "beds", ",", "beds_sorted", "=", "False", ",", "postmerge", "=", "True", ")", ":", "beds", "=", "copy", ".", "deepcopy", "(", "beds", ")", "for", "i", ",", "v", "in", "enumerate", "(", "beds", ")", ":", "if", "type", "(", "v", ")", "==", "str", ":", "beds", "[", "i", "]", "=", "pbt", ".", "BedTool", "(", "v", ")", "if", "not", "beds_sorted", ":", "beds", "[", "i", "]", "=", "beds", "[", "i", "]", ".", "sort", "(", ")", "# For some reason, doing the merging in the reduce statement doesn't work. I", "# think this might be a pybedtools bug. In any fashion, I can merge", "# afterward although I think it makes a performance hit because the combined", "# bed file grows larger than it needs to.", "out", "=", "reduce", "(", "lambda", "x", ",", "y", ":", "x", ".", "cat", "(", "y", ",", "postmerge", "=", "False", ")", ",", "beds", ")", "out", "=", "out", ".", "sort", "(", ")", "if", "postmerge", ":", "out", "=", "out", ".", "merge", "(", ")", "return", "out" ]
Combine a list of bed files or BedTool objects into a single BedTool object. Parameters ---------- beds : list List of paths to bed files or BedTool objects. beds_sorted : boolean Whether the bed files in beds are already sorted. If False, all bed files in beds will be sorted. postmerge : boolean Whether to merge intervals after combining beds together. Returns ------- out : pybedtools.BedTool New sorted BedTool with intervals from all input beds.
[ "Combine", "a", "list", "of", "bed", "files", "or", "BedTool", "objects", "into", "a", "single", "BedTool", "object", "." ]
38efdf0e11d01bc00a135921cb91a19c03db5d5c
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/bedtools.py#L348-L385
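A short usage sketch for combine; the import path and file names are hypothetical. The postmerge flag controls whether overlapping intervals are flattened after concatenation.

# Usage sketch (assumed import path; a.bed and b.bed are hypothetical files).
from cdpybio.bedtools import combine

merged = combine(['a.bed', 'b.bed'])                # sorted, merged BedTool
raw = combine(['a.bed', 'b.bed'], postmerge=False)  # keep overlapping intervals
merged.head()                                       # print the first intervals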
240,919
cdeboever3/cdpybio
cdpybio/bedtools.py
strip_chr
def strip_chr(bt): """Strip 'chr' from chromosomes for BedTool object Parameters ---------- bt : pybedtools.BedTool BedTool to strip 'chr' from. Returns ------- out : pybedtools.BedTool New BedTool with 'chr' stripped from chromosome names. """ try: df = pd.read_table(bt.fn, header=None, dtype=str) # If the try fails, I assume that's because the file has a trackline. Note # that I don't preserve the trackline (I'm not sure how pybedtools keeps # track of it anyway). except pd.parser.CParserError: df = pd.read_table(bt.fn, header=None, skiprows=1, dtype=str) df[0] = df[0].apply(lambda x: x[3:]) s = '\n'.join(df.astype(str).apply(lambda x: '\t'.join(x), axis=1)) + '\n' out = pbt.BedTool(s, from_string=True) return out
python
def strip_chr(bt): """Strip 'chr' from chromosomes for BedTool object Parameters ---------- bt : pybedtools.BedTool BedTool to strip 'chr' from. Returns ------- out : pybedtools.BedTool New BedTool with 'chr' stripped from chromosome names. """ try: df = pd.read_table(bt.fn, header=None, dtype=str) # If the try fails, I assume that's because the file has a trackline. Note # that I don't preserve the trackline (I'm not sure how pybedtools keeps # track of it anyway). except pd.parser.CParserError: df = pd.read_table(bt.fn, header=None, skiprows=1, dtype=str) df[0] = df[0].apply(lambda x: x[3:]) s = '\n'.join(df.astype(str).apply(lambda x: '\t'.join(x), axis=1)) + '\n' out = pbt.BedTool(s, from_string=True) return out
[ "def", "strip_chr", "(", "bt", ")", ":", "try", ":", "df", "=", "pd", ".", "read_table", "(", "bt", ".", "fn", ",", "header", "=", "None", ",", "dtype", "=", "str", ")", "# If the try fails, I assume that's because the file has a trackline. Note", "# that I don't preserve the trackline (I'm not sure how pybedtools keeps", "# track of it anyway).", "except", "pd", ".", "parser", ".", "CParserError", ":", "df", "=", "pd", ".", "read_table", "(", "bt", ".", "fn", ",", "header", "=", "None", ",", "skiprows", "=", "1", ",", "dtype", "=", "str", ")", "df", "[", "0", "]", "=", "df", "[", "0", "]", ".", "apply", "(", "lambda", "x", ":", "x", "[", "3", ":", "]", ")", "s", "=", "'\\n'", ".", "join", "(", "df", ".", "astype", "(", "str", ")", ".", "apply", "(", "lambda", "x", ":", "'\\t'", ".", "join", "(", "x", ")", ",", "axis", "=", "1", ")", ")", "+", "'\\n'", "out", "=", "pbt", ".", "BedTool", "(", "s", ",", "from_string", "=", "True", ")", "return", "out" ]
Strip 'chr' from chromosomes for BedTool object Parameters ---------- bt : pybedtools.BedTool BedTool to strip 'chr' from. Returns ------- out : pybedtools.BedTool New BedTool with 'chr' stripped from chromosome names.
[ "Strip", "chr", "from", "chromosomes", "for", "BedTool", "object" ]
38efdf0e11d01bc00a135921cb91a19c03db5d5c
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/bedtools.py#L414-L438
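A usage sketch for strip_chr built from an in-memory BedTool, so no files are needed; the import path is an assumption. Note that pd.parser.CParserError is the pre-0.20 pandas location of this exception; modern pandas exposes it as pd.errors.ParserError.

# Usage sketch (assumed import path).
import pybedtools as pbt
from cdpybio.bedtools import strip_chr

bt = pbt.BedTool('chr1\t100\t200\nchr2\t5\t50\n', from_string=True)
stripped = strip_chr(bt)   # intervals become 1:100-200 and 2:5-50
stripped.head()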
240,920
cdeboever3/cdpybio
cdpybio/bedtools.py
AnnotatedInteractions.load_saved_bts
def load_saved_bts(self): """If the AnnotatedInteractions object was saved to a pickle and reloaded, this method remakes the BedTool objects.""" if self._bt1_path: self.bt1 = pbt.BedTool(self._bt1_path) if self._bt2_path: self.bt2 = pbt.BedTool(self._bt2_path) if self._bt_loop_path: self.bt_loop = pbt.BedTool(self._bt_loop_path) if self._bt_loop_inner_path: self.bt_loop_inner = pbt.BedTool(self._bt_loop_inner_path)
python
def load_saved_bts(self): """If the AnnotatedInteractions object was saved to a pickle and reloaded, this method remakes the BedTool objects.""" if self._bt1_path: self.bt1 = pbt.BedTool(self._bt1_path) if self._bt2_path: self.bt2 = pbt.BedTool(self._bt2_path) if self._bt_loop_path: self.bt_loop = pbt.BedTool(self._bt_loop_path) if self._bt_loop_inner_path: self.bt_loop_inner = pbt.BedTool(self._bt_loop_inner_path)
[ "def", "load_saved_bts", "(", "self", ")", ":", "if", "self", ".", "_bt1_path", ":", "self", ".", "bt1", "=", "pbt", ".", "BedTool", "(", "self", ".", "_bt1_path", ")", "if", "self", ".", "_bt2_path", ":", "self", ".", "bt2", "=", "pbt", ".", "BedTool", "(", "self", ".", "_bt2_path", ")", "if", "self", ".", "_bt_loop_path", ":", "self", ".", "bt_loop", "=", "pbt", ".", "BedTool", "(", "self", ".", "_bt_loop_path", ")", "if", "self", ".", "_bt_loop_inner_path", ":", "self", ".", "bt_loop_inner", "=", "pbt", ".", "BedTool", "(", "self", ".", "_bt_loop_inner_path", ")" ]
If the AnnotatedInteractions object was saved to a pickle and reloaded, this method remakes the BedTool objects.
[ "If", "the", "AnnotatedInteractions", "object", "was", "saved", "to", "a", "pickle", "and", "reloaded", "this", "method", "remakes", "the", "BedTool", "objects", "." ]
38efdf0e11d01bc00a135921cb91a19c03db5d5c
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/bedtools.py#L89-L99
240,921
inveniosoftware-contrib/record-recommender
record_recommender/storage.py
File.delete
def delete(self): """Delete the file.""" self.close() if self.does_file_exist(): os.remove(self.path)
python
def delete(self): """Delete the file.""" self.close() if self.does_file_exist(): os.remove(self.path)
[ "def", "delete", "(", "self", ")", ":", "self", ".", "close", "(", ")", "if", "self", ".", "does_file_exist", "(", ")", ":", "os", ".", "remove", "(", "self", ".", "path", ")" ]
Delete the file.
[ "Delete", "the", "file", "." ]
07f71e783369e6373218b5e6ba0bf15901e9251a
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/storage.py#L63-L67
240,922
inveniosoftware-contrib/record-recommender
record_recommender/storage.py
File.open
def open(self, mode='read'):
        """Open the file."""
        if self.file:
            self.close()
            raise RuntimeError('Close file before opening.')
        if mode == 'write':
            self.file = open(self.path, 'w')
        elif mode == 'overwrite':
            # Delete file if it exists.
            self.file = open(self.path, 'w+')
        else:
            # Open for reading.
            self.file = open(self.path, 'r')
        self._csv = csv.DictWriter(self.file, fieldnames=self.fields,
                                   delimiter=',', quotechar='|',
                                   quoting=csv.QUOTE_MINIMAL,
                                   extrasaction='ignore')
        if self.file.tell() == 0:
            self._csv.writeheader()
python
def open(self, mode='read'):
        """Open the file."""
        if self.file:
            self.close()
            raise RuntimeError('Close file before opening.')
        if mode == 'write':
            self.file = open(self.path, 'w')
        elif mode == 'overwrite':
            # Delete file if it exists.
            self.file = open(self.path, 'w+')
        else:
            # Open for reading.
            self.file = open(self.path, 'r')
        self._csv = csv.DictWriter(self.file, fieldnames=self.fields,
                                   delimiter=',', quotechar='|',
                                   quoting=csv.QUOTE_MINIMAL,
                                   extrasaction='ignore')
        if self.file.tell() == 0:
            self._csv.writeheader()
[ "def", "open", "(", "self", ",", "mode", "=", "'read'", ")", ":", "if", "self", ".", "file", ":", "self", ".", "close", "(", ")", "raise", "RuntimeError", "(", "'Close file before opening.'", ")", "if", "mode", "==", "'write'", ":", "self", ".", "file", "=", "open", "(", "self", ".", "path", ",", "'w'", ")", "elif", "mode", "==", "'overwrite'", ":", "# Delete file if it exists.", "self", ".", "file", "=", "open", "(", "self", ".", "path", ",", "'w+'", ")", "else", ":", "# Open for reading.", "self", ".", "file", "=", "open", "(", "self", ".", "path", ",", "'r'", ")", "self", ".", "_csv", "=", "csv", ".", "DictWriter", "(", "self", ".", "file", ",", "fieldnames", "=", "self", ".", "fields", ",", "delimiter", "=", "','", ",", "quotechar", "=", "'|'", ",", "quoting", "=", "csv", ".", "QUOTE_MINIMAL", ",", "extrasaction", "=", "'ignore'", ")", "if", "self", ".", "file", ".", "tell", "(", ")", "==", "0", ":", "self", ".", "_csv", ".", "writeheader", "(", ")" ]
Open the file.
[ "Open", "the", "file", "." ]
07f71e783369e6373218b5e6ba0bf15901e9251a
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/storage.py#L73-L94
240,923
inveniosoftware-contrib/record-recommender
record_recommender/storage.py
RawEvents.add_hit
def add_hit(self, hit):
        """Add a hit to the file."""
        if not self._csv:
            raise RuntimeError('Open before write')
        self._csv.writerow(hit)
        self.number_of_hits += 1
        # Todo: check performance for timestamp check
        # assert self._path == self.get_filename_by_timestamp(timestamp)
        timestamp = hit['timestamp']
        if self.latest_timestamp <= timestamp:
            self.latest_timestamp = timestamp
python
def add_hit(self, hit):
        """Add a hit to the file."""
        if not self._csv:
            raise RuntimeError('Open before write')
        self._csv.writerow(hit)
        self.number_of_hits += 1
        # Todo: check performance for timestamp check
        # assert self._path == self.get_filename_by_timestamp(timestamp)
        timestamp = hit['timestamp']
        if self.latest_timestamp <= timestamp:
            self.latest_timestamp = timestamp
[ "def", "add_hit", "(", "self", ",", "hit", ")", ":", "if", "not", "self", ".", "_csv", ":", "raise", "RuntimeError", "(", "'Open before write'", ")", "self", ".", "_csv", ".", "writerow", "(", "hit", ")", "self", ".", "number_of_hits", "+=", "1", "# Todo: check performance for timestamp check", "# assert self._path == self.get_filename_by_timestamp(timestamp)", "timestamp", "=", "hit", "[", "'timestamp'", "]", "if", "self", ".", "latest_timestamp", "<=", "timestamp", ":", "self", ".", "latest_timestamp", "=", "timestamp" ]
Add a hit to the file.
[ "Add", "a", "hit", "to", "the", "file", "." ]
07f71e783369e6373218b5e6ba0bf15901e9251a
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/storage.py#L116-L126
240,924
inveniosoftware-contrib/record-recommender
record_recommender/storage.py
RawEvents.get_records
def get_records(self): """ Get all stored records. Returns: (timestamp, user, recid,...) """ self.close() with open(self.path, 'r') as filep: first_line = filep.readline().split(',') if first_line[0] != self.fields[0]: yield first_line for line in filep: yield line.split(',')
python
def get_records(self): """ Get all stored records. Returns: (timestamp, user, recid,...) """ self.close() with open(self.path, 'r') as filep: first_line = filep.readline().split(',') if first_line[0] != self.fields[0]: yield first_line for line in filep: yield line.split(',')
[ "def", "get_records", "(", "self", ")", ":", "self", ".", "close", "(", ")", "with", "open", "(", "self", ".", "path", ",", "'r'", ")", "as", "filep", ":", "first_line", "=", "filep", ".", "readline", "(", ")", ".", "split", "(", "','", ")", "if", "first_line", "[", "0", "]", "!=", "self", ".", "fields", "[", "0", "]", ":", "yield", "first_line", "for", "line", "in", "filep", ":", "yield", "line", ".", "split", "(", "','", ")" ]
Get all stored records. Returns: (timestamp, user, recid,...)
[ "Get", "all", "stored", "records", "." ]
07f71e783369e6373218b5e6ba0bf15901e9251a
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/storage.py#L128-L141
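An iteration sketch for get_records; the constructor arguments follow the RawEvents(filename, prefix, year, week) call seen in FileStore.get further below, and the concrete file name is hypothetical. Each yielded row is a list of strings split on ',', starting with the timestamp, and the header row is skipped automatically.

# Iteration sketch (hypothetical file name and prefix).
events = RawEvents('/data/cache/Pageviews_2016-7.csv', 'Pageviews', 2016, 7)
for row in events.get_records():
    timestamp, user = row[0], row[1]
    print(timestamp, user)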
240,925
inveniosoftware-contrib/record-recommender
record_recommender/storage.py
UserProfiles.add_user
def add_user(self, uid, nodes, weights): """Add a user.""" for i, node in enumerate(nodes): self.file.write("{},{},{}\n".format(uid, node, weights[i]))
python
def add_user(self, uid, nodes, weights): """Add a user.""" for i, node in enumerate(nodes): self.file.write("{},{},{}\n".format(uid, node, weights[i]))
[ "def", "add_user", "(", "self", ",", "uid", ",", "nodes", ",", "weights", ")", ":", "for", "i", ",", "node", "in", "enumerate", "(", "nodes", ")", ":", "self", ".", "file", ".", "write", "(", "\"{},{},{}\\n\"", ".", "format", "(", "uid", ",", "node", ",", "weights", "[", "i", "]", ")", ")" ]
Add a user.
[ "Add", "a", "user", "." ]
07f71e783369e6373218b5e6ba0bf15901e9251a
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/storage.py#L152-L155
240,926
inveniosoftware-contrib/record-recommender
record_recommender/storage.py
RedisStore.get
def get(self, key, default=None): """Get a key.""" key = "{0}{1}".format(self.prefix, key) data = self.redis.get(key) # Redis returns None not an exception if data is None: data = default else: data = json.loads(data) return data
python
def get(self, key, default=None): """Get a key.""" key = "{0}{1}".format(self.prefix, key) data = self.redis.get(key) # Redis returns None not an exception if data is None: data = default else: data = json.loads(data) return data
[ "def", "get", "(", "self", ",", "key", ",", "default", "=", "None", ")", ":", "key", "=", "\"{0}{1}\"", ".", "format", "(", "self", ".", "prefix", ",", "key", ")", "data", "=", "self", ".", "redis", ".", "get", "(", "key", ")", "# Redis returns None not an exception", "if", "data", "is", "None", ":", "data", "=", "default", "else", ":", "data", "=", "json", ".", "loads", "(", "data", ")", "return", "data" ]
Get a key.
[ "Get", "a", "key", "." ]
07f71e783369e6373218b5e6ba0bf15901e9251a
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/storage.py#L182-L193
240,927
inveniosoftware-contrib/record-recommender
record_recommender/storage.py
RedisStore.set
def set(self, key, value): """Set a key, value pair.""" key = "{0}{1}".format(self.prefix, key) value = json.dumps(value, cls=NumpyEncoder) self.redis.set(key, value)
python
def set(self, key, value): """Set a key, value pair.""" key = "{0}{1}".format(self.prefix, key) value = json.dumps(value, cls=NumpyEncoder) self.redis.set(key, value)
[ "def", "set", "(", "self", ",", "key", ",", "value", ")", ":", "key", "=", "\"{0}{1}\"", ".", "format", "(", "self", ".", "prefix", ",", "key", ")", "value", "=", "json", ".", "dumps", "(", "value", ",", "cls", "=", "NumpyEncoder", ")", "self", ".", "redis", ".", "set", "(", "key", ",", "value", ")" ]
Set a key, value pair.
[ "Set", "a", "key", "value", "pair", "." ]
07f71e783369e6373218b5e6ba0bf15901e9251a
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/storage.py#L195-L200
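A round-trip sketch for RedisStore.set and RedisStore.get; the constructor argument order (host, port, db, prefix) follows the get_recommendation_store call shown further below, and the concrete connection values are assumptions.

# Round-trip sketch (hypothetical connection values).
store = RedisStore('localhost', 6379, 0, 'recommender:')
store.set('record:42', {'ids': [1, 2, 3]})
print(store.get('record:42'))            # -> {'ids': [1, 2, 3]}
print(store.get('missing', default={}))  # -> {} (absent keys return the default)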
240,928
inveniosoftware-contrib/record-recommender
record_recommender/storage.py
FileStore.get_by_timestamp
def get_by_timestamp(self, prefix, timestamp):
        """Get the cache file for a given timestamp."""
        year, week = get_year_week(timestamp)
        return self.get(prefix, year, week)
python
def get_by_timestamp(self, prefix, timestamp):
        """Get the cache file for a given timestamp."""
        year, week = get_year_week(timestamp)
        return self.get(prefix, year, week)
[ "def", "get_by_timestamp", "(", "self", ",", "prefix", ",", "timestamp", ")", ":", "year", ",", "week", "=", "get_year_week", "(", "timestamp", ")", "return", "self", ".", "get", "(", "prefix", ",", "year", ",", "week", ")" ]
Get the cache file for a given timestamp.
[ "Get", "the", "cache", "file", "for", "a", "given", "timestamp", "." ]
07f71e783369e6373218b5e6ba0bf15901e9251a
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/storage.py#L242-L245
240,929
inveniosoftware-contrib/record-recommender
record_recommender/storage.py
FileStore.get
def get(self, prefix, year, week): """Get the cache file.""" filename = self._format_filename(prefix, year, week) return RawEvents(filename, prefix, year, week)
python
def get(self, prefix, year, week): """Get the cache file.""" filename = self._format_filename(prefix, year, week) return RawEvents(filename, prefix, year, week)
[ "def", "get", "(", "self", ",", "prefix", ",", "year", ",", "week", ")", ":", "filename", "=", "self", ".", "_format_filename", "(", "prefix", ",", "year", ",", "week", ")", "return", "RawEvents", "(", "filename", ",", "prefix", ",", "year", ",", "week", ")" ]
Get the cache file.
[ "Get", "the", "cache", "file", "." ]
07f71e783369e6373218b5e6ba0bf15901e9251a
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/storage.py#L247-L250
240,930
inveniosoftware-contrib/record-recommender
record_recommender/storage.py
FileStore.get_user_profiles
def get_user_profiles(self, prefix):
        """Get the user profile from the cache for the given prefix."""
        filepath = "{}{}".format(self.base_path, prefix)
        return UserProfiles(filepath, prefix)
python
def get_user_profiles(self, prefix):
        """Get the user profile from the cache for the given prefix."""
        filepath = "{}{}".format(self.base_path, prefix)
        return UserProfiles(filepath, prefix)
[ "def", "get_user_profiles", "(", "self", ",", "prefix", ")", ":", "filepath", "=", "\"{}{}\"", ".", "format", "(", "self", ".", "base_path", ",", "prefix", ")", "return", "UserProfiles", "(", "filepath", ",", "prefix", ")" ]
Get the user profile from the cache for the given prefix.
[ "Get", "the", "user", "profile", "from", "the", "cache", "for", "the", "given", "prefix", "." ]
07f71e783369e6373218b5e6ba0bf15901e9251a
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/storage.py#L252-L255
240,931
inveniosoftware-contrib/record-recommender
record_recommender/storage.py
FileStore._format_filename
def _format_filename(self, prefix, year, week): """Construct the file name based on the path and options.""" return "{}{}_{}-{}.csv".format(self.base_path, prefix, year, week)
python
def _format_filename(self, prefix, year, week): """Construct the file name based on the path and options.""" return "{}{}_{}-{}.csv".format(self.base_path, prefix, year, week)
[ "def", "_format_filename", "(", "self", ",", "prefix", ",", "year", ",", "week", ")", ":", "return", "\"{}{}_{}-{}.csv\"", ".", "format", "(", "self", ".", "base_path", ",", "prefix", ",", "year", ",", "week", ")" ]
Construct the file name based on the path and options.
[ "Construct", "the", "file", "name", "based", "on", "the", "path", "and", "options", "." ]
07f71e783369e6373218b5e6ba0bf15901e9251a
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/storage.py#L257-L259
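A quick sketch of the naming convention _format_filename produces. Because base_path is concatenated directly rather than joined, it has to end with a path separator; the constructor signature here is an assumption.

# Filename convention sketch (assumed constructor taking the base path).
fs = FileStore('/data/cache/')
print(fs._format_filename('Pageviews', 2016, 7))
# -> '/data/cache/Pageviews_2016-7.csv'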
240,932
inveniosoftware-contrib/record-recommender
record_recommender/storage.py
FileStore.get_recommendation_store
def get_recommendation_store(self): """Get the configured recommendation store.""" return RedisStore(self.config['host'], self.config['port'], self.config['db'], self.config['prefix'])
python
def get_recommendation_store(self): """Get the configured recommendation store.""" return RedisStore(self.config['host'], self.config['port'], self.config['db'], self.config['prefix'])
[ "def", "get_recommendation_store", "(", "self", ")", ":", "return", "RedisStore", "(", "self", ".", "config", "[", "'host'", "]", ",", "self", ".", "config", "[", "'port'", "]", ",", "self", ".", "config", "[", "'db'", "]", ",", "self", ".", "config", "[", "'prefix'", "]", ")" ]
Get the configured recommendation store.
[ "Get", "the", "configured", "recommendation", "store", "." ]
07f71e783369e6373218b5e6ba0bf15901e9251a
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/storage.py#L261-L266
240,933
esterhui/pypu
pypu/pusher.py
status._deleteFile
def _deleteFile(self,directory,fn,dentry,db,service):
        """Deletes file and changes status to '?' if no more
        services manage the file
        """
        # FIXME : can switch back to only managing once service
        # at a time
        logger.debug("%s - Deleting"%(fn))

        if fn not in db:
            print("%s - rm: Not in DB, can't remove !"%(fn))
            return False

        # Build up list of names
        servicenames=db[fn]['services'].keys()

        # If service is none, build list of all services
        # to perform this action on
        if service is None:
            servicelist=servicenames
        else:
            servicelist=[service]

        for service in servicelist:
            if not db[fn]['services'].has_key(service):
                print("%s - Can't delete, service [%s] unknown"%(fn,service))
                continue
            if db[fn]['services'][service]['status']!=self.ST_DELETED:
                print("%s - rm: Can't remove file with non 'D' status (%s)!"\
                    %(fn,service))
                continue
            # Only change status if correctly deleted
            if self.sman.GetServiceObj(service).Remove(directory,fn):
                # Delete our service entry
                del db[fn]['services'][service]
                logger.debug('%s - deleted by service: %s'%(fn,service))
            else:
                logger.error('%s - Failed to delete by service: %s'%(fn,service))
                continue

        # Delete whole entry if no services manage it any more
        if len(db[fn]['services'].keys())==0:
            del db[fn]

        return True
python
def _deleteFile(self,directory,fn,dentry,db,service):
        """Deletes file and changes status to '?' if no more
        services manage the file
        """
        # FIXME : can switch back to only managing once service
        # at a time
        logger.debug("%s - Deleting"%(fn))

        if fn not in db:
            print("%s - rm: Not in DB, can't remove !"%(fn))
            return False

        # Build up list of names
        servicenames=db[fn]['services'].keys()

        # If service is none, build list of all services
        # to perform this action on
        if service is None:
            servicelist=servicenames
        else:
            servicelist=[service]

        for service in servicelist:
            if not db[fn]['services'].has_key(service):
                print("%s - Can't delete, service [%s] unknown"%(fn,service))
                continue
            if db[fn]['services'][service]['status']!=self.ST_DELETED:
                print("%s - rm: Can't remove file with non 'D' status (%s)!"\
                    %(fn,service))
                continue
            # Only change status if correctly deleted
            if self.sman.GetServiceObj(service).Remove(directory,fn):
                # Delete our service entry
                del db[fn]['services'][service]
                logger.debug('%s - deleted by service: %s'%(fn,service))
            else:
                logger.error('%s - Failed to delete by service: %s'%(fn,service))
                continue

        # Delete whole entry if no services manage it any more
        if len(db[fn]['services'].keys())==0:
            del db[fn]

        return True
[ "def", "_deleteFile", "(", "self", ",", "directory", ",", "fn", ",", "dentry", ",", "db", ",", "service", ")", ":", "# FIXME : can switch back to only managing once service", "# at a time", "logger", ".", "debug", "(", "\"%s - Deleting\"", "%", "(", "fn", ")", ")", "if", "fn", "not", "in", "db", ":", "print", "(", "\"%s - rm: Not in DB, can't remove !\"", "%", "(", "fn", ")", ")", "return", "False", "# Build up list of names", "servicenames", "=", "db", "[", "fn", "]", "[", "'services'", "]", ".", "keys", "(", ")", "# If service is none, build list of all services", "# to perform this action on", "if", "service", "is", "None", ":", "servicelist", "=", "servicenames", "else", ":", "servicelist", "=", "[", "service", "]", "for", "service", "in", "servicelist", ":", "if", "not", "db", "[", "fn", "]", "[", "'services'", "]", ".", "has_key", "(", "service", ")", ":", "print", "(", "\"%s - Can't delete, service [%s] unknown\"", "%", "(", "fn", ",", "service", ")", ")", "continue", "if", "db", "[", "fn", "]", "[", "'services'", "]", "[", "service", "]", "[", "'status'", "]", "!=", "self", ".", "ST_DELETED", ":", "print", "(", "\"%s - rm: Can't remove file with non 'D' status (%s)!\"", "%", "(", "fn", ",", "service", ")", ")", "continue", "# Only change status if correctly deleted", "if", "self", ".", "sman", ".", "GetServiceObj", "(", "service", ")", ".", "Remove", "(", "directory", ",", "fn", ")", ":", "# Delete our service entry", "del", "db", "[", "fn", "]", "[", "'services'", "]", "[", "service", "]", "logger", ".", "debug", "(", "'%s - deleted by service: %s'", "%", "(", "fn", ",", "service", ")", ")", "else", ":", "logger", ".", "error", "(", "'%s - Failed to delete by service: %s'", "%", "(", "fn", ",", "service", ")", ")", "continue", "# Delete whole entry if no services manage it any more", "if", "len", "(", "db", "[", "fn", "]", "[", "'services'", "]", ".", "keys", "(", ")", ")", "==", "0", ":", "del", "db", "[", "fn", "]", "return", "True" ]
Deletes file and changes status to '?' if no more
        services manage the file
[ "Deletes", "file", "and", "changes", "status", "to", "?", "if", "no", "more", "services", "manage", "the", "file" ]
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/pusher.py#L219-L263
240,934
esterhui/pypu
pypu/pusher.py
status._uploadFile
def _uploadFile(self,directory,fn,dentry,db,service): """Uploads file and changes status to 'S'. Looks up service name with service string """ # Create a hash of the file if fn not in db: print("%s - Not in DB, must run 'add' first!"%(fn)) else: # If already added, see if it's modified, only then # do another upload if db[fn]['services'][service]['status']==self.ST_UPTODATE: if not dentry['status']==self.ST_MODIFIED: logger.info("%s - Up to date, skipping (%s)!"\ %(fn,service)) return False sobj=self.sman.GetServiceObj(service) # If nobody manages this file, just skip it if (not sobj): print("%s - Upload: No service of name [%s] found"%(fn,service)) return # Only change status if correctly uploaded if sobj.Upload(directory,fn): # Write newest mtime/hash to indicate all is well db[fn]['mtime']=dentry['mtime'] db[fn]['hash']=self._hashfile(os.path.join(directory,fn)) db[fn]['services'][service]['status']=self.ST_UPTODATE logger.debug('%s - uploaded by service: %s'%(fn,sobj.GetName())) return True else: logger.error('%s - Failed to upload by service: %s'%(fn,sobj.GetName())) return False return False
python
def _uploadFile(self,directory,fn,dentry,db,service): """Uploads file and changes status to 'S'. Looks up service name with service string """ # Create a hash of the file if fn not in db: print("%s - Not in DB, must run 'add' first!"%(fn)) else: # If already added, see if it's modified, only then # do another upload if db[fn]['services'][service]['status']==self.ST_UPTODATE: if not dentry['status']==self.ST_MODIFIED: logger.info("%s - Up to date, skipping (%s)!"\ %(fn,service)) return False sobj=self.sman.GetServiceObj(service) # If nobody manages this file, just skip it if (not sobj): print("%s - Upload: No service of name [%s] found"%(fn,service)) return # Only change status if correctly uploaded if sobj.Upload(directory,fn): # Write newest mtime/hash to indicate all is well db[fn]['mtime']=dentry['mtime'] db[fn]['hash']=self._hashfile(os.path.join(directory,fn)) db[fn]['services'][service]['status']=self.ST_UPTODATE logger.debug('%s - uploaded by service: %s'%(fn,sobj.GetName())) return True else: logger.error('%s - Failed to upload by service: %s'%(fn,sobj.GetName())) return False return False
[ "def", "_uploadFile", "(", "self", ",", "directory", ",", "fn", ",", "dentry", ",", "db", ",", "service", ")", ":", "# Create a hash of the file", "if", "fn", "not", "in", "db", ":", "print", "(", "\"%s - Not in DB, must run 'add' first!\"", "%", "(", "fn", ")", ")", "else", ":", "# If already added, see if it's modified, only then", "# do another upload", "if", "db", "[", "fn", "]", "[", "'services'", "]", "[", "service", "]", "[", "'status'", "]", "==", "self", ".", "ST_UPTODATE", ":", "if", "not", "dentry", "[", "'status'", "]", "==", "self", ".", "ST_MODIFIED", ":", "logger", ".", "info", "(", "\"%s - Up to date, skipping (%s)!\"", "%", "(", "fn", ",", "service", ")", ")", "return", "False", "sobj", "=", "self", ".", "sman", ".", "GetServiceObj", "(", "service", ")", "# If nobody manages this file, just skip it", "if", "(", "not", "sobj", ")", ":", "print", "(", "\"%s - Upload: No service of name [%s] found\"", "%", "(", "fn", ",", "service", ")", ")", "return", "# Only change status if correctly uploaded", "if", "sobj", ".", "Upload", "(", "directory", ",", "fn", ")", ":", "# Write newest mtime/hash to indicate all is well", "db", "[", "fn", "]", "[", "'mtime'", "]", "=", "dentry", "[", "'mtime'", "]", "db", "[", "fn", "]", "[", "'hash'", "]", "=", "self", ".", "_hashfile", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "fn", ")", ")", "db", "[", "fn", "]", "[", "'services'", "]", "[", "service", "]", "[", "'status'", "]", "=", "self", ".", "ST_UPTODATE", "logger", ".", "debug", "(", "'%s - uploaded by service: %s'", "%", "(", "fn", ",", "sobj", ".", "GetName", "(", ")", ")", ")", "return", "True", "else", ":", "logger", ".", "error", "(", "'%s - Failed to upload by service: %s'", "%", "(", "fn", ",", "sobj", ".", "GetName", "(", ")", ")", ")", "return", "False", "return", "False" ]
Uploads file and changes status to 'S'. Looks up service name with service string
[ "Uploads", "file", "and", "changes", "status", "to", "S", ".", "Looks", "up", "service", "name", "with", "service", "string" ]
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/pusher.py#L266-L300
240,935
esterhui/pypu
pypu/pusher.py
status._updateToDeleted
def _updateToDeleted(self,directory,fn,dentry,db,service):
        """Changes the status to 'D' as long as a handler exists,
        directory - DIR where stuff is happening
        fn        - File name to be added
        dentry    - dictionary entry as returned by GetStatus for this file
        db        - pusher DB for this directory
        service   - service to delete, None means all
        """
        # The file must already be tracked in the DB
        if fn not in db:
            print("%s - rm: not in DB, skipping!"%(fn))
            return

        services=self.sman.GetServices(fn)

        # If nobody manages this file, just skip it
        if (not services):
            print("%s - no manager of this file type found"%(fn))
            return

        if service:
            if db[fn]['services'].has_key(service):
                db[fn]['services'][service]['status']=self.ST_DELETED
            else:
                print("%s - Service %s doesn't exist, can't delete"%(fn,service))
            return

        # If we get here it means all services should delete
        for service in db[fn]['services']:
            db[fn]['services'][service]['status']=self.ST_DELETED

        return
python
def _updateToDeleted(self,directory,fn,dentry,db,service):
        """Changes the status to 'D' as long as a handler exists,
        directory - DIR where stuff is happening
        fn        - File name to be added
        dentry    - dictionary entry as returned by GetStatus for this file
        db        - pusher DB for this directory
        service   - service to delete, None means all
        """
        # The file must already be tracked in the DB
        if fn not in db:
            print("%s - rm: not in DB, skipping!"%(fn))
            return

        services=self.sman.GetServices(fn)

        # If nobody manages this file, just skip it
        if (not services):
            print("%s - no manager of this file type found"%(fn))
            return

        if service:
            if db[fn]['services'].has_key(service):
                db[fn]['services'][service]['status']=self.ST_DELETED
            else:
                print("%s - Service %s doesn't exist, can't delete"%(fn,service))
            return

        # If we get here it means all services should delete
        for service in db[fn]['services']:
            db[fn]['services'][service]['status']=self.ST_DELETED

        return
[ "def", "_updateToDeleted", "(", "self", ",", "directory", ",", "fn", ",", "dentry", ",", "db", ",", "service", ")", ":", "# The file must already be tracked in the DB", "if", "fn", "not", "in", "db", ":", "print", "(", "\"%s - rm: not in DB, skipping!\"", "%", "(", "fn", ")", ")", "return", "services", "=", "self", ".", "sman", ".", "GetServices", "(", "fn", ")", "# If nobody manages this file, just skip it", "if", "(", "not", "services", ")", ":", "print", "(", "\"%s - no manager of this file type found\"", "%", "(", "fn", ")", ")", "return", "if", "service", ":", "if", "db", "[", "fn", "]", "[", "'services'", "]", ".", "has_key", "(", "service", ")", ":", "db", "[", "fn", "]", "[", "'services'", "]", "[", "service", "]", "[", "'status'", "]", "=", "self", ".", "ST_DELETED", "else", ":", "print", "(", "\"%s - Service %s doesn't exist, can't delete\"", "%", "(", "fn", ",", "service", ")", ")", "return", "# If we get here it means all services should delete", "for", "service", "in", "db", "[", "fn", "]", "[", "'services'", "]", ":", "db", "[", "fn", "]", "[", "'services'", "]", "[", "service", "]", "[", "'status'", "]", "=", "self", ".", "ST_DELETED", "return" ]
Changes the status to 'D' as long as a handler exists,
        directory - DIR where stuff is happening
        fn        - File name to be added
        dentry    - dictionary entry as returned by GetStatus for this file
        db        - pusher DB for this directory
        service   - service to delete, None means all
[ "Changes", "the", "status", "to", "D", "as", "long", "as", "a", "handler", "exists", "directory", "-", "DIR", "where", "stuff", "is", "happening", "fn", "-", "File", "name", "to", "be", "added", "dentry", "-", "dictionary", "entry", "as", "returned", "by", "GetStatus", "for", "this", "file", "db", "-", "pusher", "DB", "for", "this", "directory", "service", "-", "service", "to", "delete", "None", "means", "all" ]
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/pusher.py#L302-L333
240,936
esterhui/pypu
pypu/pusher.py
status._updateToAdded
def _updateToAdded(self,directory,fn,dentry,db,service):
        """Changes the status to 'A' as long as a handler exists,
        also generates a hash
        directory - DIR where stuff is happening
        fn        - File name to be added
        dentry    - dictionary entry as returned by GetStatus for this file
        db        - pusher DB for this directory
        service   - None means all services, otherwise looks for service
        """
        services=self.sman.GetServices(fn)

        # If nobody manages this file, just skip it
        if services is None:
            print("%s - No services handle this file" %(fn))
            return

        # Build up list of names
        servicenames=[]
        for s in services:
            servicenames.append(s.GetName())

        if service is not None and service not in servicenames:
            print("%s - Requested service (%s) not available for this file"\
                %(fn,service))
            return

        # If service is none, build list of all services
        # to perform this action on
        if service is None:
            servicelist=servicenames
        else:
            servicelist=[service]

        if not db.has_key(fn):
            # Since this is a new entry, populate with stuff
            # we got from GetStatus for this file (usually mtime)
            db[fn]=dentry
            del db[fn]['status'] # Delete this key we're not using
            db[fn]['services']={} # Empty dictionary of services
                                  # that manages this file + status
            # Now add the hash
            db[fn]['hash']=self._hashfile(os.path.join(directory,fn))

        # Now run through services and see if we should
        # perform actions
        for service in servicelist:
            if not db[fn]['services'].has_key(service):
                db[fn]['services'][service]={}
                db[fn]['services'][service]['status']=self.ST_ADDED
            else:
                print("%s - Already managed by service %s, maybe do a 'push'?"\
                    %(fn,service))

        logger.info('%s - managers: %s'%(fn,db[fn]['services'].keys()))
        return
python
def _updateToAdded(self,directory,fn,dentry,db,service):
        """Changes the status to 'A' as long as a handler exists,
        also generates a hash
        directory - DIR where stuff is happening
        fn        - File name to be added
        dentry    - dictionary entry as returned by GetStatus for this file
        db        - pusher DB for this directory
        service   - None means all services, otherwise looks for service
        """
        services=self.sman.GetServices(fn)

        # If nobody manages this file, just skip it
        if services is None:
            print("%s - No services handle this file" %(fn))
            return

        # Build up list of names
        servicenames=[]
        for s in services:
            servicenames.append(s.GetName())

        if service is not None and service not in servicenames:
            print("%s - Requested service (%s) not available for this file"\
                %(fn,service))
            return

        # If service is none, build list of all services
        # to perform this action on
        if service is None:
            servicelist=servicenames
        else:
            servicelist=[service]

        if not db.has_key(fn):
            # Since this is a new entry, populate with stuff
            # we got from GetStatus for this file (usually mtime)
            db[fn]=dentry
            del db[fn]['status'] # Delete this key we're not using
            db[fn]['services']={} # Empty dictionary of services
                                  # that manages this file + status
            # Now add the hash
            db[fn]['hash']=self._hashfile(os.path.join(directory,fn))

        # Now run through services and see if we should
        # perform actions
        for service in servicelist:
            if not db[fn]['services'].has_key(service):
                db[fn]['services'][service]={}
                db[fn]['services'][service]['status']=self.ST_ADDED
            else:
                print("%s - Already managed by service %s, maybe do a 'push'?"\
                    %(fn,service))

        logger.info('%s - managers: %s'%(fn,db[fn]['services'].keys()))
        return
[ "def", "_updateToAdded", "(", "self", ",", "directory", ",", "fn", ",", "dentry", ",", "db", ",", "service", ")", ":", "services", "=", "self", ".", "sman", ".", "GetServices", "(", "fn", ")", "# If nobody manages this file, just skip it", "if", "services", "is", "None", ":", "print", "(", "\"%s - No services handle this file\"", "%", "(", "fn", ")", ")", "return", "# Build up list of names", "servicenames", "=", "[", "]", "for", "s", "in", "services", ":", "servicenames", ".", "append", "(", "s", ".", "GetName", "(", ")", ")", "if", "service", "is", "not", "None", "and", "service", "not", "in", "servicenames", ":", "print", "(", "\"%s - Requested service (%s) not available for this file\"", "%", "(", "fn", ",", "service", ")", ")", "return", "# If service is none, build list of all services", "# to perform this action on", "if", "service", "is", "None", ":", "servicelist", "=", "servicenames", "else", ":", "servicelist", "=", "[", "service", "]", "if", "not", "db", ".", "has_key", "(", "fn", ")", ":", "# Since this is a new entry, populate with stuff", "# we got from GetStatus for this file (usually mtime)", "db", "[", "fn", "]", "=", "dentry", "del", "db", "[", "fn", "]", "[", "'status'", "]", "# Delete this key we're not using", "db", "[", "fn", "]", "[", "'services'", "]", "=", "{", "}", "# Empty dictionary of services", "# that manages this file + status", "# Now add the hash", "db", "[", "fn", "]", "[", "'hash'", "]", "=", "self", ".", "_hashfile", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "fn", ")", ")", "# Now run through services and see if we should", "# perform actions", "for", "service", "in", "servicelist", ":", "if", "not", "db", "[", "fn", "]", "[", "'services'", "]", ".", "has_key", "(", "service", ")", ":", "db", "[", "fn", "]", "[", "'services'", "]", "[", "service", "]", "=", "{", "}", "db", "[", "fn", "]", "[", "'services'", "]", "[", "service", "]", "[", "'status'", "]", "=", "self", ".", "ST_ADDED", "else", ":", "print", "(", "\"%s - Already managed by service %s, maybe do a 'push'?\"", "%", "(", "fn", ",", "service", ")", ")", "logger", ".", "info", "(", "'%s - managers: %s'", "%", "(", "fn", ",", "db", "[", "fn", "]", "[", "'services'", "]", ".", "keys", "(", ")", ")", ")", "return" ]
Changes the status to 'A' as long as a handler exists,
        also generates a hash
        directory - DIR where stuff is happening
        fn        - File name to be added
        dentry    - dictionary entry as returned by GetStatus for this file
        db        - pusher DB for this directory
        service   - None means all services, otherwise looks for service
[ "Changes", "the", "status", "to", "A", "as", "long", "as", "a", "handler", "exists", "also", "generates", "a", "hash", "directory", "-", "DIR", "where", "stuff", "is", "happening", "fn", "-", "File", "name", "to", "be", "added", "dentry", "-", "dictionary", "entry", "as", "returned", "by", "GetStatus", "for", "this", "file", "db", "-", "pusher", "DB", "for", "this", "directory", "service", "-", "None", "means", "all", "services", "otherwise", "looks", "for", "service" ]
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/pusher.py#L336-L392
240,937
esterhui/pypu
pypu/pusher.py
status._hashfile
def _hashfile(self,filename,blocksize=65536): """Hashes the file and returns hash""" logger.debug("Hashing file %s"%(filename)) hasher=hashlib.sha256() afile=open(filename,'rb') buf=afile.read(blocksize) while len(buf) > 0: hasher.update(buf) buf = afile.read(blocksize) return hasher.hexdigest()
python
def _hashfile(self,filename,blocksize=65536): """Hashes the file and returns hash""" logger.debug("Hashing file %s"%(filename)) hasher=hashlib.sha256() afile=open(filename,'rb') buf=afile.read(blocksize) while len(buf) > 0: hasher.update(buf) buf = afile.read(blocksize) return hasher.hexdigest()
[ "def", "_hashfile", "(", "self", ",", "filename", ",", "blocksize", "=", "65536", ")", ":", "logger", ".", "debug", "(", "\"Hashing file %s\"", "%", "(", "filename", ")", ")", "hasher", "=", "hashlib", ".", "sha256", "(", ")", "afile", "=", "open", "(", "filename", ",", "'rb'", ")", "buf", "=", "afile", ".", "read", "(", "blocksize", ")", "while", "len", "(", "buf", ")", ">", "0", ":", "hasher", ".", "update", "(", "buf", ")", "buf", "=", "afile", ".", "read", "(", "blocksize", ")", "return", "hasher", ".", "hexdigest", "(", ")" ]
Hashes the file and returns hash
[ "Hashes", "the", "file", "and", "returns", "hash" ]
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/pusher.py#L454-L463
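A standalone sketch of the same chunked-read hashing pattern, with one difference worth noting: the method above never closes the file handle it opens, so this version uses a context manager.

# Equivalent chunked SHA-256, closing the file via a context manager.
import hashlib

def sha256_of(path, blocksize=65536):
    hasher = hashlib.sha256()
    with open(path, 'rb') as afile:
        for chunk in iter(lambda: afile.read(blocksize), b''):
            hasher.update(chunk)
    return hasher.hexdigest()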
240,938
romaryd/python-jsonrepo
jsonrepo/mixin.py
StorageMixin.storage
def storage(self): """ Instantiates and returns a storage instance """ if self.backend == 'redis': return RedisBackend(self.prefix, self.secondary_indexes) if self.backend == 'dynamodb': return DynamoDBBackend(self.prefix, self.key, self.sort_key, self.secondary_indexes) return DictBackend(self.prefix, self.secondary_indexes)
python
def storage(self): """ Instantiates and returns a storage instance """ if self.backend == 'redis': return RedisBackend(self.prefix, self.secondary_indexes) if self.backend == 'dynamodb': return DynamoDBBackend(self.prefix, self.key, self.sort_key, self.secondary_indexes) return DictBackend(self.prefix, self.secondary_indexes)
[ "def", "storage", "(", "self", ")", ":", "if", "self", ".", "backend", "==", "'redis'", ":", "return", "RedisBackend", "(", "self", ".", "prefix", ",", "self", ".", "secondary_indexes", ")", "if", "self", ".", "backend", "==", "'dynamodb'", ":", "return", "DynamoDBBackend", "(", "self", ".", "prefix", ",", "self", ".", "key", ",", "self", ".", "sort_key", ",", "self", ".", "secondary_indexes", ")", "return", "DictBackend", "(", "self", ".", "prefix", ",", "self", ".", "secondary_indexes", ")" ]
Instantiates and returns a storage instance
[ "Instantiates", "and", "returns", "a", "storage", "instance" ]
08a9c039a5bd21e93e9a6d1bce77d43e6e10b57d
https://github.com/romaryd/python-jsonrepo/blob/08a9c039a5bd21e93e9a6d1bce77d43e6e10b57d/jsonrepo/mixin.py#L18-L27
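A hypothetical subclass showing how the mixin might be wired up; the attribute names (backend, prefix, secondary_indexes, and key/sort_key for DynamoDB) are inferred from how storage() reads them, not documented in the record above.

# Hypothetical mixin usage sketch.
class SessionRepo(StorageMixin):
    backend = 'redis'
    prefix = 'sessions'
    secondary_indexes = []

store = SessionRepo().storage()   # -> RedisBackend('sessions', [])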
240,939
drongo-framework/drongo
drongo/utils/dict2.py
dict2.get_property
def get_property(self, prop): """Access nested value using dot separated keys Args: prop (:obj:`str`): Property in the form of dot separated keys Returns: Property value if exists, else `None` """ prop = prop.split('.') root = self for p in prop: if p in root: root = root[p] else: return None return root
python
def get_property(self, prop): """Access nested value using dot separated keys Args: prop (:obj:`str`): Property in the form of dot separated keys Returns: Property value if exists, else `None` """ prop = prop.split('.') root = self for p in prop: if p in root: root = root[p] else: return None return root
[ "def", "get_property", "(", "self", ",", "prop", ")", ":", "prop", "=", "prop", ".", "split", "(", "'.'", ")", "root", "=", "self", "for", "p", "in", "prop", ":", "if", "p", "in", "root", ":", "root", "=", "root", "[", "p", "]", "else", ":", "return", "None", "return", "root" ]
Access nested value using dot separated keys Args: prop (:obj:`str`): Property in the form of dot separated keys Returns: Property value if exists, else `None`
[ "Access", "nested", "value", "using", "dot", "separated", "keys" ]
487edb370ae329f370bcf3b433ed3f28ba4c1d8c
https://github.com/drongo-framework/drongo/blob/487edb370ae329f370bcf3b433ed3f28ba4c1d8c/drongo/utils/dict2.py#L25-L41
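A sketch of dotted-path access, using the from_dict constructor shown in the next record to build the nested dict2.

# Dotted-path lookup sketch.
d = dict2.from_dict({'a': {'b': {'c': 1}}})
print(d.get_property('a.b.c'))   # -> 1
print(d.get_property('a.x.c'))   # -> None (missing keys never raise)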
240,940
drongo-framework/drongo
drongo/utils/dict2.py
dict2.from_dict
def from_dict(cls, val): """Creates dict2 object from dict object Args: val (:obj:`dict`): Value to create from Returns: Equivalent dict2 object. """ if isinstance(val, dict2): return val elif isinstance(val, dict): res = cls() for k, v in val.items(): res[k] = cls.from_dict(v) return res elif isinstance(val, list): res = [] for item in val: res.append(cls.from_dict(item)) return res else: return val
python
def from_dict(cls, val): """Creates dict2 object from dict object Args: val (:obj:`dict`): Value to create from Returns: Equivalent dict2 object. """ if isinstance(val, dict2): return val elif isinstance(val, dict): res = cls() for k, v in val.items(): res[k] = cls.from_dict(v) return res elif isinstance(val, list): res = [] for item in val: res.append(cls.from_dict(item)) return res else: return val
[ "def", "from_dict", "(", "cls", ",", "val", ")", ":", "if", "isinstance", "(", "val", ",", "dict2", ")", ":", "return", "val", "elif", "isinstance", "(", "val", ",", "dict", ")", ":", "res", "=", "cls", "(", ")", "for", "k", ",", "v", "in", "val", ".", "items", "(", ")", ":", "res", "[", "k", "]", "=", "cls", ".", "from_dict", "(", "v", ")", "return", "res", "elif", "isinstance", "(", "val", ",", "list", ")", ":", "res", "=", "[", "]", "for", "item", "in", "val", ":", "res", ".", "append", "(", "cls", ".", "from_dict", "(", "item", ")", ")", "return", "res", "else", ":", "return", "val" ]
Creates dict2 object from dict object Args: val (:obj:`dict`): Value to create from Returns: Equivalent dict2 object.
[ "Creates", "dict2", "object", "from", "dict", "object" ]
487edb370ae329f370bcf3b433ed3f28ba4c1d8c
https://github.com/drongo-framework/drongo/blob/487edb370ae329f370bcf3b433ed3f28ba4c1d8c/drongo/utils/dict2.py#L51-L75
240,941
drongo-framework/drongo
drongo/utils/dict2.py
dict2.to_dict
def to_dict(self, val=UNSET): """Creates dict object from dict2 object Args: val (:obj:`dict2`): Value to create from Returns: Equivalent dict object. """ if val is UNSET: val = self if isinstance(val, dict2) or isinstance(val, dict): res = dict() for k, v in val.items(): res[k] = self.to_dict(v) return res elif isinstance(val, list): res = [] for item in val: res.append(self.to_dict(item)) return res else: return val
python
def to_dict(self, val=UNSET): """Creates dict object from dict2 object Args: val (:obj:`dict2`): Value to create from Returns: Equivalent dict object. """ if val is UNSET: val = self if isinstance(val, dict2) or isinstance(val, dict): res = dict() for k, v in val.items(): res[k] = self.to_dict(v) return res elif isinstance(val, list): res = [] for item in val: res.append(self.to_dict(item)) return res else: return val
[ "def", "to_dict", "(", "self", ",", "val", "=", "UNSET", ")", ":", "if", "val", "is", "UNSET", ":", "val", "=", "self", "if", "isinstance", "(", "val", ",", "dict2", ")", "or", "isinstance", "(", "val", ",", "dict", ")", ":", "res", "=", "dict", "(", ")", "for", "k", ",", "v", "in", "val", ".", "items", "(", ")", ":", "res", "[", "k", "]", "=", "self", ".", "to_dict", "(", "v", ")", "return", "res", "elif", "isinstance", "(", "val", ",", "list", ")", ":", "res", "=", "[", "]", "for", "item", "in", "val", ":", "res", ".", "append", "(", "self", ".", "to_dict", "(", "item", ")", ")", "return", "res", "else", ":", "return", "val" ]
Creates dict object from dict2 object Args: val (:obj:`dict2`): Value to create from Returns: Equivalent dict object.
[ "Creates", "dict", "object", "from", "dict2", "object" ]
487edb370ae329f370bcf3b433ed3f28ba4c1d8c
https://github.com/drongo-framework/drongo/blob/487edb370ae329f370bcf3b433ed3f28ba4c1d8c/drongo/utils/dict2.py#L77-L100
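A round-trip sketch: to_dict inverts from_dict, recursing through nested dicts and lists alike.

# Round-trip sketch.
nested = {'user': {'name': 'ada', 'tags': ['x', 'y']}}
d2 = dict2.from_dict(nested)
assert d2.to_dict() == nested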
240,942
mirukan/whratio
whratio/__main__.py
main
def main(): "Process CLI arguments and call appropriate functions." try: args = docopt.docopt(__doc__, version=__about__.__version__) except docopt.DocoptExit: if len(sys.argv) > 1: print(f"{Fore.RED}Invalid command syntax, " f"check help:{Fore.RESET}\n") print(__doc__) sys.exit(1) print_all = False if not (args["--int-width"] or args["--int-height"] or args["--decimal"]): print_all = True width = float(args["WIDTH"]) height = float(args["HEIGHT"]) as_int_ = as_int(width, height) as_float_ = as_float(width, height) if args["--ndigits"]: as_float_ = round(as_float_, int(args["--ndigits"])) to_print = [] if args["--int-width"] or print_all: to_print.append(f"{Fore.BLUE}{as_int_[0]!s}") if args["--int-height"] or print_all: to_print.append(f"{Fore.BLUE}{as_int_[1]!s}") if args["--decimal"] or print_all: to_print.append(f"{Fore.MAGENTA}{as_float_!s}") print(" ".join(to_print))
python
def main(): "Process CLI arguments and call appropriate functions." try: args = docopt.docopt(__doc__, version=__about__.__version__) except docopt.DocoptExit: if len(sys.argv) > 1: print(f"{Fore.RED}Invalid command syntax, " f"check help:{Fore.RESET}\n") print(__doc__) sys.exit(1) print_all = False if not (args["--int-width"] or args["--int-height"] or args["--decimal"]): print_all = True width = float(args["WIDTH"]) height = float(args["HEIGHT"]) as_int_ = as_int(width, height) as_float_ = as_float(width, height) if args["--ndigits"]: as_float_ = round(as_float_, int(args["--ndigits"])) to_print = [] if args["--int-width"] or print_all: to_print.append(f"{Fore.BLUE}{as_int_[0]!s}") if args["--int-height"] or print_all: to_print.append(f"{Fore.BLUE}{as_int_[1]!s}") if args["--decimal"] or print_all: to_print.append(f"{Fore.MAGENTA}{as_float_!s}") print(" ".join(to_print))
[ "def", "main", "(", ")", ":", "try", ":", "args", "=", "docopt", ".", "docopt", "(", "__doc__", ",", "version", "=", "__about__", ".", "__version__", ")", "except", "docopt", ".", "DocoptExit", ":", "if", "len", "(", "sys", ".", "argv", ")", ">", "1", ":", "print", "(", "f\"{Fore.RED}Invalid command syntax, \"", "f\"check help:{Fore.RESET}\\n\"", ")", "print", "(", "__doc__", ")", "sys", ".", "exit", "(", "1", ")", "print_all", "=", "False", "if", "not", "(", "args", "[", "\"--int-width\"", "]", "or", "args", "[", "\"--int-height\"", "]", "or", "args", "[", "\"--decimal\"", "]", ")", ":", "print_all", "=", "True", "width", "=", "float", "(", "args", "[", "\"WIDTH\"", "]", ")", "height", "=", "float", "(", "args", "[", "\"HEIGHT\"", "]", ")", "as_int_", "=", "as_int", "(", "width", ",", "height", ")", "as_float_", "=", "as_float", "(", "width", ",", "height", ")", "if", "args", "[", "\"--ndigits\"", "]", ":", "as_float_", "=", "round", "(", "as_float_", ",", "int", "(", "args", "[", "\"--ndigits\"", "]", ")", ")", "to_print", "=", "[", "]", "if", "args", "[", "\"--int-width\"", "]", "or", "print_all", ":", "to_print", ".", "append", "(", "f\"{Fore.BLUE}{as_int_[0]!s}\"", ")", "if", "args", "[", "\"--int-height\"", "]", "or", "print_all", ":", "to_print", ".", "append", "(", "f\"{Fore.BLUE}{as_int_[1]!s}\"", ")", "if", "args", "[", "\"--decimal\"", "]", "or", "print_all", ":", "to_print", ".", "append", "(", "f\"{Fore.MAGENTA}{as_float_!s}\"", ")", "print", "(", "\" \"", ".", "join", "(", "to_print", ")", ")" ]
Process CLI arguments and call appropriate functions.
[ "Process", "CLI", "arguments", "and", "call", "appropriate", "functions", "." ]
e19cf7346351649d196d2eb3369870841f7bfea5
https://github.com/mirukan/whratio/blob/e19cf7346351649d196d2eb3369870841f7bfea5/whratio/__main__.py#L33-L68
240,943
pyvec/pyvodb
pyvodb/tables.py
Event.start
def start(self): """The event's start time, as a timezone-aware datetime object""" if self.start_time is None: time = datetime.time(hour=19, tzinfo=CET) else: time = self.start_time.replace(tzinfo=CET) return datetime.datetime.combine(self.date, time)
python
def start(self): """The event's start time, as a timezone-aware datetime object""" if self.start_time is None: time = datetime.time(hour=19, tzinfo=CET) else: time = self.start_time.replace(tzinfo=CET) return datetime.datetime.combine(self.date, time)
[ "def", "start", "(", "self", ")", ":", "if", "self", ".", "start_time", "is", "None", ":", "time", "=", "datetime", ".", "time", "(", "hour", "=", "19", ",", "tzinfo", "=", "CET", ")", "else", ":", "time", "=", "self", ".", "start_time", ".", "replace", "(", "tzinfo", "=", "CET", ")", "return", "datetime", ".", "datetime", ".", "combine", "(", "self", ".", "date", ",", "time", ")" ]
The event's start time, as a timezone-aware datetime object
[ "The", "event", "s", "start", "time", "as", "a", "timezone", "-", "aware", "datetime", "object" ]
07183333df26eb12c5c2b98802cde3fb3a6c1339
https://github.com/pyvec/pyvodb/blob/07183333df26eb12c5c2b98802cde3fb3a6c1339/pyvodb/tables.py#L103-L109
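A quick sketch of the fallback behaviour above: when no start_time is stored, the event defaults to 19:00 in the CET timezone. The CET object comes from the library; a fixed UTC+1 offset stands in for it here.

import datetime

CET = datetime.timezone(datetime.timedelta(hours=1))  # stand-in for the library's CET

def event_start(date, start_time=None):
    if start_time is None:
        start_time = datetime.time(hour=19, tzinfo=CET)   # default: 19:00 CET
    else:
        start_time = start_time.replace(tzinfo=CET)
    return datetime.datetime.combine(date, start_time)

print(event_start(datetime.date(2024, 5, 1)))                          # 2024-05-01 19:00:00+01:00
print(event_start(datetime.date(2024, 5, 1), datetime.time(18, 30)))   # explicit start time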
240,944
pyvec/pyvodb
pyvodb/tables.py
Series.next_occurrences
def next_occurrences(self, n=None, since=None): """Yield the next planned occurrences after the date "since" The `since` argument can be either a date or datetime object. If not given, it defaults to the date of the last event that's already planned. If `n` is given, the result is limited to that many dates; otherwise, infinite results may be generated. Note that fewer than `n` results may be yielded. """ scheme = self.recurrence_scheme if scheme is None: return () db = Session.object_session(self) query = db.query(Event) query = query.filter(Event.series_slug == self.slug) query = query.order_by(desc(Event.date)) query = query.limit(1) last_planned_event = query.one_or_none() if since is None: last_planned_event = query.one() since = last_planned_event.date elif since < last_planned_event.date: since = last_planned_event.date start = getattr(since, 'date', since) start += relativedelta.relativedelta(days=+1) if (scheme == 'monthly' and last_planned_event and last_planned_event.date.year == start.year and last_planned_event.date.month == start.month): # Monthly events try to have one event per month, so exclude # the current month if there was already a meetup start += relativedelta.relativedelta(months=+1) start = start.replace(day=1) start = datetime.datetime.combine(start, datetime.time(tzinfo=CET)) result = rrule.rrulestr(self.recurrence_rule, dtstart=start) if n is not None: result = itertools.islice(result, n) return result
python
def next_occurrences(self, n=None, since=None): """Yield the next planned occurrences after the date "since" The `since` argument can be either a date or datetime object. If not given, it defaults to the date of the last event that's already planned. If `n` is given, the result is limited to that many dates; otherwise, infinite results may be generated. Note that fewer than `n` results may be yielded. """ scheme = self.recurrence_scheme if scheme is None: return () db = Session.object_session(self) query = db.query(Event) query = query.filter(Event.series_slug == self.slug) query = query.order_by(desc(Event.date)) query = query.limit(1) last_planned_event = query.one_or_none() if since is None: last_planned_event = query.one() since = last_planned_event.date elif since < last_planned_event.date: since = last_planned_event.date start = getattr(since, 'date', since) start += relativedelta.relativedelta(days=+1) if (scheme == 'monthly' and last_planned_event and last_planned_event.date.year == start.year and last_planned_event.date.month == start.month): # Monthly events try to have one event per month, so exclude # the current month if there was already a meetup start += relativedelta.relativedelta(months=+1) start = start.replace(day=1) start = datetime.datetime.combine(start, datetime.time(tzinfo=CET)) result = rrule.rrulestr(self.recurrence_rule, dtstart=start) if n is not None: result = itertools.islice(result, n) return result
[ "def", "next_occurrences", "(", "self", ",", "n", "=", "None", ",", "since", "=", "None", ")", ":", "scheme", "=", "self", ".", "recurrence_scheme", "if", "scheme", "is", "None", ":", "return", "(", ")", "db", "=", "Session", ".", "object_session", "(", "self", ")", "query", "=", "db", ".", "query", "(", "Event", ")", "query", "=", "query", ".", "filter", "(", "Event", ".", "series_slug", "==", "self", ".", "slug", ")", "query", "=", "query", ".", "order_by", "(", "desc", "(", "Event", ".", "date", ")", ")", "query", "=", "query", ".", "limit", "(", "1", ")", "last_planned_event", "=", "query", ".", "one_or_none", "(", ")", "if", "since", "is", "None", ":", "last_planned_event", "=", "query", ".", "one", "(", ")", "since", "=", "last_planned_event", ".", "date", "elif", "since", "<", "last_planned_event", ".", "date", ":", "since", "=", "last_planned_event", ".", "date", "start", "=", "getattr", "(", "since", ",", "'date'", ",", "since", ")", "start", "+=", "relativedelta", ".", "relativedelta", "(", "days", "=", "+", "1", ")", "if", "(", "scheme", "==", "'monthly'", "and", "last_planned_event", "and", "last_planned_event", ".", "date", ".", "year", "==", "start", ".", "year", "and", "last_planned_event", ".", "date", ".", "month", "==", "start", ".", "month", ")", ":", "# Monthly events try to have one event per month, so exclude", "# the current month if there was already a meetup", "start", "+=", "relativedelta", ".", "relativedelta", "(", "months", "=", "+", "1", ")", "start", "=", "start", ".", "replace", "(", "day", "=", "1", ")", "start", "=", "datetime", ".", "datetime", ".", "combine", "(", "start", ",", "datetime", ".", "time", "(", "tzinfo", "=", "CET", ")", ")", "result", "=", "rrule", ".", "rrulestr", "(", "self", ".", "recurrence_rule", ",", "dtstart", "=", "start", ")", "if", "n", "is", "not", "None", ":", "result", "=", "itertools", ".", "islice", "(", "result", ",", "n", ")", "return", "result" ]
Yield the next planned occurrences after the date "since" The `since` argument can be either a date or datetime object. If not given, it defaults to the date of the last event that's already planned. If `n` is given, the result is limited to that many dates; otherwise, infinite results may be generated. Note that fewer than `n` results may be yielded.
[ "Yield", "the", "next", "planned", "occurrences", "after", "the", "date", "since" ]
07183333df26eb12c5c2b98802cde3fb3a6c1339
https://github.com/pyvec/pyvodb/blob/07183333df26eb12c5c2b98802cde3fb3a6c1339/pyvodb/tables.py#L215-L260
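The core of the method is the dtstart + islice pattern: build an rrule anchored at a start datetime, then slice the potentially infinite iterator. A minimal standalone sketch, assuming python-dateutil is installed and using a made-up monthly rule:

import datetime
import itertools
from dateutil import rrule

start = datetime.datetime(2024, 1, 1)
rule = rrule.rrulestr("FREQ=MONTHLY;BYDAY=1WE", dtstart=start)  # first Wednesday each month
for occurrence in itertools.islice(rule, 3):                    # cap the infinite iterator
    print(occurrence)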
240,945
inveniosoftware-attic/invenio-knowledge
invenio_knowledge/upgrades/knowledge_2015_07_14_innodb.py
do_upgrade
def do_upgrade(): """Carry out the upgrade.""" op.alter_column( table_name='knwKBRVAL', column_name='id_knwKB', type_=db.MediumInteger(8, unsigned=True), existing_nullable=False, existing_server_default='0' )
python
def do_upgrade(): """Carry out the upgrade.""" op.alter_column( table_name='knwKBRVAL', column_name='id_knwKB', type_=db.MediumInteger(8, unsigned=True), existing_nullable=False, existing_server_default='0' )
[ "def", "do_upgrade", "(", ")", ":", "op", ".", "alter_column", "(", "table_name", "=", "'knwKBRVAL'", ",", "column_name", "=", "'id_knwKB'", ",", "type_", "=", "db", ".", "MediumInteger", "(", "8", ",", "unsigned", "=", "True", ")", ",", "existing_nullable", "=", "False", ",", "existing_server_default", "=", "'0'", ")" ]
Carry out the upgrade.
[ "Carry", "out", "the", "upgrade", "." ]
b31722dc14243ca8f626f8b3bce9718d0119de55
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/upgrades/knowledge_2015_07_14_innodb.py#L35-L43
240,946
cwoebker/paxo
paxo/text.py
spark_string
def spark_string(ints): """Returns a spark string from given iterable of ints.""" ticks = u'▁▂▃▅▆▇' ints = [i for i in ints if type(i) == int] if len(ints) == 0: return "" step = (max(ints) / float(len(ticks) - 1)) or 1 return u''.join( ticks[int(round(i / step))] if type(i) == int else u'.' for i in ints)
python
def spark_string(ints): """Returns a spark string from given iterable of ints.""" ticks = u'▁▂▃▅▆▇' ints = [i for i in ints if type(i) == int] if len(ints) == 0: return "" step = (max(ints) / float(len(ticks) - 1)) or 1 return u''.join( ticks[int(round(i / step))] if type(i) == int else u'.' for i in ints)
[ "def", "spark_string", "(", "ints", ")", ":", "ticks", "=", "u'▁▂▃▅▆▇'", "ints", "=", "[", "i", "for", "i", "in", "ints", "if", "type", "(", "i", ")", "==", "int", "]", "if", "len", "(", "ints", ")", "==", "0", ":", "return", "\"\"", "step", "=", "(", "max", "(", "ints", ")", "/", "float", "(", "len", "(", "ticks", ")", "-", "1", ")", ")", "or", "1", "return", "u''", ".", "join", "(", "ticks", "[", "int", "(", "round", "(", "i", "/", "step", ")", ")", "]", "if", "type", "(", "i", ")", "==", "int", "else", "u'.'", "for", "i", "in", "ints", ")" ]
Returns a spark string from given iterable of ints.
[ "Returns", "a", "spark", "string", "from", "given", "iterable", "of", "ints", "." ]
b97518a830948faaaf31aeacda3f8d4f6364eb07
https://github.com/cwoebker/paxo/blob/b97518a830948faaaf31aeacda3f8d4f6364eb07/paxo/text.py#L16-L24
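Usage sketch, assuming the spark_string definition above is in scope. With six tick characters the step is max(ints) / 5, so [2, 5, 9, 1] rounds to tick indices 1, 3, 5, 1:

print(spark_string([2, 5, 9, 1]))  # expected: ▂▅▇▂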
240,947
arcus-io/puppetdb-python
puppetdb/v2/facts.py
get_facts_by_name
def get_facts_by_name(api_url=None, fact_name=None, verify=False, cert=list()): """ Returns facts by name :param api_url: Base PuppetDB API url :param fact_name: Name of fact """ return utils._make_api_request(api_url, '/facts/{0}'.format(fact_name), verify, cert)
python
def get_facts_by_name(api_url=None, fact_name=None, verify=False, cert=list()): """ Returns facts by name :param api_url: Base PuppetDB API url :param fact_name: Name of fact """ return utils._make_api_request(api_url, '/facts/{0}'.format(fact_name), verify, cert)
[ "def", "get_facts_by_name", "(", "api_url", "=", "None", ",", "fact_name", "=", "None", ",", "verify", "=", "False", ",", "cert", "=", "list", "(", ")", ")", ":", "return", "utils", ".", "_make_api_request", "(", "api_url", ",", "'/facts/{0}'", ".", "format", "(", "fact_name", ")", ",", "verify", ",", "cert", ")" ]
Returns facts by name :param api_url: Base PuppetDB API url :param fact_name: Name of fact
[ "Returns", "facts", "by", "name" ]
d772eb80a1dfb1154a1f421c7ecdc1ac951b5ea2
https://github.com/arcus-io/puppetdb-python/blob/d772eb80a1dfb1154a1f421c7ecdc1ac951b5ea2/puppetdb/v2/facts.py#L34-L42
240,948
arcus-io/puppetdb-python
puppetdb/v2/facts.py
get_facts_by_name_and_value
def get_facts_by_name_and_value(api_url=None, fact_name=None, fact_value=None, verify=False, cert=list()): """ Returns facts by name and value :param api_url: Base PuppetDB API url :param fact_name: Name of fact :param fact_value: Value of fact """ return utils._make_api_request(api_url, '/facts/{0}/{1}'.format(fact_name, fact_value), verify, cert)
python
def get_facts_by_name_and_value(api_url=None, fact_name=None, fact_value=None, verify=False, cert=list()): """ Returns facts by name and value :param api_url: Base PuppetDB API url :param fact_name: Name of fact :param fact_value: Value of fact """ return utils._make_api_request(api_url, '/facts/{0}/{1}'.format(fact_name, fact_value), verify, cert)
[ "def", "get_facts_by_name_and_value", "(", "api_url", "=", "None", ",", "fact_name", "=", "None", ",", "fact_value", "=", "None", ",", "verify", "=", "False", ",", "cert", "=", "list", "(", ")", ")", ":", "return", "utils", ".", "_make_api_request", "(", "api_url", ",", "'/facts/{0}/{1}'", ".", "format", "(", "fact_name", ",", "fact_value", ")", ",", "verify", ",", "cert", ")" ]
Returns facts by name and value :param api_url: Base PuppetDB API url :param fact_name: Name of fact :param fact_value: Value of fact
[ "Returns", "facts", "by", "name", "and", "value" ]
d772eb80a1dfb1154a1f421c7ecdc1ac951b5ea2
https://github.com/arcus-io/puppetdb-python/blob/d772eb80a1dfb1154a1f421c7ecdc1ac951b5ea2/puppetdb/v2/facts.py#L44-L53
240,949
fr33jc/bang
bang/util.py
sanitize_config_loglevel
def sanitize_config_loglevel(level): ''' Kinda sorta backport of loglevel sanitization for Python 2.6. ''' if sys.version_info[:2] != (2, 6) or isinstance(level, (int, long)): return level lvl = None if isinstance(level, basestring): lvl = logging._levelNames.get(level) if not lvl: raise ValueError('Invalid log level, %s' % level) return lvl
python
def sanitize_config_loglevel(level): ''' Kinda sorta backport of loglevel sanitization for Python 2.6. ''' if sys.version_info[:2] != (2, 6) or isinstance(level, (int, long)): return level lvl = None if isinstance(level, basestring): lvl = logging._levelNames.get(level) if not lvl: raise ValueError('Invalid log level, %s' % level) return lvl
[ "def", "sanitize_config_loglevel", "(", "level", ")", ":", "if", "sys", ".", "version_info", "[", ":", "2", "]", "!=", "(", "2", ",", "6", ")", "or", "isinstance", "(", "level", ",", "(", "int", ",", "long", ")", ")", ":", "return", "level", "lvl", "=", "None", "if", "isinstance", "(", "level", ",", "basestring", ")", ":", "lvl", "=", "logging", ".", "_levelNames", ".", "get", "(", "level", ")", "if", "not", "lvl", ":", "raise", "ValueError", "(", "'Invalid log level, %s'", "%", "level", ")", "return", "lvl" ]
Kinda sorta backport of loglevel sanitization for Python 2.6.
[ "Kinda", "sorta", "backport", "of", "loglevel", "sanitization", "for", "Python", "2", ".", "6", "." ]
8f000713f88d2a9a8c1193b63ca10a6578560c16
https://github.com/fr33jc/bang/blob/8f000713f88d2a9a8c1193b63ca10a6578560c16/bang/util.py#L217-L228
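The original reaches into the private logging._levelNames table, which only exists on Python 2. A Python 3 flavoured equivalent (my sketch, not part of the library) can use logging.getLevelName, which maps a known name to its integer level:

import logging

def sanitize_loglevel(level):
    if isinstance(level, int):
        return level
    resolved = logging.getLevelName(level)   # 'DEBUG' -> 10; unknown -> 'Level X' string
    if not isinstance(resolved, int):
        raise ValueError('Invalid log level, %s' % level)
    return resolved

print(sanitize_loglevel('DEBUG'))  # 10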
240,950
fr33jc/bang
bang/util.py
count_by_tag
def count_by_tag(stack, descriptor): """ Returns the count of currently running or pending instances that match the given stack and deployer combo """ ec2_conn = boto.ec2.connection.EC2Connection() resses = ec2_conn.get_all_instances( filters={ 'tag:stack': stack, 'tag:descriptor': descriptor }) instance_list_raw = list() [[instance_list_raw.append(x) for x in res.instances] for res in resses] instance_list = [x for x in instance_list_raw if state_filter(x)] instances = len(instance_list) return instances
python
def count_by_tag(stack, descriptor): """ Returns the count of currently running or pending instances that match the given stack and deployer combo """ ec2_conn = boto.ec2.connection.EC2Connection() resses = ec2_conn.get_all_instances( filters={ 'tag:stack': stack, 'tag:descriptor': descriptor }) instance_list_raw = list() [[instance_list_raw.append(x) for x in res.instances] for res in resses] instance_list = [x for x in instance_list_raw if state_filter(x)] instances = len(instance_list) return instances
[ "def", "count_by_tag", "(", "stack", ",", "descriptor", ")", ":", "ec2_conn", "=", "boto", ".", "ec2", ".", "connection", ".", "EC2Connection", "(", ")", "resses", "=", "ec2_conn", ".", "get_all_instances", "(", "filters", "=", "{", "'tag:stack'", ":", "stack", ",", "'tag:descriptor'", ":", "descriptor", "}", ")", "instance_list_raw", "=", "list", "(", ")", "[", "[", "instance_list_raw", ".", "append", "(", "x", ")", "for", "x", "in", "res", ".", "instances", "]", "for", "res", "in", "resses", "]", "instance_list", "=", "[", "x", "for", "x", "in", "instance_list_raw", "if", "state_filter", "(", "x", ")", "]", "instances", "=", "len", "(", "instance_list", ")", "return", "instances" ]
Returns the count of currently running or pending instances that match the given stack and deployer combo
[ "Returns", "the", "count", "of", "currently", "running", "or", "pending", "instances", "that", "match", "the", "given", "stack", "and", "deployer", "combo" ]
8f000713f88d2a9a8c1193b63ca10a6578560c16
https://github.com/fr33jc/bang/blob/8f000713f88d2a9a8c1193b63ca10a6578560c16/bang/util.py#L369-L384
240,951
fr33jc/bang
bang/util.py
SharedNamespace.add_if_unique
def add_if_unique(self, name): """ Returns ``True`` on success. Returns ``False`` if the name already exists in the namespace. """ with self.lock: if name not in self.names: self.names.append(name) return True return False
python
def add_if_unique(self, name): """ Returns ``True`` on success. Returns ``False`` if the name already exists in the namespace. """ with self.lock: if name not in self.names: self.names.append(name) return True return False
[ "def", "add_if_unique", "(", "self", ",", "name", ")", ":", "with", "self", ".", "lock", ":", "if", "name", "not", "in", "self", ".", "names", ":", "self", ".", "names", ".", "append", "(", "name", ")", "return", "True", "return", "False" ]
Returns ``True`` on success. Returns ``False`` if the name already exists in the namespace.
[ "Returns", "True", "on", "success", "." ]
8f000713f88d2a9a8c1193b63ca10a6578560c16
https://github.com/fr33jc/bang/blob/8f000713f88d2a9a8c1193b63ca10a6578560c16/bang/util.py#L149-L159
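The method is a classic check-then-act guarded by a lock, so two concurrent callers can never both claim the same name. A self-contained re-declaration for illustration (the real class lives in bang.util):

import threading

class SharedNamespace(object):
    def __init__(self):
        self.lock = threading.Lock()
        self.names = []

    def add_if_unique(self, name):
        with self.lock:                      # check and append atomically
            if name not in self.names:
                self.names.append(name)
                return True
            return False

ns = SharedNamespace()
print(ns.add_if_unique("web-1"))  # True
print(ns.add_if_unique("web-1"))  # False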
240,952
NegativeMjark/mockingmirror
mockingmirror.py
Mirror._add_to_spec
def _add_to_spec(self, name): """The spec of the mirrored mock object is updated whenever the mirror gains new attributes""" self._spec.add(name) self._mock.mock_add_spec(list(self._spec), True)
python
def _add_to_spec(self, name): """The spec of the mirrored mock object is updated whenever the mirror gains new attributes""" self._spec.add(name) self._mock.mock_add_spec(list(self._spec), True)
[ "def", "_add_to_spec", "(", "self", ",", "name", ")", ":", "self", ".", "_spec", ".", "add", "(", "name", ")", "self", ".", "_mock", ".", "mock_add_spec", "(", "list", "(", "self", ".", "_spec", ")", ",", "True", ")" ]
The spec of the mirrored mock object is updated whenever the mirror gains new attributes
[ "The", "spec", "of", "the", "mirrored", "mock", "object", "is", "updated", "whenever", "the", "mirror", "gains", "new", "attributes" ]
75cf7d56ab18922394db89725ae9b37f1d4b3711
https://github.com/NegativeMjark/mockingmirror/blob/75cf7d56ab18922394db89725ae9b37f1d4b3711/mockingmirror.py#L120-L124
240,953
pacificclimate/cfmeta
cfmeta/cmip3file.py
get_fp_meta
def get_fp_meta(fp): """Processes a CMIP3 style file path. The standard CMIP3 directory structure: <experiment>/<variable_name>/<model>/<ensemble_member>/<CMOR filename>.nc Filename is of pattern: <model>-<experiment>-<variable_name>-<ensemble_member>.nc Arguments: fp (str): A file path conforming to CMIP3 spec. Returns: dict: Metadata as extracted from the file path. """ # Copy metadata list then reverse to start at end of path directory_meta = list(DIR_ATTS) # Prefer meta extracted from filename meta = get_dir_meta(fp, directory_meta) meta.update(get_fname_meta(fp)) return meta
python
def get_fp_meta(fp): """Processes a CMIP3 style file path. The standard CMIP3 directory structure: <experiment>/<variable_name>/<model>/<ensemble_member>/<CMOR filename>.nc Filename is of pattern: <model>-<experiment>-<variable_name>-<ensemble_member>.nc Arguments: fp (str): A file path conforming to CMIP3 spec. Returns: dict: Metadata as extracted from the file path. """ # Copy metadata list then reverse to start at end of path directory_meta = list(DIR_ATTS) # Prefer meta extracted from filename meta = get_dir_meta(fp, directory_meta) meta.update(get_fname_meta(fp)) return meta
[ "def", "get_fp_meta", "(", "fp", ")", ":", "# Copy metadata list then reverse to start at end of path", "directory_meta", "=", "list", "(", "DIR_ATTS", ")", "# Prefer meta extracted from filename", "meta", "=", "get_dir_meta", "(", "fp", ",", "directory_meta", ")", "meta", ".", "update", "(", "get_fname_meta", "(", "fp", ")", ")", "return", "meta" ]
Processes a CMIP3 style file path. The standard CMIP3 directory structure: <experiment>/<variable_name>/<model>/<ensemble_member>/<CMOR filename>.nc Filename is of pattern: <model>-<experiment>-<variable_name>-<ensemble_member>.nc Arguments: fp (str): A file path conforming to CMIP3 spec. Returns: dict: Metadata as extracted from the file path.
[ "Processes", "a", "CMIP3", "style", "file", "path", "." ]
a6eef78d0bce523bb44920ba96233f034b60316a
https://github.com/pacificclimate/cfmeta/blob/a6eef78d0bce523bb44920ba96233f034b60316a/cfmeta/cmip3file.py#L74-L100
240,954
pacificclimate/cfmeta
cfmeta/cmip3file.py
get_fname_meta
def get_fname_meta(fp): """Processes a CMIP3 style file name. Filename is of pattern: <model>-<experiment>-<variable_name>-<ensemble_member>.nc Arguments: fp (str): A file path/name conforming to DRS spec. Returns: dict: Metadata as extracted from the filename. .. _Data Reference Syntax: http://cmip-pcmdi.llnl.gov/cmip5/docs/cmip5_data_reference_syntax.pdf """ # Strip directory, extension, then split if '/' in fp: fp = os.path.split(fp)[1] fname = os.path.splitext(fp)[0] meta = fname.split('-') res = {} try: for key in FNAME_ATTS: res[key] = meta.pop(0) except IndexError: raise PathError(fname) return res
python
def get_fname_meta(fp): """Processes a CMIP3 style file name. Filename is of pattern: <model>-<experiment>-<variable_name>-<ensemble_member>.nc Arguments: fp (str): A file path/name conforming to DRS spec. Returns: dict: Metadata as extracted from the filename. .. _Data Reference Syntax: http://cmip-pcmdi.llnl.gov/cmip5/docs/cmip5_data_reference_syntax.pdf """ # Strip directory, extension, then split if '/' in fp: fp = os.path.split(fp)[1] fname = os.path.splitext(fp)[0] meta = fname.split('-') res = {} try: for key in FNAME_ATTS: res[key] = meta.pop(0) except IndexError: raise PathError(fname) return res
[ "def", "get_fname_meta", "(", "fp", ")", ":", "# Strip directory, extension, then split", "if", "'/'", "in", "fp", ":", "fp", "=", "os", ".", "path", ".", "split", "(", "fp", ")", "[", "1", "]", "fname", "=", "os", ".", "path", ".", "splitext", "(", "fp", ")", "[", "0", "]", "meta", "=", "fname", ".", "split", "(", "'-'", ")", "res", "=", "{", "}", "try", ":", "for", "key", "in", "FNAME_ATTS", ":", "res", "[", "key", "]", "=", "meta", ".", "pop", "(", "0", ")", "except", "IndexError", ":", "raise", "PathError", "(", "fname", ")", "return", "res" ]
Processes a CMIP3 style file name. Filename is of pattern: <model>-<experiment>-<variable_name>-<ensemble_member>.nc Arguments: fp (str): A file path/name conforming to DRS spec. Returns: dict: Metadata as extracted from the filename. .. _Data Reference Syntax: http://cmip-pcmdi.llnl.gov/cmip5/docs/cmip5_data_reference_syntax.pdf
[ "Processes", "a", "CMIP3", "style", "file", "name", "." ]
a6eef78d0bce523bb44920ba96233f034b60316a
https://github.com/pacificclimate/cfmeta/blob/a6eef78d0bce523bb44920ba96233f034b60316a/cfmeta/cmip3file.py#L102-L133
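Worked example of the split, assuming FNAME_ATTS lists the four fields in filename order (my guess at its contents, inferred from the docstring pattern); the filename itself is invented:

import os

FNAME_ATTS = ['model', 'experiment', 'variable_name', 'ensemble_member']

fname = os.path.splitext('cgcm3.1_t47-sresa2-pr-run1.nc')[0]
print(dict(zip(FNAME_ATTS, fname.split('-'))))
# {'model': 'cgcm3.1_t47', 'experiment': 'sresa2',
#  'variable_name': 'pr', 'ensemble_member': 'run1'}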
240,955
Arvedui/picuplib
picuplib/misc.py
gen_user_agent
def gen_user_agent(version): """ generating the user agent which will be used for most requests monkey patching system and release functions from platform module to prevent disclosure of the OS and its version """ def monkey_patch(): """ small monkey patch """ raise IOError # saving original functions orig_system = platform.system orig_release = platform.release # applying patch platform.system = monkey_patch platform.release = monkey_patch user_agent = requests_toolbelt.user_agent('picuplib', version) # reverting patch platform.system = orig_system platform.release = orig_release return user_agent
python
def gen_user_agent(version): """ generating the user agent which will be used for most requests monkey patching system and release functions from platform module to prevent disclosure of the OS and its version """ def monkey_patch(): """ small monkey patch """ raise IOError # saving original functions orig_system = platform.system orig_release = platform.release # applying patch platform.system = monkey_patch platform.release = monkey_patch user_agent = requests_toolbelt.user_agent('picuplib', version) # reverting patch platform.system = orig_system platform.release = orig_release return user_agent
[ "def", "gen_user_agent", "(", "version", ")", ":", "def", "monkey_patch", "(", ")", ":", "\"\"\"\n small monkey patch\n \"\"\"", "raise", "IOError", "# saving original functions", "orig_system", "=", "platform", ".", "system", "orig_release", "=", "platform", ".", "release", "# applying patch", "platform", ".", "system", "=", "monkey_patch", "platform", ".", "release", "=", "monkey_patch", "user_agent", "=", "requests_toolbelt", ".", "user_agent", "(", "'picuplib'", ",", "version", ")", "# reverting patch", "platform", ".", "system", "=", "orig_system", "platform", ".", "release", "=", "orig_release", "return", "user_agent" ]
generating the user agent which will be used for most requests monkey patching system and release functions from platform module to prevent disclosure of the OS and its version
[ "generating", "the", "user", "agent", "witch", "will", "be", "used", "for", "most", "requests" ]
c8a5d1542dbd421e84afd5ee81fe76efec89fb95
https://github.com/Arvedui/picuplib/blob/c8a5d1542dbd421e84afd5ee81fe76efec89fb95/picuplib/misc.py#L27-L54
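The patch-call-revert dance above leaves platform broken if user_agent() raises. A try/finally variant (my sketch, same requests_toolbelt call) guarantees the originals are restored:

import platform
import requests_toolbelt

def gen_user_agent_safe(version):
    def refuse():
        raise IOError
    orig_system, orig_release = platform.system, platform.release
    platform.system = platform.release = refuse   # hide OS name and release
    try:
        return requests_toolbelt.user_agent('picuplib', version)
    finally:                                      # always undo the patch
        platform.system, platform.release = orig_system, orig_release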
240,956
ryanhorn/tyoiOAuth2
tyoi/oauth2/__init__.py
AccessTokenRequest.build_url_request
def build_url_request(self): """ Consults the authenticator and grant for HTTP request parameters and headers to send with the access token request, builds the request using the stored endpoint and returns it. """ params = {} headers = {} self._authenticator(params, headers) self._grant(params) return Request(self._endpoint, urlencode(params), headers)
python
def build_url_request(self): """ Consults the authenticator and grant for HTTP request parameters and headers to send with the access token request, builds the request using the stored endpoint and returns it. """ params = {} headers = {} self._authenticator(params, headers) self._grant(params) return Request(self._endpoint, urlencode(params), headers)
[ "def", "build_url_request", "(", "self", ")", ":", "params", "=", "{", "}", "headers", "=", "{", "}", "self", ".", "_authenticator", "(", "params", ",", "headers", ")", "self", ".", "_grant", "(", "params", ")", "return", "Request", "(", "self", ".", "_endpoint", ",", "urlencode", "(", "params", ")", ",", "headers", ")" ]
Consults the authenticator and grant for HTTP request parameters and headers to send with the access token request, builds the request using the stored endpoint and returns it.
[ "Consults", "the", "authenticator", "and", "grant", "for", "HTTP", "request", "parameters", "and", "headers", "to", "send", "with", "the", "access", "token", "request", "builds", "the", "request", "using", "the", "stored", "endpoint", "and", "returns", "it", "." ]
e6a388868ee05c5fef3bec795c5efd88c19b0955
https://github.com/ryanhorn/tyoiOAuth2/blob/e6a388868ee05c5fef3bec795c5efd88c19b0955/tyoi/oauth2/__init__.py#L93-L103
240,957
ryanhorn/tyoiOAuth2
tyoi/oauth2/__init__.py
AccessTokenRequest.send
def send(self, response_decoder=None): """ Creates and sends a request to the OAuth server, decodes the response and returns the resulting token object. response_decoder - A custom callable can be supplied to override the default method of extracting AccessToken parameters from the response. This is necessary for server implementations which do not conform to the more recent OAuth2 specification (ex: Facebook). By default, this will assume the response is encoded using JSON. The callable should return a dictionary with keys and values as follows: access_token - The access token token_type - The token type expires_in - The number of seconds in which the token expires refresh_token - The refresh token scope - The permission scope (as a space delimited string) """ decoder = loads if response_decoder is not None and callable(response_decoder): decoder = response_decoder request = self.build_url_request() try: f = urlopen(request) except HTTPError as e: try: error_resp = e.read() error_data = loads(error_resp) except Exception: raise AccessTokenResponseError('Access request returned an error, but the response could not be read: %s ' % error_resp) if error_data.get('error') is None: raise AccessTokenResponseError('Access request returned an error, but did not include an error code') raise AccessTokenRequestError(error_data['error'], error_data.get('error_description'), error_data.get('error_uri')) token_data = decoder(f.read()) return self._create_access_token(token_data)
python
def send(self, response_decoder=None): """ Creates and sends a request to the OAuth server, decodes the response and returns the resulting token object. response_decoder - A custom callable can be supplied to override the default method of extracting AccessToken parameters from the response. This is necessary for server implementations which do not conform to the more recent OAuth2 specification (ex: Facebook). By default, this will assume the response is encoded using JSON. The callable should return a dictionary with keys and values as follows: access_token - The access token token_type - The token type expires_in - The number of seconds in which the token expires refresh_token - The refresh token scope - The permission scope (as a space delimited string) """ decoder = loads if response_decoder is not None and callable(response_decoder): decoder = response_decoder request = self.build_url_request() try: f = urlopen(request) except HTTPError as e: try: error_resp = e.read() error_data = loads(error_resp) except Exception: raise AccessTokenResponseError('Access request returned an error, but the response could not be read: %s ' % error_resp) if error_data.get('error') is None: raise AccessTokenResponseError('Access request returned an error, but did not include an error code') raise AccessTokenRequestError(error_data['error'], error_data.get('error_description'), error_data.get('error_uri')) token_data = decoder(f.read()) return self._create_access_token(token_data)
[ "def", "send", "(", "self", ",", "response_decoder", "=", "None", ")", ":", "decoder", "=", "loads", "if", "response_decoder", "is", "not", "None", "and", "callable", "(", "response_decoder", ")", ":", "decoder", "=", "response_decoder", "request", "=", "self", ".", "build_url_request", "(", ")", "try", ":", "f", "=", "urlopen", "(", "request", ")", "except", "HTTPError", "as", "e", ":", "try", ":", "error_resp", "=", "e", ".", "read", "(", ")", "error_data", "=", "loads", "(", "error_resp", ")", "except", "Exception", ":", "raise", "AccessTokenResponseError", "(", "'Access request returned an error, but the response could not be read: %s '", "%", "error_resp", ")", "if", "error_data", ".", "get", "(", "'error'", ")", "is", "None", ":", "raise", "AccessTokenResponseError", "(", "'Access request returned an error, but did not include an error code'", ")", "raise", "AccessTokenRequestError", "(", "error_data", "[", "'error'", "]", ",", "error_data", ".", "get", "(", "'error_description'", ")", ",", "error_data", ".", "get", "(", "'error_uri'", ")", ")", "token_data", "=", "decoder", "(", "f", ".", "read", "(", ")", ")", "return", "self", ".", "_create_access_token", "(", "token_data", ")" ]
Creates and sends a request to the OAuth server, decodes the response and returns the resulting token object. response_decoder - A custom callable can be supplied to override the default method of extracting AccessToken parameters from the response. This is necessary for server implementations which do not conform to the more recent OAuth2 specification (ex: Facebook). By default, this will assume the response is encoded using JSON. The callable should return a dictionary with keys and values as follows: access_token - The access token token_type - The token type expires_in - The number of seconds in which the token expires refresh_token - The refresh token scope - The permission scope (as a space delimited string)
[ "Creates", "and", "sends", "a", "request", "to", "the", "OAuth", "server", "decodes", "the", "response", "and", "returns", "the", "resulting", "token", "object", "." ]
e6a388868ee05c5fef3bec795c5efd88c19b0955
https://github.com/ryanhorn/tyoiOAuth2/blob/e6a388868ee05c5fef3bec795c5efd88c19b0955/tyoi/oauth2/__init__.py#L105-L147
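A plausible custom response_decoder for the non-JSON case the docstring mentions (old Facebook-style urlencoded bodies); the decoder itself is my sketch:

try:
    from urllib.parse import parse_qsl   # Python 3
except ImportError:
    from urlparse import parse_qsl       # Python 2

def urlencoded_decoder(body):
    if isinstance(body, bytes):
        body = body.decode('utf-8')
    return dict(parse_qsl(body))

print(urlencoded_decoder(b'access_token=abc123&expires_in=3600'))
# {'access_token': 'abc123', 'expires_in': '3600'}
# usage: token_request.send(response_decoder=urlencoded_decoder)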
240,958
mikkeljans/pyconomic
pyconomic/base.py
EConomicsService.get
def get(self, model, **spec): """get a single model instance by spec :param model: model :param spec: attribute filters identifying the instance :return: """ handles = self.__find_handles(model, **spec) if len(handles) > 1: raise MultipleObjectsReturned() if not handles: raise ObjectDoesNotExist() return self.get_instance(model, handles[0])
python
def get(self, model, **spec): """get a single model instance by spec :param model: model :param spec: attribute filters identifying the instance :return: """ handles = self.__find_handles(model, **spec) if len(handles) > 1: raise MultipleObjectsReturned() if not handles: raise ObjectDoesNotExist() return self.get_instance(model, handles[0])
[ "def", "get", "(", "self", ",", "model", ",", "*", "*", "spec", ")", ":", "handles", "=", "self", ".", "__find_handles", "(", "model", ",", "*", "*", "spec", ")", "if", "len", "(", "handles", ")", ">", "1", ":", "raise", "MultipleObjectsReturned", "(", ")", "if", "not", "handles", ":", "raise", "ObjectDoesNotExist", "(", ")", "return", "self", ".", "get_instance", "(", "model", ",", "handles", "[", "0", "]", ")" ]
get a single model instance by spec :param model: model :param spec: attribute filters identifying the instance :return:
[ "get", "a", "single", "model", "instance", "by", "handle" ]
845b8148a364cf5be9065f8a70133d4f16ab645d
https://github.com/mikkeljans/pyconomic/blob/845b8148a364cf5be9065f8a70133d4f16ab645d/pyconomic/base.py#L123-L136
240,959
oxalorg/ghPublish
ghPublish/ghPublish.py
Publish.get_sha_blob
def get_sha_blob(self): """ if the current file exists returns the sha blob else returns None """ r = requests.get(self.api_url, auth=self.get_auth_details()) try: return r.json()['sha'] except KeyError: return None
python
def get_sha_blob(self): """ if the current file exists returns the sha blob else returns None """ r = requests.get(self.api_url, auth=self.get_auth_details()) try: return r.json()['sha'] except KeyError: return None
[ "def", "get_sha_blob", "(", "self", ")", ":", "r", "=", "requests", ".", "get", "(", "self", ".", "api_url", ",", "auth", "=", "self", ".", "get_auth_details", "(", ")", ")", "try", ":", "return", "r", ".", "json", "(", ")", "[", "'sha'", "]", "except", "KeyError", ":", "return", "None" ]
if the current file exists returns the sha blob else returns None
[ "if", "the", "current", "file", "exists", "returns", "the", "sha", "blob", "else", "returns", "None" ]
aa3ec8fd2187efd99cffc1a5a76eda4ff2a3b636
https://github.com/oxalorg/ghPublish/blob/aa3ec8fd2187efd99cffc1a5a76eda4ff2a3b636/ghPublish/ghPublish.py#L31-L42
240,960
oxalorg/ghPublish
ghPublish/ghPublish.py
Publish.publish_post
def publish_post(self): """ If it's a new file, add it. Else, update it. """ payload = {'content': self.content_base64.decode('utf-8')} sha_blob = self.get_sha_blob() if sha_blob: commit_msg = 'ghPublish UPDATE: {}'.format(self.title) payload.update(sha=sha_blob) payload.update(message=commit_msg) else: commit_msg = 'ghPublish ADD: {}'.format(self.title) payload.update(message=commit_msg) r = requests.put(self.api_url, auth=self.get_auth_details(), data=json.dumps(payload)) try: url = r.json()['content']['html_url'] return r.status_code, url except KeyError: return r.status_code, None
python
def publish_post(self): """ If it's a new file, add it. Else, update it. """ payload = {'content': self.content_base64.decode('utf-8')} sha_blob = self.get_sha_blob() if sha_blob: commit_msg = 'ghPublish UPDATE: {}'.format(self.title) payload.update(sha=sha_blob) payload.update(message=commit_msg) else: commit_msg = 'ghPublish ADD: {}'.format(self.title) payload.update(message=commit_msg) r = requests.put(self.api_url, auth=self.get_auth_details(), data=json.dumps(payload)) try: url = r.json()['content']['html_url'] return r.status_code, url except KeyError: return r.status_code, None
[ "def", "publish_post", "(", "self", ")", ":", "payload", "=", "{", "'content'", ":", "self", ".", "content_base64", ".", "decode", "(", "'utf-8'", ")", "}", "sha_blob", "=", "self", ".", "get_sha_blob", "(", ")", "if", "sha_blob", ":", "commit_msg", "=", "'ghPublish UPDATE: {}'", ".", "format", "(", "self", ".", "title", ")", "payload", ".", "update", "(", "sha", "=", "sha_blob", ")", "payload", ".", "update", "(", "message", "=", "commit_msg", ")", "else", ":", "commit_msg", "=", "'ghPublish ADD: {}'", ".", "format", "(", "self", ".", "title", ")", "payload", ".", "update", "(", "message", "=", "commit_msg", ")", "r", "=", "requests", ".", "put", "(", "self", ".", "api_url", ",", "auth", "=", "self", ".", "get_auth_details", "(", ")", ",", "data", "=", "json", ".", "dumps", "(", "payload", ")", ")", "try", ":", "url", "=", "r", ".", "json", "(", ")", "[", "'content'", "]", "[", "'html_url'", "]", "return", "r", ".", "status_code", ",", "url", "except", "KeyError", ":", "return", "r", ".", "status_code", ",", "None" ]
If it's a new file, add it. Else, update it.
[ "If", "it", "s", "a", "new", "file", "add", "it", ".", "Else", "update", "it", "." ]
aa3ec8fd2187efd99cffc1a5a76eda4ff2a3b636
https://github.com/oxalorg/ghPublish/blob/aa3ec8fd2187efd99cffc1a5a76eda4ff2a3b636/ghPublish/ghPublish.py#L44-L67
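The branching boils down to one rule of the GitHub contents API: updating an existing file requires the current blob sha in the payload, creating does not. Isolated as a pure function (my sketch):

def build_payload(content_b64, title, sha=None):
    payload = {'content': content_b64, 'message': 'ghPublish ADD: ' + title}
    if sha:                                   # existing file: switch to an update
        payload.update(sha=sha, message='ghPublish UPDATE: ' + title)
    return payload

print(build_payload('aGVsbG8=', 'example'))
print(build_payload('aGVsbG8=', 'example', sha='abc123'))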
240,961
the01/python-paps
examples/gpio_detector.py
GPIODetector._gpio_callback
def _gpio_callback(self, gpio): """ Gets triggered whenever the gpio state changes :param gpio: Number of gpio that changed :type gpio: int :rtype: None """ self.debug(u"Triggered #{}".format(gpio)) try: index = self.gpios.index(gpio) except ValueError: self.error(u"{} not present in GPIO list".format(gpio)) return with self._people_lock: person = self.people[index] read_val = GPIO.input(gpio) if read_val == person.sitting: # Nothing changed? time.sleep(self.gpio_bouncetime_sleep) # Really sure? read_val = GPIO.input(gpio) if person.sitting != read_val: person.sitting = read_val self.debug(u"Person is now {}sitting".format( "" if person.sitting else "not ") ) try: self.changer.on_person_update(self.people) except: self.exception( u"Failed to update people (Person: {})".format(person) ) else: self.warning(u"Nothing changed on {}".format(gpio))
python
def _gpio_callback(self, gpio): """ Gets triggered whenever the gpio state changes :param gpio: Number of gpio that changed :type gpio: int :rtype: None """ self.debug(u"Triggered #{}".format(gpio)) try: index = self.gpios.index(gpio) except ValueError: self.error(u"{} not present in GPIO list".format(gpio)) return with self._people_lock: person = self.people[index] read_val = GPIO.input(gpio) if read_val == person.sitting: # Nothing changed? time.sleep(self.gpio_bouncetime_sleep) # Really sure? read_val = GPIO.input(gpio) if person.sitting != read_val: person.sitting = read_val self.debug(u"Person is now {}sitting".format( "" if person.sitting else "not ") ) try: self.changer.on_person_update(self.people) except: self.exception( u"Failed to update people (Person: {})".format(person) ) else: self.warning(u"Nothing changed on {}".format(gpio))
[ "def", "_gpio_callback", "(", "self", ",", "gpio", ")", ":", "self", ".", "debug", "(", "u\"Triggered #{}\"", ".", "format", "(", "gpio", ")", ")", "try", ":", "index", "=", "self", ".", "gpios", ".", "index", "(", "gpio", ")", "except", "ValueError", ":", "self", ".", "error", "(", "u\"{} not present in GPIO list\"", ".", "format", "(", "gpio", ")", ")", "return", "with", "self", ".", "_people_lock", ":", "person", "=", "self", ".", "people", "[", "index", "]", "read_val", "=", "GPIO", ".", "input", "(", "gpio", ")", "if", "read_val", "==", "person", ".", "sitting", ":", "# Nothing changed?", "time", ".", "sleep", "(", "self", ".", "gpio_bouncetime_sleep", ")", "# Really sure?", "read_val", "=", "GPIO", ".", "input", "(", "gpio", ")", "if", "person", ".", "sitting", "!=", "read_val", ":", "person", ".", "sitting", "=", "read_val", "self", ".", "debug", "(", "u\"Person is now {}sitting\"", ".", "format", "(", "\"\"", "if", "person", ".", "sitting", "else", "\"not \"", ")", ")", "try", ":", "self", ".", "changer", ".", "on_person_update", "(", "self", ".", "people", ")", "except", ":", "self", ".", "exception", "(", "u\"Failed to update people (Person: {})\"", ".", "format", "(", "person", ")", ")", "else", ":", "self", ".", "warning", "(", "u\"Nothing changed on {}\"", ".", "format", "(", "gpio", ")", ")" ]
Gets triggered whenever the gpio state changes :param gpio: Number of gpio that changed :type gpio: int :rtype: None
[ "Gets", "triggered", "whenever", "the", "the", "gpio", "state", "changes" ]
2dde5a71913e4c7b22901cf05c6ecedd890919c4
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/examples/gpio_detector.py#L64-L98
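The "sleep and sample again" in the middle is a software debounce. The same idea in a hardware-free sketch, with the GPIO read replaced by an injected callable so it runs anywhere:

import time

def debounced_read(read, previous, settle=0.02):
    value = read()
    if value == previous:          # apparently unchanged: wait out contact bounce
        time.sleep(settle)
        value = read()             # and sample once more
    return value

readings = iter([1, 0])
print(debounced_read(lambda: next(readings), previous=1))  # 0 after the re-read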
240,962
pavelsof/ipalint
ipalint/report.py
Reporter._get_report
def _get_report(self, with_line_nums=True): """ Returns a report which includes each distinct error only once, together with a list of the input lines where the error occurs. The latter will be omitted if the flag is set to False. Helper for the get_report method. """ templ = '{} ← {}' if with_line_nums else '{}' return '\n'.join([ templ.format(error.string, ','.join(map(str, sorted(set(lines))))) for error, lines in self.errors.items()])
python
def _get_report(self, with_line_nums=True): """ Returns a report which includes each distinct error only once, together with a list of the input lines where the error occurs. The latter will be omitted if the flag is set to False. Helper for the get_report method. """ templ = '{} ← {}' if with_line_nums else '{}' return '\n'.join([ templ.format(error.string, ','.join(map(str, sorted(set(lines))))) for error, lines in self.errors.items()])
[ "def", "_get_report", "(", "self", ",", "with_line_nums", "=", "True", ")", ":", "templ", "=", "'{} ← {}' i", " w", "th_line_nums e", "se '", "}'", "return", "'\\n'", ".", "join", "(", "[", "templ", ".", "format", "(", "error", ".", "string", ",", "','", ".", "join", "(", "map", "(", "str", ",", "sorted", "(", "set", "(", "lines", ")", ")", ")", ")", ")", "for", "error", ",", "lines", "in", "self", ".", "errors", ".", "items", "(", ")", "]", ")" ]
Returns a report which includes each distinct error only once, together with a list of the input lines where the error occurs. The latter will be omitted if the flag is set to False. Helper for the get_report method.
[ "Returns", "a", "report", "which", "includes", "each", "distinct", "error", "only", "once", "together", "with", "a", "list", "of", "the", "input", "lines", "where", "the", "error", "occurs", ".", "The", "latter", "will", "be", "omitted", "if", "flag", "is", "set", "to", "False", "." ]
763e5979ede6980cbfc746b06fd007b379762eeb
https://github.com/pavelsof/ipalint/blob/763e5979ede6980cbfc746b06fd007b379762eeb/ipalint/report.py#L69-L81
240,963
davidmiller/letter
letter/sms.py
TwillioPostie.send
def send(self, to, from_, body): """ Send BODY to TO from FROM as an SMS! """ try: msg = self.client.sms.messages.create( body=body, to=to, from_=from_ ) print msg.sid except twilio.TwilioRestException as e: raise
python
def send(self, to, from_, body): """ Send BODY to TO from FROM as an SMS! """ try: msg = self.client.sms.messages.create( body=body, to=to, from_=from_ ) print msg.sid except twilio.TwilioRestException as e: raise
[ "def", "send", "(", "self", ",", "to", ",", "from_", ",", "body", ")", ":", "try", ":", "msg", "=", "self", ".", "client", ".", "sms", ".", "messages", ".", "create", "(", "body", "=", "body", ",", "to", "=", "to", ",", "from_", "=", "from_", ")", "print", "msg", ".", "sid", "except", "twilio", ".", "TwilioRestException", "as", "e", ":", "raise" ]
Send BODY to TO from FROM as an SMS!
[ "Send", "BODY", "to", "TO", "from", "FROM", "as", "an", "SMS!" ]
c0c66ae2c6a792106e9a8374a01421817c8a8ae0
https://github.com/davidmiller/letter/blob/c0c66ae2c6a792106e9a8374a01421817c8a8ae0/letter/sms.py#L14-L26
240,964
ravenac95/pippy
pippy/utils.py
tar_gzip_dir
def tar_gzip_dir(directory, destination, base=None): """Creates a tar.gz from a directory.""" dest_file = tarfile.open(destination, 'w:gz') abs_dir_path = os.path.abspath(directory) base_name = abs_dir_path + "/" base = base or os.path.basename(directory) for path, dirnames, filenames in os.walk(abs_dir_path): rel_path = path[len(base_name):] dir_norm_path = os.path.join(base, rel_path) dir_tar_info = dest_file.gettarinfo(name=path) dir_tar_info.name = dir_norm_path dest_file.addfile(dir_tar_info) for filename in filenames: actual_path = os.path.join(path, filename) norm_path = os.path.join(base, rel_path, filename) tar_info = dest_file.gettarinfo(name=actual_path) tar_info.name = norm_path new_file = open(actual_path, 'rb') dest_file.addfile(tar_info, new_file) dest_file.close()
python
def tar_gzip_dir(directory, destination, base=None): """Creates a tar.gz from a directory.""" dest_file = tarfile.open(destination, 'w:gz') abs_dir_path = os.path.abspath(directory) base_name = abs_dir_path + "/" base = base or os.path.basename(directory) for path, dirnames, filenames in os.walk(abs_dir_path): rel_path = path[len(base_name):] dir_norm_path = os.path.join(base, rel_path) dir_tar_info = dest_file.gettarinfo(name=path) dir_tar_info.name = dir_norm_path dest_file.addfile(dir_tar_info) for filename in filenames: actual_path = os.path.join(path, filename) norm_path = os.path.join(base, rel_path, filename) tar_info = dest_file.gettarinfo(name=actual_path) tar_info.name = norm_path new_file = open(actual_path, 'rb') dest_file.addfile(tar_info, new_file) dest_file.close()
[ "def", "tar_gzip_dir", "(", "directory", ",", "destination", ",", "base", "=", "None", ")", ":", "dest_file", "=", "tarfile", ".", "open", "(", "destination", ",", "'w:gz'", ")", "abs_dir_path", "=", "os", ".", "path", ".", "abspath", "(", "directory", ")", "base_name", "=", "abs_dir_path", "+", "\"/\"", "base", "=", "base", "or", "os", ".", "path", ".", "basename", "(", "directory", ")", "for", "path", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "abs_dir_path", ")", ":", "rel_path", "=", "path", "[", "len", "(", "base_name", ")", ":", "]", "dir_norm_path", "=", "os", ".", "path", ".", "join", "(", "base", ",", "rel_path", ")", "dir_tar_info", "=", "dest_file", ".", "gettarinfo", "(", "name", "=", "path", ")", "dir_tar_info", ".", "name", "=", "dir_norm_path", "dest_file", ".", "addfile", "(", "dir_tar_info", ")", "for", "filename", "in", "filenames", ":", "actual_path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "filename", ")", "norm_path", "=", "os", ".", "path", ".", "join", "(", "base", ",", "rel_path", ",", "filename", ")", "tar_info", "=", "dest_file", ".", "gettarinfo", "(", "name", "=", "actual_path", ")", "tar_info", ".", "name", "=", "norm_path", "new_file", "=", "open", "(", "actual_path", ",", "'rb'", ")", "dest_file", ".", "addfile", "(", "tar_info", ",", "new_file", ")", "dest_file", ".", "close", "(", ")" ]
Creates a tar.gz from a directory.
[ "Creates", "a", "tar", ".", "gz", "from", "a", "directory", "." ]
8ae26e0a3f20d99103a421d9b94dc8d2169688b1
https://github.com/ravenac95/pippy/blob/8ae26e0a3f20d99103a421d9b94dc8d2169688b1/pippy/utils.py#L14-L33
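For comparison, tarfile can build an equivalent archive in one recursive call, which also closes every file handle (the manual walk above never closes the files it opens). A minimal sketch:

import os
import tarfile

def tar_gzip_dir_simple(directory, destination, base=None):
    base = base or os.path.basename(directory)
    with tarfile.open(destination, 'w:gz') as tar:
        tar.add(directory, arcname=base)   # recurses into subdirectories by default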
240,965
za-creature/gulpless
gulpless/__init__.py
main
def main(): """Entry point for command line usage.""" import colorama import argparse import logging import sys import os parser = argparse.ArgumentParser(prog="gulpless", description="Simple build system.") parser.add_argument("-v", "--version", action="version", version="%(prog)s 0.7.6") parser.add_argument("-d", "--directory", action="store", default=os.getcwd(), help="Look for `build.py` in this folder (defaults to " "the current directory)") parser.add_argument("mode", action="store", choices=["build", "interactive"], default="interactive", metavar="mode", nargs="?", help="If `interactive` (the default), will wait for " "filesystem events and attempt to keep the input " "and output folders in sync. If `build`, it will " "attempt to build all updated files, then exit.") args = parser.parse_args() os.chdir(args.directory) sys.path.append(os.getcwd()) if os.environ.get("TERM") == "cygwin": # colorama doesn't play well with git bash del os.environ["TERM"] colorama.init() os.environ["TERM"] = "cygwin" else: colorama.init() try: old, sys.dont_write_bytecode = sys.dont_write_bytecode, True import build except ImportError: sys.exit("No `build.py` found in current folder.") finally: sys.dont_write_bytecode = old try: logging.basicConfig(level=build.LOGGING, format="%(message)s") except AttributeError: logging.basicConfig(level=logging.INFO, format="%(message)s") reactor = Reactor(build.SRC, build.DEST) for handler in build.HANDLERS: reactor.add_handler(handler) reactor.run(args.mode == "build")
python
def main(): """Entry point for command line usage.""" import colorama import argparse import logging import sys import os parser = argparse.ArgumentParser(prog="gulpless", description="Simple build system.") parser.add_argument("-v", "--version", action="version", version="%(prog)s 0.7.6") parser.add_argument("-d", "--directory", action="store", default=os.getcwd(), help="Look for `build.py` in this folder (defaults to " "the current directory)") parser.add_argument("mode", action="store", choices=["build", "interactive"], default="interactive", metavar="mode", nargs="?", help="If `interactive` (the default), will wait for " "filesystem events and attempt to keep the input " "and output folders in sync. If `build`, it will " "attempt to build all updated files, then exit.") args = parser.parse_args() os.chdir(args.directory) sys.path.append(os.getcwd()) if os.environ.get("TERM") == "cygwin": # colorama doesn't play well with git bash del os.environ["TERM"] colorama.init() os.environ["TERM"] = "cygwin" else: colorama.init() try: old, sys.dont_write_bytecode = sys.dont_write_bytecode, True import build except ImportError: sys.exit("No `build.py` found in current folder.") finally: sys.dont_write_bytecode = old try: logging.basicConfig(level=build.LOGGING, format="%(message)s") except AttributeError: logging.basicConfig(level=logging.INFO, format="%(message)s") reactor = Reactor(build.SRC, build.DEST) for handler in build.HANDLERS: reactor.add_handler(handler) reactor.run(args.mode == "build")
[ "def", "main", "(", ")", ":", "import", "colorama", "import", "argparse", "import", "logging", "import", "sys", "import", "os", "parser", "=", "argparse", ".", "ArgumentParser", "(", "prog", "=", "\"gulpless\"", ",", "description", "=", "\"Simple build system.\"", ")", "parser", ".", "add_argument", "(", "\"-v\"", ",", "\"--version\"", ",", "action", "=", "\"version\"", ",", "version", "=", "\"%(prog)s 0.7.6\"", ")", "parser", ".", "add_argument", "(", "\"-d\"", ",", "\"--directory\"", ",", "action", "=", "\"store\"", ",", "default", "=", "os", ".", "getcwd", "(", ")", ",", "help", "=", "\"Look for `build.py` in this folder (defaults to \"", "\"the current directory)\"", ")", "parser", ".", "add_argument", "(", "\"mode\"", ",", "action", "=", "\"store\"", ",", "choices", "=", "[", "\"build\"", ",", "\"interactive\"", "]", ",", "default", "=", "\"interactive\"", ",", "metavar", "=", "\"mode\"", ",", "nargs", "=", "\"?\"", ",", "help", "=", "\"If `interactive` (the default), will wait for \"", "\"filesystem events and attempt to keep the input \"", "\"and output folders in sync. If `build`, it will \"", "\"attempt to build all updated files, then exit.\"", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "os", ".", "chdir", "(", "args", ".", "directory", ")", "sys", ".", "path", ".", "append", "(", "os", ".", "getcwd", "(", ")", ")", "if", "os", ".", "environ", ".", "get", "(", "\"TERM\"", ")", "==", "\"cygwin\"", ":", "# colorama doesn't play well with git bash", "del", "os", ".", "environ", "[", "\"TERM\"", "]", "colorama", ".", "init", "(", ")", "os", ".", "environ", "[", "\"TERM\"", "]", "=", "\"cygwin\"", "else", ":", "colorama", ".", "init", "(", ")", "try", ":", "old", ",", "sys", ".", "dont_write_bytecode", "=", "sys", ".", "dont_write_bytecode", ",", "True", "import", "build", "except", "ImportError", ":", "sys", ".", "exit", "(", "\"No `build.py` found in current folder.\"", ")", "finally", ":", "sys", ".", "dont_write_bytecode", "=", "old", "try", ":", "logging", ".", "basicConfig", "(", "level", "=", "build", ".", "LOGGING", ",", "format", "=", "\"%(message)s\"", ")", "except", "AttributeError", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ",", "format", "=", "\"%(message)s\"", ")", "reactor", "=", "Reactor", "(", "build", ".", "SRC", ",", "build", ".", "DEST", ")", "for", "handler", "in", "build", ".", "HANDLERS", ":", "reactor", ".", "add_handler", "(", "handler", ")", "reactor", ".", "run", "(", "args", ".", "mode", "==", "\"build\"", ")" ]
Entry point for command line usage.
[ "Entry", "point", "for", "command", "line", "usage", "." ]
fd73907dbe86880086719816bb042233f85121f6
https://github.com/za-creature/gulpless/blob/fd73907dbe86880086719816bb042233f85121f6/gulpless/__init__.py#L11-L70
240,966
inveniosoftware-attic/invenio-knowledge
invenio_knowledge/models.py
del_kwnkb
def del_kwnkb(mapper, connection, target): """Remove taxonomy file.""" if(target.kbtype == KnwKB.KNWKB_TYPES['taxonomy']): # Delete taxonomy file if os.path.isfile(target.get_filename()): os.remove(target.get_filename())
python
def del_kwnkb(mapper, connection, target): """Remove taxonomy file.""" if(target.kbtype == KnwKB.KNWKB_TYPES['taxonomy']): # Delete taxonomy file if os.path.isfile(target.get_filename()): os.remove(target.get_filename())
[ "def", "del_kwnkb", "(", "mapper", ",", "connection", ",", "target", ")", ":", "if", "(", "target", ".", "kbtype", "==", "KnwKB", ".", "KNWKB_TYPES", "[", "'taxonomy'", "]", ")", ":", "# Delete taxonomy file", "if", "os", ".", "path", ".", "isfile", "(", "target", ".", "get_filename", "(", ")", ")", ":", "os", ".", "remove", "(", "target", ".", "get_filename", "(", ")", ")" ]
Remove taxonomy file.
[ "Remove", "taxonomy", "file", "." ]
b31722dc14243ca8f626f8b3bce9718d0119de55
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/models.py#L239-L244
240,967
inveniosoftware-attic/invenio-knowledge
invenio_knowledge/models.py
KnwKB.name
def name(self, value): """Set name and generate the slug.""" self._name = value # generate slug if not self.slug: self.slug = KnwKB.generate_slug(value)
python
def name(self, value): """Set name and generate the slug.""" self._name = value # generate slug if not self.slug: self.slug = KnwKB.generate_slug(value)
[ "def", "name", "(", "self", ",", "value", ")", ":", "self", ".", "_name", "=", "value", "# generate slug", "if", "not", "self", ".", "slug", ":", "self", ".", "slug", "=", "KnwKB", ".", "generate_slug", "(", "value", ")" ]
Set name and generate the slug.
[ "Set", "name", "and", "generate", "the", "slug", "." ]
b31722dc14243ca8f626f8b3bce9718d0119de55
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/models.py#L64-L69
240,968
inveniosoftware-attic/invenio-knowledge
invenio_knowledge/models.py
KnwKB.kbtype
def kbtype(self, value): """Set kbtype.""" if value is None: # set the default value return # or set one of the available values kbtype = value[0] if len(value) > 0 else 'w' if kbtype not in ['t', 'd', 'w']: raise ValueError('unknown type "{value}", please use one of \ following values: "taxonomy", "dynamic" or \ "written_as"'.format(value=value)) self._kbtype = kbtype
python
def kbtype(self, value): """Set kbtype.""" if value is None: # set the default value return # or set one of the available values kbtype = value[0] if len(value) > 0 else 'w' if kbtype not in ['t', 'd', 'w']: raise ValueError('unknown type "{value}", please use one of \ following values: "taxonomy", "dynamic" or \ "written_as"'.format(value=value)) self._kbtype = kbtype
[ "def", "kbtype", "(", "self", ",", "value", ")", ":", "if", "value", "is", "None", ":", "# set the default value", "return", "# or set one of the available values", "kbtype", "=", "value", "[", "0", "]", "if", "len", "(", "value", ")", ">", "0", "else", "'w'", "if", "kbtype", "not", "in", "[", "'t'", ",", "'d'", ",", "'w'", "]", ":", "raise", "ValueError", "(", "'unknown type \"{value}\", please use one of \\\n following values: \"taxonomy\", \"dynamic\" or \\\n \"written_as\"'", ".", "format", "(", "value", "=", "value", ")", ")", "self", ".", "_kbtype", "=", "kbtype" ]
Set kbtype.
[ "Set", "kbtype", "." ]
b31722dc14243ca8f626f8b3bce9718d0119de55
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/models.py#L89-L100
240,969
inveniosoftware-attic/invenio-knowledge
invenio_knowledge/models.py
KnwKB.to_dict
def to_dict(self): """Return a dict representation of KnwKB.""" mydict = {'id': self.id, 'name': self.name, 'description': self.description, 'kbtype': self.kbtype} if self.kbtype == 'd': mydict.update((self.kbdefs.to_dict() if self.kbdefs else {}) or {}) return mydict
python
def to_dict(self): """Return a dict representation of KnwKB.""" mydict = {'id': self.id, 'name': self.name, 'description': self.description, 'kbtype': self.kbtype} if self.kbtype == 'd': mydict.update((self.kbdefs.to_dict() if self.kbdefs else {}) or {}) return mydict
[ "def", "to_dict", "(", "self", ")", ":", "mydict", "=", "{", "'id'", ":", "self", ".", "id", ",", "'name'", ":", "self", ".", "name", ",", "'description'", ":", "self", ".", "description", ",", "'kbtype'", ":", "self", ".", "kbtype", "}", "if", "self", ".", "kbtype", "==", "'d'", ":", "mydict", ".", "update", "(", "(", "self", ".", "kbdefs", ".", "to_dict", "(", ")", "if", "self", ".", "kbdefs", "else", "{", "}", ")", "or", "{", "}", ")", "return", "mydict" ]
Return a dict representation of KnwKB.
[ "Return", "a", "dict", "representation", "of", "KnwKB", "." ]
b31722dc14243ca8f626f8b3bce9718d0119de55
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/models.py#L106-L114
240,970
inveniosoftware-attic/invenio-knowledge
invenio_knowledge/models.py
KnwKB.set_dyn_config
def set_dyn_config(self, field, expression, collection=None): """Set dynamic configuration.""" if self.kbdefs: # update self.kbdefs.output_tag = field self.kbdefs.search_expression = expression self.kbdefs.collection = collection db.session.merge(self.kbdefs) else: # insert self.kbdefs = KnwKBDDEF(output_tag=field, search_expression=expression, collection=collection)
python
def set_dyn_config(self, field, expression, collection=None): """Set dynamic configuration.""" if self.kbdefs: # update self.kbdefs.output_tag = field self.kbdefs.search_expression = expression self.kbdefs.collection = collection db.session.merge(self.kbdefs) else: # insert self.kbdefs = KnwKBDDEF(output_tag=field, search_expression=expression, collection=collection)
[ "def", "set_dyn_config", "(", "self", ",", "field", ",", "expression", ",", "collection", "=", "None", ")", ":", "if", "self", ".", "kbdefs", ":", "# update", "self", ".", "kbdefs", ".", "output_tag", "=", "field", "self", ".", "kbdefs", ".", "search_expression", "=", "expression", "self", ".", "kbdefs", ".", "collection", "=", "collection", "db", ".", "session", ".", "merge", "(", "self", ".", "kbdefs", ")", "else", ":", "# insert", "self", ".", "kbdefs", "=", "KnwKBDDEF", "(", "output_tag", "=", "field", ",", "search_expression", "=", "expression", ",", "collection", "=", "collection", ")" ]
Set dynamic configuration.
[ "Set", "dynamic", "configuration", "." ]
b31722dc14243ca8f626f8b3bce9718d0119de55
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/models.py#L180-L192
240,971
inveniosoftware-attic/invenio-knowledge
invenio_knowledge/models.py
KnwKB.generate_slug
def generate_slug(name): """Generate a slug for the knowledge. :param name: text to slugify :return: slugified text """ slug = slugify(name) i = KnwKB.query.filter(db.or_( KnwKB.slug.like(slug), KnwKB.slug.like(slug + '-%'), )).count() return slug + ('-{0}'.format(i) if i > 0 else '')
python
def generate_slug(name): """Generate a slug for the knowledge. :param name: text to slugify :return: slugified text """ slug = slugify(name) i = KnwKB.query.filter(db.or_( KnwKB.slug.like(slug), KnwKB.slug.like(slug + '-%'), )).count() return slug + ('-{0}'.format(i) if i > 0 else '')
[ "def", "generate_slug", "(", "name", ")", ":", "slug", "=", "slugify", "(", "name", ")", "i", "=", "KnwKB", ".", "query", ".", "filter", "(", "db", ".", "or_", "(", "KnwKB", ".", "slug", ".", "like", "(", "slug", ")", ",", "KnwKB", ".", "slug", ".", "like", "(", "slug", "+", "'-%'", ")", ",", ")", ")", ".", "count", "(", ")", "return", "slug", "+", "(", "'-{0}'", ".", "format", "(", "i", ")", "if", "i", ">", "0", "else", "''", ")" ]
Generate a slug for the knowledge. :param name: text to slugify :return: slugified text
[ "Generate", "a", "slug", "for", "the", "knowledge", "." ]
b31722dc14243ca8f626f8b3bce9718d0119de55
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/models.py#L195-L208
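The uniqueness scheme above counts every existing slug equal to the base slug or starting with it plus a dash, then appends that count as a suffix. A self-contained sketch of the same idea without the database; the slugify import is an assumption (a python-slugify-style helper, matching what the module appears to use):

from slugify import slugify  # assumption: same kind of helper the module imports

def unique_slug(name, existing_slugs):
    """Append '-<count>' when the base slug already exists (mirrors KnwKB.generate_slug)."""
    slug = slugify(name)
    i = sum(1 for s in existing_slugs
            if s == slug or s.startswith(slug + '-'))
    return slug + ('-{0}'.format(i) if i > 0 else '')

# unique_slug('My KB', [])                    -> 'my-kb'
# unique_slug('My KB', ['my-kb'])             -> 'my-kb-1'
# unique_slug('My KB', ['my-kb', 'my-kb-1'])  -> 'my-kb-2'

Note that SQL LIKE 'slug-%' matches any suffix after the dash; startswith() approximates that here.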
240,972
inveniosoftware-attic/invenio-knowledge
invenio_knowledge/models.py
KnwKB.query_exists
def query_exists(filters): """Return True if a kb with the given filters exists. E.g: KnwKB.query_exists(KnwKB.name.like('FAQ')) :param filters: filter for sqlalchemy :return: True if kb exists """ return db.session.query( KnwKB.query.filter( filters).exists()).scalar()
python
def query_exists(filters): """Return True if a kb with the given filters exists. E.g: KnwKB.query_exists(KnwKB.name.like('FAQ')) :param filters: filter for sqlalchemy :return: True if kb exists """ return db.session.query( KnwKB.query.filter( filters).exists()).scalar()
[ "def", "query_exists", "(", "filters", ")", ":", "return", "db", ".", "session", ".", "query", "(", "KnwKB", ".", "query", ".", "filter", "(", "filters", ")", ".", "exists", "(", ")", ")", ".", "scalar", "(", ")" ]
Return True if a kb with the given filters exists. E.g: KnwKB.query_exists(KnwKB.name.like('FAQ')) :param filters: filter for sqlalchemy :return: True if kb exists
[ "Return", "True", "if", "a", "kb", "with", "the", "given", "filters", "exists", "." ]
b31722dc14243ca8f626f8b3bce9718d0119de55
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/models.py#L220-L230
240,973
inveniosoftware-attic/invenio-knowledge
invenio_knowledge/models.py
KnwKBDDEF.to_dict
def to_dict(self): """Return a dict representation of KnwKBDDEF.""" return {'field': self.output_tag, 'expression': self.search_expression, 'coll_id': self.id_collection, 'collection': self.collection.name if self.collection else None}
python
def to_dict(self): """Return a dict representation of KnwKBDDEF.""" return {'field': self.output_tag, 'expression': self.search_expression, 'coll_id': self.id_collection, 'collection': self.collection.name if self.collection else None}
[ "def", "to_dict", "(", "self", ")", ":", "return", "{", "'field'", ":", "self", ".", "output_tag", ",", "'expression'", ":", "self", ".", "search_expression", ",", "'coll_id'", ":", "self", ".", "id_collection", ",", "'collection'", ":", "self", ".", "collection", ".", "name", "if", "self", ".", "collection", "else", "None", "}" ]
Return a dict representation of KnwKBDDEF.
[ "Return", "a", "dict", "representation", "of", "KnwKBDDEF", "." ]
b31722dc14243ca8f626f8b3bce9718d0119de55
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/models.py#L269-L275
240,974
inveniosoftware-attic/invenio-knowledge
invenio_knowledge/models.py
KnwKBRVAL.to_dict
def to_dict(self): """Return a dict representation of KnwKBRVAL.""" # FIXME remove 'id' dependency from invenio modules return {'id': self.m_key + "_" + str(self.id_knwKB), 'key': self.m_key, 'value': self.m_value, 'kbid': self.kb.id if self.kb else None, 'kbname': self.kb.name if self.kb else None}
python
def to_dict(self): """Return a dict representation of KnwKBRVAL.""" # FIXME remove 'id' dependency from invenio modules return {'id': self.m_key + "_" + str(self.id_knwKB), 'key': self.m_key, 'value': self.m_value, 'kbid': self.kb.id if self.kb else None, 'kbname': self.kb.name if self.kb else None}
[ "def", "to_dict", "(", "self", ")", ":", "# FIXME remove 'id' dependency from invenio modules", "return", "{", "'id'", ":", "self", ".", "m_key", "+", "\"_\"", "+", "str", "(", "self", ".", "id_knwKB", ")", ",", "'key'", ":", "self", ".", "m_key", ",", "'value'", ":", "self", ".", "m_value", ",", "'kbid'", ":", "self", ".", "kb", ".", "id", "if", "self", ".", "kb", "else", "None", ",", "'kbname'", ":", "self", ".", "kb", ".", "name", "if", "self", ".", "kb", "else", "None", "}" ]
Return a dict representation of KnwKBRVAL.
[ "Return", "a", "dict", "representation", "of", "KnwKBRVAL", "." ]
b31722dc14243ca8f626f8b3bce9718d0119de55
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/invenio_knowledge/models.py#L346-L353
240,975
tBaxter/tango-shared-core
build/lib/tango_shared/templatetags/social_tags.py
social_links
def social_links(context, object, user=None, authed=False, downable=False, vote_down_msg=None): """ Outputs social links. At minimum, this will be Facebook and Twitter. But if possible, it will also output voting and watchlist links. Usage: {% social_links object %} {% social_links object user %} {% social_links object user authenticated_request %} """ # check if voting available voting = False if hasattr(object, 'votes'): voting = True return { 'object': object, 'url': object.get_absolute_url(), 'site': get_current_site(context.request), 'ctype': ContentType.objects.get_for_model(object), 'user': user, 'voting': voting, 'vote_down': downable, 'vote_down_msg': vote_down_msg, 'authenticated_request': authed, }
python
def social_links(context, object, user=None, authed=False, downable=False, vote_down_msg=None): """ Outputs social links. At minimum, this will be Facebook and Twitter. But if possible, it will also output voting and watchlist links. Usage: {% social_links object %} {% social_links object user %} {% social_links object user authenticated_request %} """ # check if voting available voting = False if hasattr(object, 'votes'): voting = True return { 'object': object, 'url': object.get_absolute_url(), 'site': get_current_site(context.request), 'ctype': ContentType.objects.get_for_model(object), 'user': user, 'voting': voting, 'vote_down': downable, 'vote_down_msg': vote_down_msg, 'authenticated_request': authed, }
[ "def", "social_links", "(", "context", ",", "object", ",", "user", "=", "None", ",", "authed", "=", "False", ",", "downable", "=", "False", ",", "vote_down_msg", "=", "None", ")", ":", "# check if voting available", "voting", "=", "False", "if", "hasattr", "(", "object", ",", "'votes'", ")", ":", "voting", "=", "True", "return", "{", "'object'", ":", "object", ",", "'url'", ":", "object", ".", "get_absolute_url", "(", ")", ",", "'site'", ":", "get_current_site", "(", "context", ".", "request", ")", ",", "'ctype'", ":", "ContentType", ".", "objects", ".", "get_for_model", "(", "object", ")", ",", "'user'", ":", "user", ",", "'voting'", ":", "voting", ",", "'vote_down'", ":", "downable", ",", "'vote_down_msg'", ":", "vote_down_msg", ",", "'authenticated_request'", ":", "authed", ",", "}" ]
Outputs social links. At minimum, this will be Facebook and Twitter. But if possible, it will also output voting and watchlist links. Usage: {% social_links object %} {% social_links object user %} {% social_links object user authenticated_request %}
[ "Outputs", "social", "links", ".", "At", "minimum", "this", "will", "be", "Facebook", "and", "Twitter", ".", "But", "if", "possible", "it", "will", "also", "output", "voting", "and", "watchlist", "links", "." ]
35fc10aef1ceedcdb4d6d866d44a22efff718812
https://github.com/tBaxter/tango-shared-core/blob/35fc10aef1ceedcdb4d6d866d44a22efff718812/build/lib/tango_shared/templatetags/social_tags.py#L9-L34
240,976
mattupstate/cubric
cubric/utils.py
render
def render(obj): """Conveniently render strings with the fabric context""" def get_v(v): return v % env if isinstance(v, basestring) else v if isinstance(obj, types.StringType): return obj % env elif isinstance(obj, types.TupleType) or isinstance(obj, types.ListType): rv = [] for v in obj: rv.append(get_v(v)) elif isinstance(obj, types.DictType): rv = {} for k, v in obj.items(): rv[k] = get_v(v) return rv
python
def render(obj): """Conveniently render strings with the fabric context""" def get_v(v): return v % env if isinstance(v, basestring) else v if isinstance(obj, types.StringType): return obj % env elif isinstance(obj, types.TupleType) or isinstance(obj, types.ListType): rv = [] for v in obj: rv.append(get_v(v)) elif isinstance(obj, types.DictType): rv = {} for k, v in obj.items(): rv[k] = get_v(v) return rv
[ "def", "render", "(", "obj", ")", ":", "def", "get_v", "(", "v", ")", ":", "return", "v", "%", "env", "if", "isinstance", "(", "v", ",", "basestring", ")", "else", "v", "if", "isinstance", "(", "obj", ",", "types", ".", "StringType", ")", ":", "return", "obj", "%", "env", "elif", "isinstance", "(", "obj", ",", "types", ".", "TupleType", ")", "or", "isinstance", "(", "obj", ",", "types", ".", "ListType", ")", ":", "rv", "=", "[", "]", "for", "v", "in", "obj", ":", "rv", ".", "append", "(", "get_v", "(", "v", ")", ")", "elif", "isinstance", "(", "obj", ",", "types", ".", "DictType", ")", ":", "rv", "=", "{", "}", "for", "k", ",", "v", "in", "obj", ".", "items", "(", ")", ":", "rv", "[", "k", "]", "=", "get_v", "(", "v", ")", "return", "rv" ]
Conveniently render strings with the fabric context
[ "Convienently", "render", "strings", "with", "the", "fabric", "context" ]
a648ce00e4467cd14d71e754240ef6c1f87a34b5
https://github.com/mattupstate/cubric/blob/a648ce00e4467cd14d71e754240ef6c1f87a34b5/cubric/utils.py#L105-L121
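Two caveats about the render helper above: it is Python 2 only (basestring, types.StringType), and when obj is none of the handled types it falls through to an unbound rv and raises NameError. A hedged Python 3 rework; the env mapping is passed explicitly here rather than taken from fabric's global state, and it recurses into containers, which differs slightly from the original's one-level get_v:

def render(obj, env):
    """Interpolate '%'-style placeholders from env throughout obj."""
    if isinstance(obj, str):
        return obj % env
    if isinstance(obj, (list, tuple)):
        return [render(v, env) for v in obj]
    if isinstance(obj, dict):
        return {k: render(v, env) for k, v in obj.items()}
    return obj  # unsupported types pass through instead of raising NameError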
240,977
jcalogovic/lightning
stormstats/misc.py
read_wwln
def read_wwln(file): """Read WWLN file""" tmp = pd.read_csv(file, parse_dates=True, header=None, names=['date', 'time', 'lat', 'lon', 'err', '#sta']) # Generate a list of datetime objects with time to milliseconds list_dts = [] for dvals, tvals, in zip(tmp['date'], tmp['time']): list_dts.append(gen_datetime(dvals, tvals)) dtdf = pd.DataFrame(list_dts, columns=['datetime']) result = pd.concat([dtdf, tmp], axis=1) result = result.drop(['date', 'time'], axis=1) return result
python
def read_wwln(file): """Read WWLN file""" tmp = pd.read_csv(file, parse_dates=True, header=None, names=['date', 'time', 'lat', 'lon', 'err', '#sta']) # Generate a list of datetime objects with time to milliseconds list_dts = [] for dvals, tvals, in zip(tmp['date'], tmp['time']): list_dts.append(gen_datetime(dvals, tvals)) dtdf = pd.DataFrame(list_dts, columns=['datetime']) result = pd.concat([dtdf, tmp], axis=1) result = result.drop(['date', 'time'], axis=1) return result
[ "def", "read_wwln", "(", "file", ")", ":", "tmp", "=", "pd", ".", "read_csv", "(", "file", ",", "parse_dates", "=", "True", ",", "header", "=", "None", ",", "names", "=", "[", "'date'", ",", "'time'", ",", "'lat'", ",", "'lon'", ",", "'err'", ",", "'#sta'", "]", ")", "# Generate a list of datetime objects with time to miliseconds", "list_dts", "=", "[", "]", "for", "dvals", ",", "tvals", ",", "in", "zip", "(", "tmp", "[", "'date'", "]", ",", "tmp", "[", "'time'", "]", ")", ":", "list_dts", ".", "append", "(", "gen_datetime", "(", "dvals", ",", "tvals", ")", ")", "dtdf", "=", "pd", ".", "DataFrame", "(", "list_dts", ",", "columns", "=", "[", "'datetime'", "]", ")", "result", "=", "pd", ".", "concat", "(", "[", "dtdf", ",", "tmp", "]", ",", "axis", "=", "1", ")", "result", "=", "result", ".", "drop", "(", "[", "'date'", ",", "'time'", "]", ",", "axis", "=", "1", ")", "return", "result" ]
Read WWLN file
[ "Read", "WWLN", "file" ]
f9e52731c9dd40cb302295ec36a444e0377d0570
https://github.com/jcalogovic/lightning/blob/f9e52731c9dd40cb302295ec36a444e0377d0570/stormstats/misc.py#L15-L26
240,978
jcalogovic/lightning
stormstats/misc.py
wwln_to_geopandas
def wwln_to_geopandas(file): """Read data from Blitzorg first using pandas.read_csv for convenience, and then convert lat, lon points to a shapely geometry POINT type. Finally put this geometry into a geopandas dataframe and return it.""" tmp = pd.read_csv(file, parse_dates=True, header=None, names=['date', 'time', 'lat', 'lon', 'err', '#sta']) list_dts = [gen_datetime(dvals, tvals) for dvals, tvals in zip(tmp['date'], tmp['time'])] points = [[Point(tmp.lat[row], tmp.lon[row]), dt] for row, dt in zip(tmp.index, list_dts)] df = gpd.GeoDataFrame(points, columns=['geometry', 'dt']) return df
python
def wwln_to_geopandas(file): """Read data from Blitzorg first using pandas.read_csv for convenience, and then convert lat, lon points to a shapely geometry POINT type. Finally put this geometry into a geopandas dataframe and return it.""" tmp = pd.read_csv(file, parse_dates=True, header=None, names=['date', 'time', 'lat', 'lon', 'err', '#sta']) list_dts = [gen_datetime(dvals, tvals) for dvals, tvals in zip(tmp['date'], tmp['time'])] points = [[Point(tmp.lat[row], tmp.lon[row]), dt] for row, dt in zip(tmp.index, list_dts)] df = gpd.GeoDataFrame(points, columns=['geometry', 'dt']) return df
[ "def", "wwln_to_geopandas", "(", "file", ")", ":", "tmp", "=", "pd", ".", "read_csv", "(", "file", ",", "parse_dates", "=", "True", ",", "header", "=", "None", ",", "names", "=", "[", "'date'", ",", "'time'", ",", "'lat'", ",", "'lon'", ",", "'err'", ",", "'#sta'", "]", ")", "list_dts", "=", "[", "gen_datetime", "(", "dvals", ",", "tvals", ")", "for", "dvals", ",", "tvals", "in", "zip", "(", "tmp", "[", "'date'", "]", ",", "tmp", "[", "'time'", "]", ")", "]", "points", "=", "[", "[", "Point", "(", "tmp", ".", "lat", "[", "row", "]", ",", "tmp", ".", "lon", "[", "row", "]", ")", ",", "dt", "]", "for", "row", ",", "dt", "in", "zip", "(", "tmp", ".", "index", ",", "list_dts", ")", "]", "df", "=", "gpd", ".", "GeoDataFrame", "(", "points", ",", "columns", "=", "[", "'geometry'", ",", "'dt'", "]", ")", "return", "df" ]
Read data from Blitzorg first using pandas.read_csv for convenience, and then convert lat, lon points to a shapely geometry POINT type. Finally put this geometry into a geopandas dataframe and return it.
[ "Read", "data", "from", "Blitzorg", "first", "using", "pandas", ".", "read_csv", "for", "convienence", "and", "then", "convert", "lat", "lon", "points", "to", "a", "shaleply", "geometry", "POINT", "type", ".", "Finally", "put", "this", "gemoetry", "into", "a", "geopandas", "dataframe", "and", "return", "it", "." ]
f9e52731c9dd40cb302295ec36a444e0377d0570
https://github.com/jcalogovic/lightning/blob/f9e52731c9dd40cb302295ec36a444e0377d0570/stormstats/misc.py#L29-L40
240,979
jcalogovic/lightning
stormstats/misc.py
gen_time_intervals
def gen_time_intervals(start, end, delta): """Create time intervals with timedelta periods using datetime for start and end """ curr = start while curr < end: yield curr curr += delta
python
def gen_time_intervals(start, end, delta): """Create time intervals with timedelta periods using datetime for start and end """ curr = start while curr < end: yield curr curr += delta
[ "def", "gen_time_intervals", "(", "start", ",", "end", ",", "delta", ")", ":", "curr", "=", "start", "while", "curr", "<", "end", ":", "yield", "curr", "curr", "+=", "delta" ]
Create time intervals with timedelta periods using datetime for start and end
[ "Create", "time", "intervals", "with", "timedelta", "periods", "using", "datetime", "for", "start", "and", "end" ]
f9e52731c9dd40cb302295ec36a444e0377d0570
https://github.com/jcalogovic/lightning/blob/f9e52731c9dd40cb302295ec36a444e0377d0570/stormstats/misc.py#L162-L169
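A quick usage example for the generator above; note that the end datetime is exclusive:

from datetime import datetime, timedelta

start = datetime(2015, 6, 1, 0)
end = datetime(2015, 6, 1, 3)
for t in gen_time_intervals(start, end, timedelta(hours=1)):
    print(t)
# 2015-06-01 00:00:00
# 2015-06-01 01:00:00
# 2015-06-01 02:00:00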
240,980
jcalogovic/lightning
stormstats/misc.py
extract_date
def extract_date(value): """ Convert timestamp to datetime and set everything to zero except a date """ dtime = value.to_datetime() dtime = (dtime - timedelta(hours=dtime.hour) - timedelta(minutes=dtime.minute) - timedelta(seconds=dtime.second) - timedelta(microseconds=dtime.microsecond)) return dtime
python
def extract_date(value): """ Convert timestamp to datetime and set everything to zero except a date """ dtime = value.to_datetime() dtime = (dtime - timedelta(hours=dtime.hour) - timedelta(minutes=dtime.minute) - timedelta(seconds=dtime.second) - timedelta(microseconds=dtime.microsecond)) return dtime
[ "def", "extract_date", "(", "value", ")", ":", "dtime", "=", "value", ".", "to_datetime", "(", ")", "dtime", "=", "(", "dtime", "-", "timedelta", "(", "hours", "=", "dtime", ".", "hour", ")", "-", "timedelta", "(", "minutes", "=", "dtime", ".", "minute", ")", "-", "timedelta", "(", "seconds", "=", "dtime", ".", "second", ")", "-", "timedelta", "(", "microseconds", "=", "dtime", ".", "microsecond", ")", ")", "return", "dtime" ]
Convert timestamp to datetime and set everything to zero except a date
[ "Convert", "timestamp", "to", "datetime", "and", "set", "everything", "to", "zero", "except", "a", "date" ]
f9e52731c9dd40cb302295ec36a444e0377d0570
https://github.com/jcalogovic/lightning/blob/f9e52731c9dd40cb302295ec36a444e0377d0570/stormstats/misc.py#L172-L179
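The chain of timedelta subtractions above zeroes the time components one by one; the same result is available directly from the standard library, and Timestamp.to_datetime() is deprecated in modern pandas in favour of to_pydatetime(). A minimal equivalent sketch, assuming value is a pandas Timestamp:

def extract_date(value):
    """Truncate a pandas Timestamp to midnight, returning a datetime."""
    return value.to_pydatetime().replace(hour=0, minute=0,
                                         second=0, microsecond=0)

# Staying inside pandas, value.normalize() performs the same truncation.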
240,981
krukas/Trionyx
trionyx/trionyx/middleware.py
LoginRequiredMiddleware.process_request
def process_request(self, request): """Check if user is logged in""" assert hasattr(request, 'user') if not request.user.is_authenticated(): path = request.path_info.lstrip('/') if not any(m.match(path) for m in EXEMPT_URLS): return HttpResponseRedirect(reverse(settings.LOGIN_URL))
python
def process_request(self, request): """Check if user is logged in""" assert hasattr(request, 'user') if not request.user.is_authenticated(): path = request.path_info.lstrip('/') if not any(m.match(path) for m in EXEMPT_URLS): return HttpResponseRedirect(reverse(settings.LOGIN_URL))
[ "def", "process_request", "(", "self", ",", "request", ")", ":", "assert", "hasattr", "(", "request", ",", "'user'", ")", "if", "not", "request", ".", "user", ".", "is_authenticated", "(", ")", ":", "path", "=", "request", ".", "path_info", ".", "lstrip", "(", "'/'", ")", "if", "not", "any", "(", "m", ".", "match", "(", "path", ")", "for", "m", "in", "EXEMPT_URLS", ")", ":", "return", "HttpResponseRedirect", "(", "reverse", "(", "settings", ".", "LOGIN_URL", ")", ")" ]
Check if user is logged in
[ "Check", "if", "user", "is", "logged", "in" ]
edac132cc0797190153f2e60bc7e88cb50e80da6
https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/middleware.py#L32-L38
240,982
MacHu-GWU/angora-project
angora/dataIO/js.py
prt_js
def prt_js(js, sort_keys=True, indent=4): """Print Json in pretty format. There's a standard module pprint that can pretty print python dict and list. But it doesn't support sorted keys. That's why we need this func. Usage:: >>> from weatherlab.lib.dataIO.js import prt_js >>> prt_js({"a": 1, "b": 2}) { "a": 1, "b": 2 } **Chinese documentation** Print a JSON-serializable Python object in a human-readable way. """ print(js2str(js, sort_keys, indent))
python
def prt_js(js, sort_keys=True, indent=4): """Print Json in pretty format. There's a standard module pprint that can pretty print python dict and list. But it doesn't support sorted keys. That's why we need this func. Usage:: >>> from weatherlab.lib.dataIO.js import prt_js >>> prt_js({"a": 1, "b": 2}) { "a": 1, "b": 2 } **Chinese documentation** Print a JSON-serializable Python object in a human-readable way. """ print(js2str(js, sort_keys, indent))
[ "def", "prt_js", "(", "js", ",", "sort_keys", "=", "True", ",", "indent", "=", "4", ")", ":", "print", "(", "js2str", "(", "js", ",", "sort_keys", ",", "indent", ")", ")" ]
Print Json in pretty format. There's a standard module pprint that can pretty print python dict and list. But it doesn't support sorted keys. That's why we need this func. Usage:: >>> from weatherlab.lib.dataIO.js import prt_js >>> prt_js({"a": 1, "b": 2}) { "a": 1, "b": 2 } **Chinese documentation** Print a JSON-serializable Python object in a human-readable way.
[ "Print", "Json", "in", "pretty", "format", ".", "There", "s", "a", "standard", "module", "pprint", "can", "pretty", "print", "python", "dict", "and", "list", ".", "But", "it", "doesn", "t", "support", "sorted", "key", ".", "That", "why", "we", "need", "this", "func", "." ]
689a60da51cd88680ddbe26e28dbe81e6b01d275
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/dataIO/js.py#L388-L406
240,983
MacHu-GWU/angora-project
angora/crawler/downloader.py
string_SizeInBytes
def string_SizeInBytes(size_in_bytes): """Make ``size in bytes`` human readable. Doesn't support size greater than 1000PB. Usage:: >>> from __future__ import print_function >>> from weatherlab.lib.filesystem.windowsexplorer import string_SizeInBytes >>> print(string_SizeInBytes(100)) 100 B >>> print(string_SizeInBytes(100*1000)) 97.66 KB >>> print(string_SizeInBytes(100*1000**2)) 95.37 MB >>> print(string_SizeInBytes(100*1000**3)) 93.13 GB >>> print(string_SizeInBytes(100*1000**4)) 90.95 TB >>> print(string_SizeInBytes(100*1000**5)) 88.82 PB """ res, by = divmod(size_in_bytes, 1024) res, kb = divmod(res, 1024) res, mb = divmod(res, 1024) res, gb = divmod(res, 1024) pb, tb = divmod(res, 1024) if pb != 0: human_readable_size = "%.2f PB" % (pb + tb / float(1024)) elif tb != 0: human_readable_size = "%.2f TB" % (tb + gb / float(1024)) elif gb != 0: human_readable_size = "%.2f GB" % (gb + mb / float(1024)) elif mb != 0: human_readable_size = "%.2f MB" % (mb + kb / float(1024)) elif kb != 0: human_readable_size = "%.2f KB" % (kb + by / float(1024)) else: human_readable_size = "%s B" % by return human_readable_size
python
def string_SizeInBytes(size_in_bytes): """Make ``size in bytes`` human readable. Doesn't support size greater than 1000PB. Usage:: >>> from __future__ import print_function >>> from weatherlab.lib.filesystem.windowsexplorer import string_SizeInBytes >>> print(string_SizeInBytes(100)) 100 B >>> print(string_SizeInBytes(100*1000)) 97.66 KB >>> print(string_SizeInBytes(100*1000**2)) 95.37 MB >>> print(string_SizeInBytes(100*1000**3)) 93.13 GB >>> print(string_SizeInBytes(100*1000**4)) 90.95 TB >>> print(string_SizeInBytes(100*1000**5)) 88.82 PB """ res, by = divmod(size_in_bytes, 1024) res, kb = divmod(res, 1024) res, mb = divmod(res, 1024) res, gb = divmod(res, 1024) pb, tb = divmod(res, 1024) if pb != 0: human_readable_size = "%.2f PB" % (pb + tb / float(1024)) elif tb != 0: human_readable_size = "%.2f TB" % (tb + gb / float(1024)) elif gb != 0: human_readable_size = "%.2f GB" % (gb + mb / float(1024)) elif mb != 0: human_readable_size = "%.2f MB" % (mb + kb / float(1024)) elif kb != 0: human_readable_size = "%.2f KB" % (kb + by / float(1024)) else: human_readable_size = "%s B" % by return human_readable_size
[ "def", "string_SizeInBytes", "(", "size_in_bytes", ")", ":", "res", ",", "by", "=", "divmod", "(", "size_in_bytes", ",", "1024", ")", "res", ",", "kb", "=", "divmod", "(", "res", ",", "1024", ")", "res", ",", "mb", "=", "divmod", "(", "res", ",", "1024", ")", "res", ",", "gb", "=", "divmod", "(", "res", ",", "1024", ")", "pb", ",", "tb", "=", "divmod", "(", "res", ",", "1024", ")", "if", "pb", "!=", "0", ":", "human_readable_size", "=", "\"%.2f PB\"", "%", "(", "pb", "+", "tb", "/", "float", "(", "1024", ")", ")", "elif", "tb", "!=", "0", ":", "human_readable_size", "=", "\"%.2f TB\"", "%", "(", "tb", "+", "gb", "/", "float", "(", "1024", ")", ")", "elif", "gb", "!=", "0", ":", "human_readable_size", "=", "\"%.2f GB\"", "%", "(", "gb", "+", "mb", "/", "float", "(", "1024", ")", ")", "elif", "mb", "!=", "0", ":", "human_readable_size", "=", "\"%.2f MB\"", "%", "(", "mb", "+", "kb", "/", "float", "(", "1024", ")", ")", "elif", "kb", "!=", "0", ":", "human_readable_size", "=", "\"%.2f KB\"", "%", "(", "kb", "+", "by", "/", "float", "(", "1024", ")", ")", "else", ":", "human_readable_size", "=", "\"%s B\"", "%", "by", "return", "human_readable_size" ]
Make ``size in bytes`` human readable. Doesn't support size greater than 1000PB. Usage:: >>> from __future__ import print_function >>> from weatherlab.lib.filesystem.windowsexplorer import string_SizeInBytes >>> print(string_SizeInBytes(100)) 100 B >>> print(string_SizeInBytes(100*1000)) 97.66 KB >>> print(string_SizeInBytes(100*1000**2)) 95.37 MB >>> print(string_SizeInBytes(100*1000**3)) 93.13 GB >>> print(string_SizeInBytes(100*1000**4)) 90.95 TB >>> print(string_SizeInBytes(100*1000**5)) 88.82 PB
[ "Make", "size", "in", "bytes", "human", "readable", ".", "Doesn", "t", "support", "size", "greater", "than", "1000PB", "." ]
689a60da51cd88680ddbe26e28dbe81e6b01d275
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/crawler/downloader.py#L25-L63
240,984
MacHu-GWU/angora-project
angora/crawler/downloader.py
download_url
def download_url(url, save_as, iter_size=_default_iter_size, enable_verbose=True): """A simple url binary content download function with progress info. Warning: this function will silently overwrite existing file. """ msg = Messenger() if enable_verbose: msg.on() else: msg.off() msg.show("Downloading %s from %s..." % (save_as, url)) with open(save_as, "wb") as f: response = requests.get(url, stream=True) if not response.ok: print("http get error!") return start_time = time.clock() downloaded_size = 0 for block in response.iter_content(iter_size): if not block: break f.write(block) elapse = datetime.timedelta(seconds=(time.clock() - start_time)) downloaded_size += sys.getsizeof(block) msg.show(" Finished %s, elapse %s." % ( string_SizeInBytes(downloaded_size), elapse )) msg.show(" Complete!")
python
def download_url(url, save_as, iter_size=_default_iter_size, enable_verbose=True): """A simple url binary content download function with progress info. Warning: this function will silently overwrite existing file. """ msg = Messenger() if enable_verbose: msg.on() else: msg.off() msg.show("Downloading %s from %s..." % (save_as, url)) with open(save_as, "wb") as f: response = requests.get(url, stream=True) if not response.ok: print("http get error!") return start_time = time.clock() downloaded_size = 0 for block in response.iter_content(iter_size): if not block: break f.write(block) elapse = datetime.timedelta(seconds=(time.clock() - start_time)) downloaded_size += sys.getsizeof(block) msg.show(" Finished %s, elapse %s." % ( string_SizeInBytes(downloaded_size), elapse )) msg.show(" Complete!")
[ "def", "download_url", "(", "url", ",", "save_as", ",", "iter_size", "=", "_default_iter_size", ",", "enable_verbose", "=", "True", ")", ":", "msg", "=", "Messenger", "(", ")", "if", "enable_verbose", ":", "msg", ".", "on", "(", ")", "else", ":", "msg", ".", "off", "(", ")", "msg", ".", "show", "(", "\"Downloading %s from %s...\"", "%", "(", "save_as", ",", "url", ")", ")", "with", "open", "(", "save_as", ",", "\"wb\"", ")", "as", "f", ":", "response", "=", "requests", ".", "get", "(", "url", ",", "stream", "=", "True", ")", "if", "not", "response", ".", "ok", ":", "print", "(", "\"http get error!\"", ")", "return", "start_time", "=", "time", ".", "clock", "(", ")", "downloaded_size", "=", "0", "for", "block", "in", "response", ".", "iter_content", "(", "iter_size", ")", ":", "if", "not", "block", ":", "break", "f", ".", "write", "(", "block", ")", "elapse", "=", "datetime", ".", "timedelta", "(", "seconds", "=", "(", "time", ".", "clock", "(", ")", "-", "start_time", ")", ")", "downloaded_size", "+=", "sys", ".", "getsizeof", "(", "block", ")", "msg", ".", "show", "(", "\" Finished %s, elapse %s.\"", "%", "(", "string_SizeInBytes", "(", "downloaded_size", ")", ",", "elapse", ")", ")", "msg", ".", "show", "(", "\" Complete!\"", ")" ]
A simple url binary content download function with progress info. Warning: this function will silently overwrite existing file.
[ "A", "simple", "url", "binary", "content", "download", "function", "with", "progress", "info", "." ]
689a60da51cd88680ddbe26e28dbe81e6b01d275
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/crawler/downloader.py#L66-L98
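Two caveats worth noting in the function above: sys.getsizeof(block) measures the Python object (payload plus interpreter overhead), not the number of bytes written, so the reported size drifts high; and time.clock() was removed in Python 3.8. A hedged rework with those fixes; the default chunk size is an assumption, since _default_iter_size is not shown in this excerpt:

import datetime
import time
import requests

def download_url(url, save_as, iter_size=1024 * 1024):
    """Stream url into save_as, reporting bytes actually written."""
    response = requests.get(url, stream=True)
    response.raise_for_status()
    start = time.monotonic()            # time.clock() is gone in Python 3.8+
    downloaded = 0
    with open(save_as, "wb") as f:
        for block in response.iter_content(iter_size):
            f.write(block)
            downloaded += len(block)    # payload length, not sys.getsizeof()
    elapsed = datetime.timedelta(seconds=time.monotonic() - start)
    print("Finished %d bytes in %s" % (downloaded, elapsed))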
240,985
Vito2015/pyextend
pyextend/formula/geo/geohash.py
decode
def decode(hashcode, delta=False): """ decode a hashcode and get center coordinate, and distance between center and outer border """ lat, lon, lat_length, lon_length = _decode_c2i(hashcode) if hasattr(float, "fromhex"): latitude_delta = 90.0/(1 << lat_length) longitude_delta = 180.0/(1 << lon_length) latitude = _int_to_float_hex(lat, lat_length) * 90.0 + latitude_delta longitude = _int_to_float_hex(lon, lon_length) * 180.0 + longitude_delta if delta: return latitude, longitude, latitude_delta, longitude_delta return latitude, longitude lat = (lat << 1) + 1 lon = (lon << 1) + 1 lat_length += 1 lon_length += 1 latitude = 180.0*(lat-(1 << (lat_length-1)))/(1 << lat_length) longitude = 360.0*(lon-(1 << (lon_length-1)))/(1 << lon_length) if delta: latitude_delta = 180.0/(1 << lat_length) longitude_delta = 360.0/(1 << lon_length) return latitude, longitude, latitude_delta, longitude_delta return latitude, longitude
python
def decode(hashcode, delta=False): """ decode a hashcode and get center coordinate, and distance between center and outer border """ lat, lon, lat_length, lon_length = _decode_c2i(hashcode) if hasattr(float, "fromhex"): latitude_delta = 90.0/(1 << lat_length) longitude_delta = 180.0/(1 << lon_length) latitude = _int_to_float_hex(lat, lat_length) * 90.0 + latitude_delta longitude = _int_to_float_hex(lon, lon_length) * 180.0 + longitude_delta if delta: return latitude, longitude, latitude_delta, longitude_delta return latitude, longitude lat = (lat << 1) + 1 lon = (lon << 1) + 1 lat_length += 1 lon_length += 1 latitude = 180.0*(lat-(1 << (lat_length-1)))/(1 << lat_length) longitude = 360.0*(lon-(1 << (lon_length-1)))/(1 << lon_length) if delta: latitude_delta = 180.0/(1 << lat_length) longitude_delta = 360.0/(1 << lon_length) return latitude, longitude, latitude_delta, longitude_delta return latitude, longitude
[ "def", "decode", "(", "hashcode", ",", "delta", "=", "False", ")", ":", "lat", ",", "lon", ",", "lat_length", ",", "lon_length", "=", "_decode_c2i", "(", "hashcode", ")", "if", "hasattr", "(", "float", ",", "\"fromhex\"", ")", ":", "latitude_delta", "=", "90.0", "/", "(", "1", "<<", "lat_length", ")", "longitude_delta", "=", "180.0", "/", "(", "1", "<<", "lon_length", ")", "latitude", "=", "_int_to_float_hex", "(", "lat", ",", "lat_length", ")", "*", "90.0", "+", "latitude_delta", "longitude", "=", "_int_to_float_hex", "(", "lon", ",", "lon_length", ")", "*", "180.0", "+", "longitude_delta", "if", "delta", ":", "return", "latitude", ",", "longitude", ",", "latitude_delta", ",", "longitude_delta", "return", "latitude", ",", "longitude", "lat", "=", "(", "lat", "<<", "1", ")", "+", "1", "lon", "=", "(", "lon", "<<", "1", ")", "+", "1", "lat_length", "+=", "1", "lon_length", "+=", "1", "latitude", "=", "180.0", "*", "(", "lat", "-", "(", "1", "<<", "(", "lat_length", "-", "1", ")", ")", ")", "/", "(", "1", "<<", "lat_length", ")", "longitude", "=", "360.0", "*", "(", "lon", "-", "(", "1", "<<", "(", "lon_length", "-", "1", ")", ")", ")", "/", "(", "1", "<<", "lon_length", ")", "if", "delta", ":", "latitude_delta", "=", "180.0", "/", "(", "1", "<<", "lat_length", ")", "longitude_delta", "=", "360.0", "/", "(", "1", "<<", "lon_length", ")", "return", "latitude", ",", "longitude", ",", "latitude_delta", ",", "longitude_delta", "return", "latitude", ",", "longitude" ]
decode a hashcode and get center coordinate, and distance between center and outer border
[ "decode", "a", "hashcode", "and", "get", "center", "coordinate", "and", "distance", "between", "center", "and", "outer", "border" ]
36861dfe1087e437ffe9b5a1da9345c85b4fa4a1
https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/formula/geo/geohash.py#L152-L180
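A short usage example, assuming this module follows the standard base32 geohash scheme (its decode/bbox pair suggests it does). The classic test hash 'ezs42' should decode to a point near latitude 42.6, longitude -5.6:

lat, lon = decode('ezs42')
lat, lon, dlat, dlon = decode('ezs42', delta=True)
# The cell spans (lat - dlat, lat + dlat) by (lon - dlon, lon + dlon).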
240,986
Vito2015/pyextend
pyextend/formula/geo/geohash.py
bbox
def bbox(hashcode): """ decode a hashcode and get north, south, east and west border. """ lat, lon, lat_length, lon_length = _decode_c2i(hashcode) if hasattr(float, "fromhex"): latitude_delta = 180.0/(1 << lat_length) longitude_delta = 360.0/(1 << lon_length) latitude = _int_to_float_hex(lat, lat_length) * 90.0 longitude = _int_to_float_hex(lon, lon_length) * 180.0 return {"s": latitude, "w": longitude, "n": latitude+latitude_delta, "e": longitude+longitude_delta} ret = {} if lat_length: ret['n'] = 180.0*(lat+1-(1 << (lat_length-1)))/(1 << lat_length) ret['s'] = 180.0*(lat-(1 << (lat_length-1)))/(1 << lat_length) else: # can't calculate the half with bit shifts (negative shift) ret['n'] = 90.0 ret['s'] = -90.0 if lon_length: ret['e'] = 360.0*(lon+1-(1 << (lon_length-1)))/(1 << lon_length) ret['w'] = 360.0*(lon-(1 << (lon_length-1)))/(1 << lon_length) else: # can't calculate the half with bit shifts (negative shift) ret['e'] = 180.0 ret['w'] = -180.0 return ret
python
def bbox(hashcode): """ decode a hashcode and get north, south, east and west border. """ lat, lon, lat_length, lon_length = _decode_c2i(hashcode) if hasattr(float, "fromhex"): latitude_delta = 180.0/(1 << lat_length) longitude_delta = 360.0/(1 << lon_length) latitude = _int_to_float_hex(lat, lat_length) * 90.0 longitude = _int_to_float_hex(lon, lon_length) * 180.0 return {"s": latitude, "w": longitude, "n": latitude+latitude_delta, "e": longitude+longitude_delta} ret = {} if lat_length: ret['n'] = 180.0*(lat+1-(1 << (lat_length-1)))/(1 << lat_length) ret['s'] = 180.0*(lat-(1 << (lat_length-1)))/(1 << lat_length) else: # can't calculate the half with bit shifts (negative shift) ret['n'] = 90.0 ret['s'] = -90.0 if lon_length: ret['e'] = 360.0*(lon+1-(1 << (lon_length-1)))/(1 << lon_length) ret['w'] = 360.0*(lon-(1 << (lon_length-1)))/(1 << lon_length) else: # can't calculate the half with bit shifts (negative shift) ret['e'] = 180.0 ret['w'] = -180.0 return ret
[ "def", "bbox", "(", "hashcode", ")", ":", "lat", ",", "lon", ",", "lat_length", ",", "lon_length", "=", "_decode_c2i", "(", "hashcode", ")", "if", "hasattr", "(", "float", ",", "\"fromhex\"", ")", ":", "latitude_delta", "=", "180.0", "/", "(", "1", "<<", "lat_length", ")", "longitude_delta", "=", "360.0", "/", "(", "1", "<<", "lon_length", ")", "latitude", "=", "_int_to_float_hex", "(", "lat", ",", "lat_length", ")", "*", "90.0", "longitude", "=", "_int_to_float_hex", "(", "lon", ",", "lon_length", ")", "*", "180.0", "return", "{", "\"s\"", ":", "latitude", ",", "\"w\"", ":", "longitude", ",", "\"n\"", ":", "latitude", "+", "latitude_delta", ",", "\"e\"", ":", "longitude", "+", "longitude_delta", "}", "ret", "=", "{", "}", "if", "lat_length", ":", "ret", "[", "'n'", "]", "=", "180.0", "*", "(", "lat", "+", "1", "-", "(", "1", "<<", "(", "lat_length", "-", "1", ")", ")", ")", "/", "(", "1", "<<", "lat_length", ")", "ret", "[", "'s'", "]", "=", "180.0", "*", "(", "lat", "-", "(", "1", "<<", "(", "lat_length", "-", "1", ")", ")", ")", "/", "(", "1", "<<", "lat_length", ")", "else", ":", "# can't calculate the half with bit shifts (negative shift)", "ret", "[", "'n'", "]", "=", "90.0", "ret", "[", "'s'", "]", "=", "-", "90.0", "if", "lon_length", ":", "ret", "[", "'e'", "]", "=", "360.0", "*", "(", "lon", "+", "1", "-", "(", "1", "<<", "(", "lon_length", "-", "1", ")", ")", ")", "/", "(", "1", "<<", "lon_length", ")", "ret", "[", "'w'", "]", "=", "360.0", "*", "(", "lon", "-", "(", "1", "<<", "(", "lon_length", "-", "1", ")", ")", ")", "/", "(", "1", "<<", "lon_length", ")", "else", ":", "# can't calculate the half with bit shifts (negative shift)", "ret", "[", "'e'", "]", "=", "180.0", "ret", "[", "'w'", "]", "=", "-", "180.0", "return", "ret" ]
decode a hashcode and get north, south, east and west border.
[ "decode", "a", "hashcode", "and", "get", "north", "south", "east", "and", "west", "border", "." ]
36861dfe1087e437ffe9b5a1da9345c85b4fa4a1
https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/formula/geo/geohash.py#L189-L217
240,987
rsalmaso/django-fluo
fluo/admin/nested.py
NestedModelAdmin.save_formset
def save_formset(self, request, form, formset, change): """ Given an inline formset save it to the database. """ formset.save() for form in formset.forms: if hasattr(form, 'nested_formsets') and form not in formset.deleted_forms: for nested_formset in form.nested_formsets: self.save_formset(request, form, nested_formset, change)
python
def save_formset(self, request, form, formset, change): """ Given an inline formset save it to the database. """ formset.save() for form in formset.forms: if hasattr(form, 'nested_formsets') and form not in formset.deleted_forms: for nested_formset in form.nested_formsets: self.save_formset(request, form, nested_formset, change)
[ "def", "save_formset", "(", "self", ",", "request", ",", "form", ",", "formset", ",", "change", ")", ":", "formset", ".", "save", "(", ")", "for", "form", "in", "formset", ".", "forms", ":", "if", "hasattr", "(", "form", ",", "'nested_formsets'", ")", "and", "form", "not", "in", "formset", ".", "deleted_forms", ":", "for", "nested_formset", "in", "form", ".", "nested_formsets", ":", "self", ".", "save_formset", "(", "request", ",", "form", ",", "nested_formset", ",", "change", ")" ]
Given an inline formset save it to the database.
[ "Given", "an", "inline", "formset", "save", "it", "to", "the", "database", "." ]
1321c1e7d6a912108f79be02a9e7f2108c57f89f
https://github.com/rsalmaso/django-fluo/blob/1321c1e7d6a912108f79be02a9e7f2108c57f89f/fluo/admin/nested.py#L72-L81
240,988
rsalmaso/django-fluo
fluo/admin/nested.py
NestedModelAdmin.all_valid_with_nesting
def all_valid_with_nesting(self, formsets): "Recursively validate all nested formsets" if not all_valid(formsets): return False for formset in formsets: if not formset.is_bound: pass for form in formset: if hasattr(form, 'nested_formsets'): if not self.all_valid_with_nesting(form.nested_formsets): return False #TODO - find out why this breaks when extra = 1 and just adding new item with no sub items if (not hasattr(form, 'cleaned_data') or not form.cleaned_data) and self.formset_has_nested_data(form.nested_formsets): form._errors["__all__"] = form.error_class(["Parent object must be created when creating nested inlines."]) return False return True
python
def all_valid_with_nesting(self, formsets): "Recursively validate all nested formsets" if not all_valid(formsets): return False for formset in formsets: if not formset.is_bound: pass for form in formset: if hasattr(form, 'nested_formsets'): if not self.all_valid_with_nesting(form.nested_formsets): return False #TODO - find out why this breaks when extra = 1 and just adding new item with no sub items if (not hasattr(form, 'cleaned_data') or not form.cleaned_data) and self.formset_has_nested_data(form.nested_formsets): form._errors["__all__"] = form.error_class(["Parent object must be created when creating nested inlines."]) return False return True
[ "def", "all_valid_with_nesting", "(", "self", ",", "formsets", ")", ":", "if", "not", "all_valid", "(", "formsets", ")", ":", "return", "False", "for", "formset", "in", "formsets", ":", "if", "not", "formset", ".", "is_bound", ":", "pass", "for", "form", "in", "formset", ":", "if", "hasattr", "(", "form", ",", "'nested_formsets'", ")", ":", "if", "not", "self", ".", "all_valid_with_nesting", "(", "form", ".", "nested_formsets", ")", ":", "return", "False", "#TODO - find out why this breaks when extra = 1 and just adding new item with no sub items", "if", "(", "not", "hasattr", "(", "form", ",", "'cleaned_data'", ")", "or", "not", "form", ".", "cleaned_data", ")", "and", "self", ".", "formset_has_nested_data", "(", "form", ".", "nested_formsets", ")", ":", "form", ".", "_errors", "[", "\"__all__\"", "]", "=", "form", ".", "error_class", "(", "[", "\"Parent object must be created when creating nested inlines.\"", "]", ")", "return", "False", "return", "True" ]
Recursively validate all nested formsets
[ "Recursively", "validate", "all", "nested", "formsets" ]
1321c1e7d6a912108f79be02a9e7f2108c57f89f
https://github.com/rsalmaso/django-fluo/blob/1321c1e7d6a912108f79be02a9e7f2108c57f89f/fluo/admin/nested.py#L161-L178
240,989
jalanb/pysyte
pysyte/scripts.py
main
def main(method, add_args, version=None, docstring=None, error_stream=sys.stderr): """Run the method as a script Add a '--version' argument Call add_args(parser) to add any more Parse sys.argv call method(args) Catch any exceptions exit on SystemExit or KeyboardInterrupt Others: If version is less than 1, reraise them Else print them to the error stream """ if version: _versions.append(version) try: args = parse_args( add_args, docstring and docstring or '%s()' % method.__name__) return _exit_ok if method(args) else _exit_fail except KeyboardInterrupt as e: ctrl_c = 3 return ctrl_c except SystemExit as e: return e.code except DebugExit: return _exit_fail except Exception as e: # pylint: disable=broad-except if int(latest_version().split('.')[0]) < 1: raise if error_stream: print(e, file=error_stream) return _exit_fail return _exit_ok
python
def main(method, add_args, version=None, docstring=None, error_stream=sys.stderr): """Run the method as a script Add a '--version' argument Call add_args(parser) to add any more Parse sys.argv call method(args) Catch any exceptions exit on SystemExit or KeyboardInterrupt Others: If version is less than 1, reraise them Else print them to the error stream """ if version: _versions.append(version) try: args = parse_args( add_args, docstring and docstring or '%s()' % method.__name__) return _exit_ok if method(args) else _exit_fail except KeyboardInterrupt as e: ctrl_c = 3 return ctrl_c except SystemExit as e: return e.code except DebugExit: return _exit_fail except Exception as e: # pylint: disable=broad-except if int(latest_version().split('.')[0]) < 1: raise if error_stream: print(e, file=error_stream) return _exit_fail return _exit_ok
[ "def", "main", "(", "method", ",", "add_args", ",", "version", "=", "None", ",", "docstring", "=", "None", ",", "error_stream", "=", "sys", ".", "stderr", ")", ":", "if", "version", ":", "_versions", ".", "append", "(", "version", ")", "try", ":", "args", "=", "parse_args", "(", "add_args", ",", "docstring", "and", "docstring", "or", "'%s()'", "%", "method", ".", "__name__", ")", "return", "_exit_ok", "if", "method", "(", "args", ")", "else", "_exit_fail", "except", "KeyboardInterrupt", "as", "e", ":", "ctrl_c", "=", "3", "return", "ctrl_c", "except", "SystemExit", "as", "e", ":", "return", "e", ".", "code", "except", "DebugExit", ":", "return", "_exit_fail", "except", "Exception", "as", "e", ":", "# pylint: disable=broad-except", "if", "int", "(", "latest_version", "(", ")", ".", "split", "(", "'.'", ")", "[", "0", "]", ")", "<", "1", ":", "raise", "if", "error_stream", ":", "print", "(", "e", ",", "file", "=", "error_stream", ")", "return", "_exit_fail", "return", "_exit_ok" ]
Run the method as a script Add a '--version' argument Call add_args(parser) to add any more Parse sys.argv call method(args) Catch any exceptions exit on SystemExit or KeyboardInterrupt Others: If version is less than 1, reraise them Else print them to the error stream
[ "Run", "the", "method", "as", "a", "script" ]
4e278101943d1ceb1a6bcaf6ddc72052ecf13114
https://github.com/jalanb/pysyte/blob/4e278101943d1ceb1a6bcaf6ddc72052ecf13114/pysyte/scripts.py#L58-L93
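A minimal sketch of wiring a script through main() above, using only what the excerpt shows (parse_args takes the add_args callback and a docstring); the version string is illustrative:

import sys

def add_args(parser):
    parser.add_argument('name')

def greet(args):
    print('Hello, %s' % args.name)
    return True  # truthy result -> _exit_ok

if __name__ == '__main__':
    sys.exit(main(greet, add_args, version='0.1.0', docstring='greet someone'))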
240,990
mouse-reeve/word-builder
wordbuilder/WordBuilder.py
WordBuilder.ingest
def ingest(self, corpus_file): ''' load and parse a pre-formatted and cleaned text file. Garbage in, garbage out ''' corpus = open(corpus_file) total_letters = 0 total_words = 0 shortest_word = 100 for word in corpus.readlines(): # clean word word = word.strip() word = re.sub(r'[\',\.\"]', '', word) total_letters += len(word) total_words += 1 shortest_word = len(word) if len(word) < shortest_word else shortest_word # iterate through n letter groups, where 1 <= n <= 3 n = self.chunk_size start = 0 # >: C, Cys: t, yst: i self.links[self.initial].append(word[0:n]) for position in range(n, len(word)): start = position - n if position - n >= 0 else 0 base = word[start:position] if not base in self.links: self.links[base] = [] self.links[base].append(word[position]) if not word[-n:] in self.links: self.links[word[-n:]] = [] self.links[word[-n:]].append(self.terminal) self.average_word_length = total_letters / total_words self.shortest = shortest_word
python
def ingest(self, corpus_file): ''' load and parse a pre-formatted and cleaned text file. Garbage in, garbage out ''' corpus = open(corpus_file) total_letters = 0 total_words = 0 shortest_word = 100 for word in corpus.readlines(): # clean word word = word.strip() word = re.sub(r'[\',\.\"]', '', word) total_letters += len(word) total_words += 1 shortest_word = len(word) if len(word) < shortest_word else shortest_word # iterate through n letter groups, where 1 <= n <= 3 n = self.chunk_size start = 0 # >: C, Cys: t, yst: i self.links[self.initial].append(word[0:n]) for position in range(n, len(word)): start = position - n if position - n >= 0 else 0 base = word[start:position] if not base in self.links: self.links[base] = [] self.links[base].append(word[position]) if not word[-n:] in self.links: self.links[word[-n:]] = [] self.links[word[-n:]].append(self.terminal) self.average_word_length = total_letters / total_words self.shortest = shortest_word
[ "def", "ingest", "(", "self", ",", "corpus_file", ")", ":", "corpus", "=", "open", "(", "corpus_file", ")", "total_letters", "=", "0", "total_words", "=", "0", "shortest_word", "=", "100", "for", "word", "in", "corpus", ".", "readlines", "(", ")", ":", "# clean word", "word", "=", "word", ".", "strip", "(", ")", "word", "=", "re", ".", "sub", "(", "r'[\\',\\.\\\"]'", ",", "''", ",", "word", ")", "total_letters", "+=", "len", "(", "word", ")", "total_words", "+=", "1", "shortest_word", "=", "len", "(", "word", ")", "if", "len", "(", "word", ")", "<", "shortest_word", "else", "shortest_word", "# iterate through n letter groups, where 1 <= n <= 3", "n", "=", "self", ".", "chunk_size", "start", "=", "0", "# >: C, Cys: t, yst: i", "self", ".", "links", "[", "self", ".", "initial", "]", ".", "append", "(", "word", "[", "0", ":", "n", "]", ")", "for", "position", "in", "range", "(", "n", ",", "len", "(", "word", ")", ")", ":", "start", "=", "position", "-", "n", "if", "position", "-", "n", ">=", "0", "else", "0", "base", "=", "word", "[", "start", ":", "position", "]", "if", "not", "base", "in", "self", ".", "links", ":", "self", ".", "links", "[", "base", "]", "=", "[", "]", "self", ".", "links", "[", "base", "]", ".", "append", "(", "word", "[", "position", "]", ")", "if", "not", "word", "[", "-", "n", ":", "]", "in", "self", ".", "links", ":", "self", ".", "links", "[", "word", "[", "-", "n", ":", "]", "]", "=", "[", "]", "self", ".", "links", "[", "word", "[", "-", "n", ":", "]", "]", ".", "append", "(", "self", ".", "terminal", ")", "self", ".", "average_word_length", "=", "total_letters", "/", "total_words", "self", ".", "shortest", "=", "shortest_word" ]
load and parse a pre-formatted and cleaned text file. Garbage in, garbage out
[ "load", "and", "parse", "a", "pre", "-", "formatted", "and", "cleaned", "text", "file", ".", "Garbage", "in", "garbage", "out" ]
5b73f847faeaa06851cfe63438f549706aee3d15
https://github.com/mouse-reeve/word-builder/blob/5b73f847faeaa06851cfe63438f549706aee3d15/wordbuilder/WordBuilder.py#L23-L54
240,991
mouse-reeve/word-builder
wordbuilder/WordBuilder.py
WordBuilder.get_word
def get_word(self, word=None): ''' creates a new word ''' word = word if not word == None else self.initial if not self.terminal in word: if len(word) > self.average_word_length and \ self.terminal in self.links[word[-self.chunk_size:]] \ and random.randint(0, 1): addon = self.terminal else: options = self.links[word[-self.chunk_size:]] addon = random.choice(options) word = word + addon return self.get_word(word) return word[1:-1]
python
def get_word(self, word=None): ''' creates a new word ''' word = word if not word == None else self.initial if not self.terminal in word: if len(word) > self.average_word_length and \ self.terminal in self.links[word[-self.chunk_size:]] \ and random.randint(0, 1): addon = self.terminal else: options = self.links[word[-self.chunk_size:]] addon = random.choice(options) word = word + addon return self.get_word(word) return word[1:-1]
[ "def", "get_word", "(", "self", ",", "word", "=", "None", ")", ":", "word", "=", "word", "if", "not", "word", "==", "None", "else", "self", ".", "initial", "if", "not", "self", ".", "terminal", "in", "word", ":", "if", "len", "(", "word", ")", ">", "self", ".", "average_word_length", "and", "self", ".", "terminal", "in", "self", ".", "links", "[", "word", "[", "-", "self", ".", "chunk_size", ":", "]", "]", "and", "random", ".", "randint", "(", "0", ",", "1", ")", ":", "addon", "=", "self", ".", "terminal", "else", ":", "options", "=", "self", ".", "links", "[", "word", "[", "-", "self", ".", "chunk_size", ":", "]", "]", "addon", "=", "random", ".", "choice", "(", "options", ")", "word", "=", "word", "+", "addon", "return", "self", ".", "get_word", "(", "word", ")", "return", "word", "[", "1", ":", "-", "1", "]" ]
creates a new word
[ "creates", "a", "new", "word" ]
5b73f847faeaa06851cfe63438f549706aee3d15
https://github.com/mouse-reeve/word-builder/blob/5b73f847faeaa06851cfe63438f549706aee3d15/wordbuilder/WordBuilder.py#L57-L71
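Taken together with ingest() above, a hypothetical end-to-end use of the builder; the constructor and its defaults (chunk_size, initial/terminal markers) are assumptions, since they are not shown in this excerpt:

builder = WordBuilder()        # assumed default constructor
builder.ingest('corpus.txt')   # one cleaned word per line
print(builder.get_word())      # emits an invented word built from the letter chains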
240,992
diffeo/rejester
rejester/run.py
existing_path
def existing_path(string): '''"Convert" a string to a string that is a path to an existing file.''' if not os.path.exists(string): msg = 'path {0!r} does not exist'.format(string) raise argparse.ArgumentTypeError(msg) return string
python
def existing_path(string): '''"Convert" a string to a string that is a path to an existing file.''' if not os.path.exists(string): msg = 'path {0!r} does not exist'.format(string) raise argparse.ArgumentTypeError(msg) return string
[ "def", "existing_path", "(", "string", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "string", ")", ":", "msg", "=", "'path {0!r} does not exist'", ".", "format", "(", "string", ")", "raise", "argparse", ".", "ArgumentTypeError", "(", "msg", ")", "return", "string" ]
"Convert" a string to a string that is a path to an existing file.
[ "Convert", "a", "string", "to", "a", "string", "that", "is", "a", "path", "to", "an", "existing", "file", "." ]
5438a4a18be2801d7826c46e2079ba9639d2ecb4
https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/run.py#L198-L203
240,993
diffeo/rejester
rejester/run.py
absolute_path
def absolute_path(string): '''"Convert" a string to a string that is an absolute existing path.''' if not os.path.isabs(string): msg = '{0!r} is not an absolute path'.format(string) raise argparse.ArgumentTypeError(msg) if not os.path.exists(os.path.dirname(string)): msg = 'path {0!r} does not exist'.format(string) raise argparse.ArgumentTypeError(msg) return string
python
def absolute_path(string): '''"Convert" a string to a string that is an absolute existing path.''' if not os.path.isabs(string): msg = '{0!r} is not an absolute path'.format(string) raise argparse.ArgumentTypeError(msg) if not os.path.exists(os.path.dirname(string)): msg = 'path {0!r} does not exist'.format(string) raise argparse.ArgumentTypeError(msg) return string
[ "def", "absolute_path", "(", "string", ")", ":", "if", "not", "os", ".", "path", ".", "isabs", "(", "string", ")", ":", "msg", "=", "'{0!r} is not an absolute path'", ".", "format", "(", "string", ")", "raise", "argparse", ".", "ArgumentTypeError", "(", "msg", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "dirname", "(", "string", ")", ")", ":", "msg", "=", "'path {0!r} does not exist'", ".", "format", "(", "string", ")", "raise", "argparse", ".", "ArgumentTypeError", "(", "msg", ")", "return", "string" ]
"Convert" a string to a string that is an absolute existing path.
[ "Convert", "a", "string", "to", "a", "string", "that", "is", "an", "absolute", "existing", "path", "." ]
5438a4a18be2801d7826c46e2079ba9639d2ecb4
https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/run.py#L210-L218
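Both converters above are designed to be passed as argparse type= callbacks; argparse calls the converter on the raw string and turns any ArgumentTypeError into a usage error. A quick usage sketch:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--work-spec', type=existing_path)
parser.add_argument('--log-file', type=absolute_path)
args = parser.parse_args()  # invalid paths exit with a usage message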
240,994
diffeo/rejester
rejester/run.py
Manager.task_master
def task_master(self): """A `TaskMaster` object for manipulating work""" if self._task_master is None: self._task_master = build_task_master(self.config) return self._task_master
python
def task_master(self): """A `TaskMaster` object for manipulating work""" if self._task_master is None: self._task_master = build_task_master(self.config) return self._task_master
[ "def", "task_master", "(", "self", ")", ":", "if", "self", ".", "_task_master", "is", "None", ":", "self", ".", "_task_master", "=", "build_task_master", "(", "self", ".", "config", ")", "return", "self", ".", "_task_master" ]
A `TaskMaster` object for manipulating work
[ "A", "TaskMaster", "object", "for", "manipulating", "work" ]
5438a4a18be2801d7826c46e2079ba9639d2ecb4
https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/run.py#L240-L244
240,995
diffeo/rejester
rejester/run.py
Manager._get_work_spec
def _get_work_spec(self, args): '''Get the contents of the work spec from the arguments.''' with open(args.work_spec_path) as f: return yaml.load(f)
python
def _get_work_spec(self, args): '''Get the contents of the work spec from the arguments.''' with open(args.work_spec_path) as f: return yaml.load(f)
[ "def", "_get_work_spec", "(", "self", ",", "args", ")", ":", "with", "open", "(", "args", ".", "work_spec_path", ")", "as", "f", ":", "return", "yaml", ".", "load", "(", "f", ")" ]
Get the contents of the work spec from the arguments.
[ "Get", "the", "contents", "of", "the", "work", "spec", "from", "the", "arguments", "." ]
5438a4a18be2801d7826c46e2079ba9639d2ecb4
https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/run.py#L252-L255
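_get_work_spec parses the spec file with yaml.load; a small self-contained sketch of the same parse, swapped to yaml.safe_load, which newer PyYAML releases recommend and which suffices for a plain mapping like this. The spec fields below are hypothetical.

import yaml

spec_text = """
name: example_spec
min_gb: 1
module: example.module
run_function: run
"""

# safe_load parses the YAML mapping into an ordinary dict.
work_spec = yaml.safe_load(spec_text)
print(work_spec['name'])  # -> example_spec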
240,996
diffeo/rejester
rejester/run.py
Manager._get_work_spec_name
def _get_work_spec_name(self, args):
    '''Get the name of the work spec from the arguments.

    This assumes :meth:`_add_work_spec_name_args` has been called,
    but will also work if just :meth:`_add_work_spec_args` was
    called instead.

    '''
    if getattr(args, 'work_spec_name', None):
        return args.work_spec_name
    return self._get_work_spec(args)['name']
python
def _get_work_spec_name(self, args):
    '''Get the name of the work spec from the arguments.

    This assumes :meth:`_add_work_spec_name_args` has been called,
    but will also work if just :meth:`_add_work_spec_args` was
    called instead.

    '''
    if getattr(args, 'work_spec_name', None):
        return args.work_spec_name
    return self._get_work_spec(args)['name']
[ "def", "_get_work_spec_name", "(", "self", ",", "args", ")", ":", "if", "getattr", "(", "args", ",", "'work_spec_name'", ",", "None", ")", ":", "return", "args", ".", "work_spec_name", "return", "self", ".", "_get_work_spec", "(", "args", ")", "[", "'name'", "]" ]
Get the name of the work spec from the arguments.

This assumes :meth:`_add_work_spec_name_args` has been called,
but will also work if just :meth:`_add_work_spec_args` was
called instead.
[ "Get", "the", "name", "of", "the", "work", "spec", "from", "the", "arguments", "." ]
5438a4a18be2801d7826c46e2079ba9639d2ecb4
https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/run.py#L267-L277
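The name-resolution order in _get_work_spec_name: an explicit work_spec_name argument wins, otherwise the 'name' key of the loaded spec is used. Sketched with argparse.Namespace standing in for parsed CLI args; all values here are hypothetical.

import argparse

spec = {'name': 'example_spec'}  # hypothetical loaded work spec

for args in (argparse.Namespace(work_spec_name='override'),
             argparse.Namespace(work_spec_name=None)):
    # getattr with a default tolerates Namespaces that lack the
    # attribute entirely, mirroring the function above.
    if getattr(args, 'work_spec_name', None):
        name = args.work_spec_name
    else:
        name = spec['name']
    print(name)  # -> override, then example_spec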
240,997
diffeo/rejester
rejester/run.py
Manager.do_load
def do_load(self, args):
    '''loads work_units into a namespace for a given work_spec

    --work-units file is JSON, one work unit per line, each line a
    JSON dict with the one key being the work unit key and the value
    being a data dict for the work unit.

    '''
    work_spec = self._get_work_spec(args)
    work_spec['nice'] = args.nice
    self.task_master.set_work_spec(work_spec)

    if args.no_work:
        work_units_fh = []  # it just has to be an iterable with no lines
    else:
        work_units_fh = self._work_units_fh_from_path(args.work_units_path)
        if work_units_fh is None:
            raise RuntimeError('need -u/--work-units or --no-work')
    self.stdout.write('loading work units from {0!r}\n'
                      .format(work_units_fh))
    work_units = dict(self._read_work_units_file(work_units_fh))

    if work_units:
        self.stdout.write('pushing work units\n')
        self.task_master.add_work_units(work_spec['name'],
                                        work_units.items())
        self.stdout.write('finished writing {0} work units to '
                          'work_spec={1!r}\n'
                          .format(len(work_units), work_spec['name']))
    else:
        self.stdout.write('no work units. done.\n')
python
def do_load(self, args):
    '''loads work_units into a namespace for a given work_spec

    --work-units file is JSON, one work unit per line, each line a
    JSON dict with the one key being the work unit key and the value
    being a data dict for the work unit.

    '''
    work_spec = self._get_work_spec(args)
    work_spec['nice'] = args.nice
    self.task_master.set_work_spec(work_spec)

    if args.no_work:
        work_units_fh = []  # it just has to be an iterable with no lines
    else:
        work_units_fh = self._work_units_fh_from_path(args.work_units_path)
        if work_units_fh is None:
            raise RuntimeError('need -u/--work-units or --no-work')
    self.stdout.write('loading work units from {0!r}\n'
                      .format(work_units_fh))
    work_units = dict(self._read_work_units_file(work_units_fh))

    if work_units:
        self.stdout.write('pushing work units\n')
        self.task_master.add_work_units(work_spec['name'],
                                        work_units.items())
        self.stdout.write('finished writing {0} work units to '
                          'work_spec={1!r}\n'
                          .format(len(work_units), work_spec['name']))
    else:
        self.stdout.write('no work units. done.\n')
[ "def", "do_load", "(", "self", ",", "args", ")", ":", "work_spec", "=", "self", ".", "_get_work_spec", "(", "args", ")", "work_spec", "[", "'nice'", "]", "=", "args", ".", "nice", "self", ".", "task_master", ".", "set_work_spec", "(", "work_spec", ")", "if", "args", ".", "no_work", ":", "work_units_fh", "=", "[", "]", "# it just has to be an iterable with no lines", "else", ":", "work_units_fh", "=", "self", ".", "_work_units_fh_from_path", "(", "args", ".", "work_units_path", ")", "if", "work_units_fh", "is", "None", ":", "raise", "RuntimeError", "(", "'need -u/--work-units or --no-work'", ")", "self", ".", "stdout", ".", "write", "(", "'loading work units from {0!r}\\n'", ".", "format", "(", "work_units_fh", ")", ")", "work_units", "=", "dict", "(", "self", ".", "_read_work_units_file", "(", "work_units_fh", ")", ")", "if", "work_units", ":", "self", ".", "stdout", ".", "write", "(", "'pushing work units\\n'", ")", "self", ".", "task_master", ".", "add_work_units", "(", "work_spec", "[", "'name'", "]", ",", "work_units", ".", "items", "(", ")", ")", "self", ".", "stdout", ".", "write", "(", "'finished writing {0} work units to work_spec={1!r}\\n'", ".", "format", "(", "len", "(", "work_units", ")", ",", "work_spec", "[", "'name'", "]", ")", ")", "else", ":", "self", ".", "stdout", ".", "write", "(", "'no work units. done.\\n'", ")" ]
loads work_units into a namespace for a given work_spec

--work-units file is JSON, one work unit per line, each line a
JSON dict with the one key being the work unit key and the value
being a data dict for the work unit.
[ "loads", "work_units", "into", "a", "namespace", "for", "a", "given", "work_spec" ]
5438a4a18be2801d7826c46e2079ba9639d2ecb4
https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/run.py#L324-L352
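The docstring pins down the --work-units format: one JSON dict per line, each mapping a work unit key to its data dict. A minimal parse of that format; the unit keys and payloads below are made up for the demo.

import json

lines = [
    '{"unit-0001": {"input": "item-a"}}',
    '{"unit-0002": {"input": "item-b"}}',
]

# Each line is a single-key dict, so updating a dict with every
# parsed line accumulates key -> data pairs, as do_load does.
work_units = {}
for line in lines:
    work_units.update(json.loads(line))

print(sorted(work_units))  # -> ['unit-0001', 'unit-0002']
print(len(work_units), 'work units ready to push')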
240,998
diffeo/rejester
rejester/run.py
Manager.do_delete
def do_delete(self, args):
    '''delete the entire contents of the current namespace'''
    namespace = self.config['namespace']
    if not args.assume_yes:
        response = raw_input('Delete everything in {0!r}? Enter namespace: '
                             .format(namespace))
        if response != namespace:
            self.stdout.write('not deleting anything\n')
            return
    self.stdout.write('deleting namespace {0!r}\n'.format(namespace))
    self.task_master.clear()
python
def do_delete(self, args):
    '''delete the entire contents of the current namespace'''
    namespace = self.config['namespace']
    if not args.assume_yes:
        response = raw_input('Delete everything in {0!r}? Enter namespace: '
                             .format(namespace))
        if response != namespace:
            self.stdout.write('not deleting anything\n')
            return
    self.stdout.write('deleting namespace {0!r}\n'.format(namespace))
    self.task_master.clear()
[ "def", "do_delete", "(", "self", ",", "args", ")", ":", "namespace", "=", "self", ".", "config", "[", "'namespace'", "]", "if", "not", "args", ".", "assume_yes", ":", "response", "=", "raw_input", "(", "'Delete everything in {0!r}? Enter namespace: '", ".", "format", "(", "namespace", ")", ")", "if", "response", "!=", "namespace", ":", "self", ".", "stdout", ".", "write", "(", "'not deleting anything\\n'", ")", "return", "self", ".", "stdout", ".", "write", "(", "'deleting namespace {0!r}\\n'", ".", "format", "(", "namespace", ")", ")", "self", ".", "task_master", ".", "clear", "(", ")" ]
delete the entire contents of the current namespace
[ "delete", "the", "entire", "contents", "of", "the", "current", "namespace" ]
5438a4a18be2801d7826c46e2079ba9639d2ecb4
https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/run.py#L375-L385
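do_delete guards a destructive clear with a type-the-namespace confirmation (raw_input is the Python 2 spelling; under Python 3 it is input). The guard in isolation, with a canned response function standing in for real terminal input so the sketch runs unattended; the namespace is hypothetical.

namespace = 'example_namespace'  # hypothetical

def prompt(message):
    # Stand-in for raw_input()/input() in this demo.
    return 'example_namespace'

response = prompt('Delete everything in {0!r}? Enter namespace: '
                  .format(namespace))
if response != namespace:
    print('not deleting anything')
else:
    print('deleting namespace {0!r}'.format(namespace))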
240,999
diffeo/rejester
rejester/run.py
Manager.do_work_specs
def do_work_specs(self, args):
    '''print the names of all of the work specs'''
    work_spec_names = [x['name']
                       for x in self.task_master.iter_work_specs()]
    work_spec_names.sort()
    for name in work_spec_names:
        self.stdout.write('{0}\n'.format(name))
python
def do_work_specs(self, args):
    '''print the names of all of the work specs'''
    work_spec_names = [x['name']
                       for x in self.task_master.iter_work_specs()]
    work_spec_names.sort()
    for name in work_spec_names:
        self.stdout.write('{0}\n'.format(name))
[ "def", "do_work_specs", "(", "self", ",", "args", ")", ":", "work_spec_names", "=", "[", "x", "[", "'name'", "]", "for", "x", "in", "self", ".", "task_master", ".", "iter_work_specs", "(", ")", "]", "work_spec_names", ".", "sort", "(", ")", "for", "name", "in", "work_spec_names", ":", "self", ".", "stdout", ".", "write", "(", "'{0}\\n'", ".", "format", "(", "name", ")", ")" ]
print the names of all of the work specs
[ "print", "the", "names", "of", "all", "of", "the", "work", "specs" ]
5438a4a18be2801d7826c46e2079ba9639d2ecb4
https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/run.py#L389-L394
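do_work_specs collects every spec's 'name' and prints them in sorted order; the same listing over a hypothetical iterable of spec dicts:

specs = [{'name': 'beta_spec'}, {'name': 'alpha_spec'}]  # hypothetical

for name in sorted(spec['name'] for spec in specs):
    print(name)  # -> alpha_spec, then beta_spec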