column                      type    values
repository_name             string  lengths 7 to 55
func_path_in_repository     string  lengths 4 to 223
func_name                   string  lengths 1 to 134
whole_func_string           string  lengths 75 to 104k
language                    string  1 class
func_code_string            string  lengths 75 to 104k
func_code_tokens            list    lengths 19 to 28.4k
func_documentation_string   string  lengths 1 to 46.9k
func_documentation_tokens   list    lengths 1 to 1.97k
split_name                  string  1 class
func_code_url               string  lengths 87 to 315
michaeltcoelho/pagarme.py
pagarme/api.py
PagarmeApi.post
def post(self, action, data=None, headers=None):
    """Makes a POST request"""
    return self.request(make_url(self.endpoint, action), method='POST', data=data, headers=headers)
python
def post(self, action, data=None, headers=None):
    """Makes a POST request"""
    return self.request(make_url(self.endpoint, action), method='POST', data=data, headers=headers)
[ "def", "post", "(", "self", ",", "action", ",", "data", "=", "None", ",", "headers", "=", "None", ")", ":", "return", "self", ".", "request", "(", "make_url", "(", "self", ".", "endpoint", ",", "action", ")", ",", "method", "=", "'POST'", ",", "data", "=", "data", ",", "headers", "=", "headers", ")" ]
Makes a POST request
[ "Makes", "a", "POST", "request" ]
train
https://github.com/michaeltcoelho/pagarme.py/blob/469fdd6e61e7c24a9eaf23d474d25316c3b5450b/pagarme/api.py#L79-L83
michaeltcoelho/pagarme.py
pagarme/api.py
PagarmeApi.delete
def delete(self, action, headers=None):
    """Makes a DELETE request"""
    return self.request(make_url(self.endpoint, action), method='DELETE', headers=headers)
python
def delete(self, action, headers=None):
    """Makes a DELETE request"""
    return self.request(make_url(self.endpoint, action), method='DELETE', headers=headers)
[ "def", "delete", "(", "self", ",", "action", ",", "headers", "=", "None", ")", ":", "return", "self", ".", "request", "(", "make_url", "(", "self", ".", "endpoint", ",", "action", ")", ",", "method", "=", "'DELETE'", ",", "headers", "=", "headers", ")" ]
Makes a DELETE request
[ "Makes", "a", "DELETE", "request" ]
train
https://github.com/michaeltcoelho/pagarme.py/blob/469fdd6e61e7c24a9eaf23d474d25316c3b5450b/pagarme/api.py#L91-L95
JohnDoee/thomas
thomas/piece.py
calc_piece_size
def calc_piece_size(size, min_piece_size=20, max_piece_size=29, max_piece_count=1000):
    """Calculates a good piece size for a size"""
    logger.debug('Calculating piece size for %i' % size)
    for i in range(min_piece_size, max_piece_size):  # 20 = 1MB
        if size / (2**i) < max_piece_count:
            break
    return 2**i
python
def calc_piece_size(size, min_piece_size=20, max_piece_size=29, max_piece_count=1000):
    """Calculates a good piece size for a size"""
    logger.debug('Calculating piece size for %i' % size)
    for i in range(min_piece_size, max_piece_size):  # 20 = 1MB
        if size / (2**i) < max_piece_count:
            break
    return 2**i
[ "def", "calc_piece_size", "(", "size", ",", "min_piece_size", "=", "20", ",", "max_piece_size", "=", "29", ",", "max_piece_count", "=", "1000", ")", ":", "logger", ".", "debug", "(", "'Calculating piece size for %i'", "%", "size", ")", "for", "i", "in", "range", "(", "min_piece_size", ",", "max_piece_size", ")", ":", "# 20 = 1MB", "if", "size", "/", "(", "2", "**", "i", ")", "<", "max_piece_count", ":", "break", "return", "2", "**", "i" ]
Calculates a good piece size for a size
[ "Calculates", "a", "good", "piece", "size", "for", "a", "size" ]
train
https://github.com/JohnDoee/thomas/blob/51916dd110098b189a1c2fbcb71794fd9ec94832/thomas/piece.py#L18-L27
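The exponent search above is easy to sanity-check standalone; this sketch re-states the loop without the logger (the input sizes are illustrative, not from the repo):

def calc_piece_size(size, min_piece_size=20, max_piece_size=29, max_piece_count=1000):
    # walk piece-size exponents from 2**20 (1 MiB) upward until the
    # resulting piece count drops under max_piece_count
    for i in range(min_piece_size, max_piece_size):
        if size / (2 ** i) < max_piece_count:
            break
    return 2 ** i

print(calc_piece_size(700 * 2 ** 20))  # 1048576 -> 1 MiB pieces (700 pieces)
print(calc_piece_size(2 * 2 ** 30))    # 4194304 -> 4 MiB pieces (512 pieces)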
JohnDoee/thomas
thomas/piece.py
split_pieces
def split_pieces(piece_list, segments, num):
    """Prepare a list of all pieces grouped together"""
    piece_groups = []
    pieces = list(piece_list)
    while pieces:
        for i in range(segments):
            p = pieces[i::segments][:num]
            if not p:
                break
            piece_groups.append(p)
        pieces = pieces[num * segments:]
    return piece_groups
python
def split_pieces(piece_list, segments, num):
    """Prepare a list of all pieces grouped together"""
    piece_groups = []
    pieces = list(piece_list)
    while pieces:
        for i in range(segments):
            p = pieces[i::segments][:num]
            if not p:
                break
            piece_groups.append(p)
        pieces = pieces[num * segments:]
    return piece_groups
[ "def", "split_pieces", "(", "piece_list", ",", "segments", ",", "num", ")", ":", "piece_groups", "=", "[", "]", "pieces", "=", "list", "(", "piece_list", ")", "while", "pieces", ":", "for", "i", "in", "range", "(", "segments", ")", ":", "p", "=", "pieces", "[", "i", ":", ":", "segments", "]", "[", ":", "num", "]", "if", "not", "p", ":", "break", "piece_groups", ".", "append", "(", "p", ")", "pieces", "=", "pieces", "[", "num", "*", "segments", ":", "]", "return", "piece_groups" ]
Prepare a list of all pieces grouped together
[ "Prepare", "a", "list", "of", "all", "pieces", "grouped", "together" ]
train
https://github.com/JohnDoee/thomas/blob/51916dd110098b189a1c2fbcb71794fd9ec94832/thomas/piece.py#L30-L44
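The strided grouping is easiest to see on a small input; a minimal standalone run (values illustrative):

def split_pieces(piece_list, segments, num):
    # stride across the list `segments` ways, taking `num` pieces per group
    piece_groups = []
    pieces = list(piece_list)
    while pieces:
        for i in range(segments):
            p = pieces[i::segments][:num]
            if not p:
                break
            piece_groups.append(p)
        pieces = pieces[num * segments:]
    return piece_groups

print(split_pieces(range(10), segments=2, num=3))
# [[0, 2, 4], [1, 3, 5], [6, 8], [7, 9]]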
BlackEarth/bl
bl/rglob.py
rglob
def rglob(dirname, pattern, dirs=False, sort=True):
    """recursive glob, gets all files that match the pattern within the directory tree"""
    fns = []
    path = str(dirname)
    if os.path.isdir(path):
        fns = glob(os.path.join(escape(path), pattern))
        dns = [fn for fn in [os.path.join(path, fn) for fn in os.listdir(path)]
               if os.path.isdir(fn)]
        if dirs == True:
            fns += dns
        for d in dns:
            fns += rglob(d, pattern)
        if sort == True:
            fns.sort()
    else:
        log.warn("not a directory: %r" % path)
    return fns
python
def rglob(dirname, pattern, dirs=False, sort=True):
    """recursive glob, gets all files that match the pattern within the directory tree"""
    fns = []
    path = str(dirname)
    if os.path.isdir(path):
        fns = glob(os.path.join(escape(path), pattern))
        dns = [fn for fn in [os.path.join(path, fn) for fn in os.listdir(path)]
               if os.path.isdir(fn)]
        if dirs == True:
            fns += dns
        for d in dns:
            fns += rglob(d, pattern)
        if sort == True:
            fns.sort()
    else:
        log.warn("not a directory: %r" % path)
    return fns
[ "def", "rglob", "(", "dirname", ",", "pattern", ",", "dirs", "=", "False", ",", "sort", "=", "True", ")", ":", "fns", "=", "[", "]", "path", "=", "str", "(", "dirname", ")", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "fns", "=", "glob", "(", "os", ".", "path", ".", "join", "(", "escape", "(", "path", ")", ",", "pattern", ")", ")", "dns", "=", "[", "fn", "for", "fn", "in", "[", "os", ".", "path", ".", "join", "(", "path", ",", "fn", ")", "for", "fn", "in", "os", ".", "listdir", "(", "path", ")", "]", "if", "os", ".", "path", ".", "isdir", "(", "fn", ")", "]", "if", "dirs", "==", "True", ":", "fns", "+=", "dns", "for", "d", "in", "dns", ":", "fns", "+=", "rglob", "(", "d", ",", "pattern", ")", "if", "sort", "==", "True", ":", "fns", ".", "sort", "(", ")", "else", ":", "log", ".", "warn", "(", "\"not a directory: %r\"", "%", "path", ")", "return", "fns" ]
recursive glob, gets all files that match the pattern within the directory tree
[ "recursive", "glob", "gets", "all", "files", "that", "match", "the", "pattern", "within", "the", "directory", "tree" ]
train
https://github.com/BlackEarth/bl/blob/edf6f37dac718987260b90ad0e7f7fe084a7c1a3/bl/rglob.py#L17-L35
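On Python 3.5+ the standard library covers the recursive case directly; a rough equivalent of rglob(dirname, '*.py') for comparison (note it has no dirs flag and always descends):

from pathlib import Path

# roughly equivalent to rglob('.', '*.py'): recursive, sorted file list
fns = sorted(str(p) for p in Path('.').rglob('*.py'))
print(fns[:5])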
teepark/junction
junction/futures.py
after
def after(parents, func=None):
    '''Create a new Future whose completion depends on parent futures

    The new future will have a function that it calls once all its parents
    have completed, the return value of which will be its final value.

    There is a special case, however, in which the dependent future's
    callback returns a future or list of futures. In those cases, waiting
    on the dependent will also wait for all those futures, and the result
    (or list of results) of those future(s) will then be the final value.

    :param parents:
        A list of futures, all of which must be complete before the
        dependent's function runs.
    :type parents: list
    :param function func:
        The function to determine the value of the dependent future. It
        will take as many arguments as it has parents, and they will be the
        results of those futures.

    :returns:
        a :class:`Dependent`, which is a subclass of :class:`Future` and
        has all its capabilities.
    '''
    if func is None:
        return lambda f: after(parents, f)

    dep = Dependent(parents, func)
    for parent in parents:
        if parent.complete:
            dep._incoming(parent, parent.value)
        else:
            parent._children.append(weakref.ref(dep))
    return dep
python
def after(parents, func=None):
    '''Create a new Future whose completion depends on parent futures

    The new future will have a function that it calls once all its parents
    have completed, the return value of which will be its final value.

    There is a special case, however, in which the dependent future's
    callback returns a future or list of futures. In those cases, waiting
    on the dependent will also wait for all those futures, and the result
    (or list of results) of those future(s) will then be the final value.

    :param parents:
        A list of futures, all of which must be complete before the
        dependent's function runs.
    :type parents: list
    :param function func:
        The function to determine the value of the dependent future. It
        will take as many arguments as it has parents, and they will be the
        results of those futures.

    :returns:
        a :class:`Dependent`, which is a subclass of :class:`Future` and
        has all its capabilities.
    '''
    if func is None:
        return lambda f: after(parents, f)

    dep = Dependent(parents, func)
    for parent in parents:
        if parent.complete:
            dep._incoming(parent, parent.value)
        else:
            parent._children.append(weakref.ref(dep))
    return dep
[ "def", "after", "(", "parents", ",", "func", "=", "None", ")", ":", "if", "func", "is", "None", ":", "return", "lambda", "f", ":", "after", "(", "parents", ",", "f", ")", "dep", "=", "Dependent", "(", "parents", ",", "func", ")", "for", "parent", "in", "parents", ":", "if", "parent", ".", "complete", ":", "dep", ".", "_incoming", "(", "parent", ",", "parent", ".", "value", ")", "else", ":", "parent", ".", "_children", ".", "append", "(", "weakref", ".", "ref", "(", "dep", ")", ")", "return", "dep" ]
Create a new Future whose completion depends on parent futures The new future will have a function that it calls once all its parents have completed, the return value of which will be its final value. There is a special case, however, in which the dependent future's callback returns a future or list of futures. In those cases, waiting on the dependent will also wait for all those futures, and the result (or list of results) of those future(s) will then be the final value. :param parents: A list of futures, all of which must be complete before the dependent's function runs. :type parents: list :param function func: The function to determine the value of the dependent future. It will take as many arguments as it has parents, and they will be the results of those futures. :returns: a :class:`Dependent`, which is a subclass of :class:`Future` and has all its capabilities.
[ "Create", "a", "new", "Future", "whose", "completion", "depends", "on", "parent", "futures" ]
train
https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/futures.py#L339-L372
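For comparison, the same "run a function once all parents complete" shape can be expressed with the standard library; this is only an illustration of the dependency pattern, since junction's after() additionally unwraps futures returned by the callback:

import concurrent.futures as cf

with cf.ThreadPoolExecutor() as pool:
    f1 = pool.submit(lambda: 2)
    f2 = pool.submit(lambda: 3)
    cf.wait([f1, f2])  # block until both parents are complete
    dep = pool.submit(lambda a, b: a + b, f1.result(), f2.result())
    print(dep.result())  # 5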
teepark/junction
junction/futures.py
wait_any
def wait_any(futures, timeout=None):
    '''Wait for the completion of any (the first) one of multiple futures

    :param list futures: A list of :class:`Future`\s

    :param timeout:
        The maximum time to wait. With ``None``, will block indefinitely.
    :type timeout: float or None

    :returns:
        One of the futures from the provided list -- the first one to
        become complete (or any of the ones that were already complete).

    :raises WaitTimeout: if a timeout is provided and hit
    '''
    for fut in futures:
        if fut.complete:
            return fut

    wait = _Wait(futures)
    for fut in futures:
        fut._waits.add(wait)

    if wait.done.wait(timeout):
        raise errors.WaitTimeout()

    return wait.completed_future
python
def wait_any(futures, timeout=None):
    '''Wait for the completion of any (the first) one of multiple futures

    :param list futures: A list of :class:`Future`\s

    :param timeout:
        The maximum time to wait. With ``None``, will block indefinitely.
    :type timeout: float or None

    :returns:
        One of the futures from the provided list -- the first one to
        become complete (or any of the ones that were already complete).

    :raises WaitTimeout: if a timeout is provided and hit
    '''
    for fut in futures:
        if fut.complete:
            return fut

    wait = _Wait(futures)
    for fut in futures:
        fut._waits.add(wait)

    if wait.done.wait(timeout):
        raise errors.WaitTimeout()

    return wait.completed_future
[ "def", "wait_any", "(", "futures", ",", "timeout", "=", "None", ")", ":", "for", "fut", "in", "futures", ":", "if", "fut", ".", "complete", ":", "return", "fut", "wait", "=", "_Wait", "(", "futures", ")", "for", "fut", "in", "futures", ":", "fut", ".", "_waits", ".", "add", "(", "wait", ")", "if", "wait", ".", "done", ".", "wait", "(", "timeout", ")", ":", "raise", "errors", ".", "WaitTimeout", "(", ")", "return", "wait", ".", "completed_future" ]
Wait for the completion of any (the first) one of multiple futures :param list futures: A list of :class:`Future`\s :param timeout: The maximum time to wait. With ``None``, will block indefinitely. :type timeout: float or None :returns: One of the futures from the provided list -- the first one to become complete (or any of the ones that were already complete). :raises WaitTimeout: if a timeout is provided and hit
[ "Wait", "for", "the", "completion", "of", "any", "(", "the", "first", ")", "one", "of", "multiple", "futures" ]
train
https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/futures.py#L375-L401
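The stdlib expresses the same "first of many" wait with FIRST_COMPLETED; a runnable comparison sketch (sleep durations illustrative):

import concurrent.futures as cf
import time

with cf.ThreadPoolExecutor() as pool:
    futs = [pool.submit(time.sleep, d) for d in (0.2, 0.05)]
    done, pending = cf.wait(futs, timeout=1.0, return_when=cf.FIRST_COMPLETED)
    print(len(done), len(pending))  # 1 1 -- the 0.05s sleep finishes first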
teepark/junction
junction/futures.py
wait_all
def wait_all(futures, timeout=None):
    '''Wait for the completion of all futures in a list

    :param list futures: a list of :class:`Future`\s

    :param timeout:
        The maximum time to wait. With ``None``, can block indefinitely.
    :type timeout: float or None

    :raises WaitTimeout: if a timeout is provided and hit
    '''
    if timeout is not None:
        deadline = time.time() + timeout
        for fut in futures:
            fut.wait(deadline - time.time())
    else:
        for fut in futures:
            fut.wait()
python
def wait_all(futures, timeout=None):
    '''Wait for the completion of all futures in a list

    :param list futures: a list of :class:`Future`\s

    :param timeout:
        The maximum time to wait. With ``None``, can block indefinitely.
    :type timeout: float or None

    :raises WaitTimeout: if a timeout is provided and hit
    '''
    if timeout is not None:
        deadline = time.time() + timeout
        for fut in futures:
            fut.wait(deadline - time.time())
    else:
        for fut in futures:
            fut.wait()
[ "def", "wait_all", "(", "futures", ",", "timeout", "=", "None", ")", ":", "if", "timeout", "is", "not", "None", ":", "deadline", "=", "time", ".", "time", "(", ")", "+", "timeout", "for", "fut", "in", "futures", ":", "fut", ".", "wait", "(", "deadline", "-", "time", ".", "time", "(", ")", ")", "else", ":", "for", "fut", "in", "futures", ":", "fut", ".", "wait", "(", ")" ]
Wait for the completion of all futures in a list :param list futures: a list of :class:`Future`\s :param timeout: The maximum time to wait. With ``None``, can block indefinitely. :type timeout: float or None :raises WaitTimeout: if a timeout is provided and hit
[ "Wait", "for", "the", "completion", "of", "all", "futures", "in", "a", "list" ]
train
https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/futures.py#L404-L420
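The deadline arithmetic above (one shared time budget sliced across sequential waits) ports directly to stdlib futures; a sketch:

import concurrent.futures as cf
import time

def wait_all(futures, timeout=None):
    # one shared budget: each successive wait only gets the time remaining
    if timeout is not None:
        deadline = time.monotonic() + timeout
        for fut in futures:
            fut.result(timeout=deadline - time.monotonic())
    else:
        for fut in futures:
            fut.result()

with cf.ThreadPoolExecutor() as pool:
    wait_all([pool.submit(time.sleep, 0.05) for _ in range(3)], timeout=1.0)
    print('all done')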
teepark/junction
junction/futures.py
Future.value
def value(self):
    '''The final value, if it has arrived

    :raises: AttributeError, if not yet complete
    :raises: an exception if the Future was :meth:`abort`\ed
    '''
    if not self._done.is_set():
        raise AttributeError("value")
    if self._failure:
        raise self._failure[0], self._failure[1], self._failure[2]
    return self._value
python
def value(self):
    '''The final value, if it has arrived

    :raises: AttributeError, if not yet complete
    :raises: an exception if the Future was :meth:`abort`\ed
    '''
    if not self._done.is_set():
        raise AttributeError("value")
    if self._failure:
        raise self._failure[0], self._failure[1], self._failure[2]
    return self._value
[ "def", "value", "(", "self", ")", ":", "if", "not", "self", ".", "_done", ".", "is_set", "(", ")", ":", "raise", "AttributeError", "(", "\"value\"", ")", "if", "self", ".", "_failure", ":", "raise", "self", ".", "_failure", "[", "0", "]", ",", "self", ".", "_failure", "[", "1", "]", ",", "self", ".", "_failure", "[", "2", "]", "return", "self", ".", "_value" ]
The final value, if it has arrived :raises: AttributeError, if not yet complete :raises: an exception if the Future was :meth:`abort`\ed
[ "The", "final", "value", "if", "it", "has", "arrived" ]
train
https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/futures.py#L38-L48
teepark/junction
junction/futures.py
Future.finish
def finish(self, value):
    '''Give the future its value and trigger any associated callbacks

    :param value: the new value for the future

    :raises:
        :class:`AlreadyComplete <junction.errors.AlreadyComplete>` if
        already complete
    '''
    if self._done.is_set():
        raise errors.AlreadyComplete()

    self._value = value

    for cb in self._cbacks:
        backend.schedule(cb, args=(value,))
    self._cbacks = None

    for wait in list(self._waits):
        wait.finish(self)
    self._waits = None

    for child in self._children:
        child = child()
        if child is None:
            continue
        child._incoming(self, value)
    self._children = None

    self._done.set()
python
def finish(self, value):
    '''Give the future its value and trigger any associated callbacks

    :param value: the new value for the future

    :raises:
        :class:`AlreadyComplete <junction.errors.AlreadyComplete>` if
        already complete
    '''
    if self._done.is_set():
        raise errors.AlreadyComplete()

    self._value = value

    for cb in self._cbacks:
        backend.schedule(cb, args=(value,))
    self._cbacks = None

    for wait in list(self._waits):
        wait.finish(self)
    self._waits = None

    for child in self._children:
        child = child()
        if child is None:
            continue
        child._incoming(self, value)
    self._children = None

    self._done.set()
[ "def", "finish", "(", "self", ",", "value", ")", ":", "if", "self", ".", "_done", ".", "is_set", "(", ")", ":", "raise", "errors", ".", "AlreadyComplete", "(", ")", "self", ".", "_value", "=", "value", "for", "cb", "in", "self", ".", "_cbacks", ":", "backend", ".", "schedule", "(", "cb", ",", "args", "=", "(", "value", ",", ")", ")", "self", ".", "_cbacks", "=", "None", "for", "wait", "in", "list", "(", "self", ".", "_waits", ")", ":", "wait", ".", "finish", "(", "self", ")", "self", ".", "_waits", "=", "None", "for", "child", "in", "self", ".", "_children", ":", "child", "=", "child", "(", ")", "if", "child", "is", "None", ":", "continue", "child", ".", "_incoming", "(", "self", ",", "value", ")", "self", ".", "_children", "=", "None", "self", ".", "_done", ".", "set", "(", ")" ]
Give the future its value and trigger any associated callbacks :param value: the new value for the future :raises: :class:`AlreadyComplete <junction.errors.AlreadyComplete>` if already complete
[ "Give", "the", "future", "its", "value", "and", "trigger", "any", "associated", "callbacks" ]
train
https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/futures.py#L50-L78
teepark/junction
junction/futures.py
Future.abort
def abort(self, klass, exc, tb=None):
    '''Finish this future (maybe early) in an error state

    Takes a standard exception triple as arguments (like returned by
    ``sys.exc_info``) and will re-raise them as the value. Any
    :class:`Dependents` that are children of this one will also be
    aborted.

    :param class klass: the class of the exception
    :param Exception exc: the exception instance itself
    :param traceback tb: the traceback associated with the exception

    :raises:
        :class:`AlreadyComplete <junction.errors.AlreadyComplete>` if
        already complete
    '''
    if self._done.is_set():
        raise errors.AlreadyComplete()

    self._failure = (klass, exc, tb)

    for eb in self._errbacks:
        backend.schedule(eb, args=(klass, exc, tb))
    self._errbacks = None

    for wait in list(self._waits):
        wait.finish(self)
    self._waits = None

    for child in self._children:
        child = child()
        if child is None:
            continue
        child.abort(klass, exc, tb)
    self._children = None

    self._done.set()
python
def abort(self, klass, exc, tb=None):
    '''Finish this future (maybe early) in an error state

    Takes a standard exception triple as arguments (like returned by
    ``sys.exc_info``) and will re-raise them as the value. Any
    :class:`Dependents` that are children of this one will also be
    aborted.

    :param class klass: the class of the exception
    :param Exception exc: the exception instance itself
    :param traceback tb: the traceback associated with the exception

    :raises:
        :class:`AlreadyComplete <junction.errors.AlreadyComplete>` if
        already complete
    '''
    if self._done.is_set():
        raise errors.AlreadyComplete()

    self._failure = (klass, exc, tb)

    for eb in self._errbacks:
        backend.schedule(eb, args=(klass, exc, tb))
    self._errbacks = None

    for wait in list(self._waits):
        wait.finish(self)
    self._waits = None

    for child in self._children:
        child = child()
        if child is None:
            continue
        child.abort(klass, exc, tb)
    self._children = None

    self._done.set()
[ "def", "abort", "(", "self", ",", "klass", ",", "exc", ",", "tb", "=", "None", ")", ":", "if", "self", ".", "_done", ".", "is_set", "(", ")", ":", "raise", "errors", ".", "AlreadyComplete", "(", ")", "self", ".", "_failure", "=", "(", "klass", ",", "exc", ",", "tb", ")", "for", "eb", "in", "self", ".", "_errbacks", ":", "backend", ".", "schedule", "(", "eb", ",", "args", "=", "(", "klass", ",", "exc", ",", "tb", ")", ")", "self", ".", "_errbacks", "=", "None", "for", "wait", "in", "list", "(", "self", ".", "_waits", ")", ":", "wait", ".", "finish", "(", "self", ")", "self", ".", "_waits", "=", "None", "for", "child", "in", "self", ".", "_children", ":", "child", "=", "child", "(", ")", "if", "child", "is", "None", ":", "continue", "child", ".", "abort", "(", "klass", ",", "exc", ",", "tb", ")", "self", ".", "_children", "=", "None", "self", ".", "_done", ".", "set", "(", ")" ]
Finish this future (maybe early) in an error state Takes a standard exception triple as arguments (like returned by ``sys.exc_info``) and will re-raise them as the value. Any :class:`Dependents` that are children of this one will also be aborted. :param class klass: the class of the exception :param Exception exc: the exception instance itself :param traceback tb: the traceback associated with the exception :raises: :class:`AlreadyComplete <junction.errors.AlreadyComplete>` if already complete
[ "Finish", "this", "future", "(", "maybe", "early", ")", "in", "an", "error", "state" ]
train
https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/futures.py#L80-L117
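abort() expects the (klass, exc, tb) triple the docstring mentions, and sys.exc_info() produces exactly that shape. A standalone illustration of building the triple; the fut.abort call is shown only as a comment since it needs a junction Future in hand:

import sys

try:
    1 / 0
except ZeroDivisionError:
    klass, exc, tb = sys.exc_info()
    print(klass.__name__, exc)  # ZeroDivisionError division by zero
    # with a junction Future: fut.abort(klass, exc, tb)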
teepark/junction
junction/futures.py
Future.on_finish
def on_finish(self, func):
    '''Assign a callback function to be run when successfully complete

    :param function func:
        A callback to run when complete. It will be given one argument
        (the value that has arrived), and its return value is ignored.
    '''
    if self._done.is_set():
        if self._failure is None:
            backend.schedule(func, args=(self._value,))
    else:
        self._cbacks.append(func)
python
def on_finish(self, func):
    '''Assign a callback function to be run when successfully complete

    :param function func:
        A callback to run when complete. It will be given one argument
        (the value that has arrived), and its return value is ignored.
    '''
    if self._done.is_set():
        if self._failure is None:
            backend.schedule(func, args=(self._value,))
    else:
        self._cbacks.append(func)
[ "def", "on_finish", "(", "self", ",", "func", ")", ":", "if", "self", ".", "_done", ".", "is_set", "(", ")", ":", "if", "self", ".", "_failure", "is", "None", ":", "backend", ".", "schedule", "(", "func", ",", "args", "=", "(", "self", ".", "_value", ",", ")", ")", "else", ":", "self", ".", "_cbacks", ".", "append", "(", "func", ")" ]
Assign a callback function to be run when successfully complete :param function func: A callback to run when complete. It will be given one argument (the value that has arrived), and its return value is ignored.
[ "Assign", "a", "callback", "function", "to", "be", "run", "when", "successfully", "complete" ]
train
https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/futures.py#L119-L130
teepark/junction
junction/futures.py
Future.on_abort
def on_abort(self, func):
    '''Assign a callback function to be run when :meth:`abort`\ed

    :param function func:
        A callback to run when aborted. It will be given three arguments:

        - ``klass``: the exception class
        - ``exc``: the exception instance
        - ``tb``: the traceback object associated with the exception
    '''
    if self._done.is_set():
        if self._failure is not None:
            backend.schedule(func, args=self._failure)
    else:
        self._errbacks.append(func)
python
def on_abort(self, func):
    '''Assign a callback function to be run when :meth:`abort`\ed

    :param function func:
        A callback to run when aborted. It will be given three arguments:

        - ``klass``: the exception class
        - ``exc``: the exception instance
        - ``tb``: the traceback object associated with the exception
    '''
    if self._done.is_set():
        if self._failure is not None:
            backend.schedule(func, args=self._failure)
    else:
        self._errbacks.append(func)
[ "def", "on_abort", "(", "self", ",", "func", ")", ":", "if", "self", ".", "_done", ".", "is_set", "(", ")", ":", "if", "self", ".", "_failure", "is", "not", "None", ":", "backend", ".", "schedule", "(", "func", ",", "args", "=", "self", ".", "_failure", ")", "else", ":", "self", ".", "_errbacks", ".", "append", "(", "func", ")" ]
Assign a callback function to be run when :meth:`abort`\ed :param function func: A callback to run when aborted. It will be given three arguments: - ``klass``: the exception class - ``exc``: the exception instance - ``tb``: the traceback object associated with the exception
[ "Assign", "a", "callback", "function", "to", "be", "run", "when", ":", "meth", ":", "abort", "\\", "ed" ]
train
https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/futures.py#L132-L146
teepark/junction
junction/futures.py
Future.after
def after(self, func=None, other_parents=None):
    '''Create a new Future whose completion depends on this one

    The new future will have a function that it calls once all its parents
    have completed, the return value of which will be its final value.

    There is a special case, however, in which the dependent future's
    callback returns a future or list of futures. In those cases, waiting
    on the dependent will also wait for all those futures, and the result
    (or list of results) of those future(s) will then be the final value.

    :param function func:
        The function to determine the value of the dependent future. It
        will take as many arguments as it has parents, and they will be the
        results of those futures.
    :param other_parents:
        A list of futures, all of which (along with this one) must be
        complete before the dependent's function runs.
    :type other_parents: list or None

    :returns:
        a :class:`Dependent`, which is a subclass of :class:`Future` and
        has all its capabilities.
    '''
    parents = [self]
    if other_parents is not None:
        parents += other_parents
    return after(parents, func)
python
def after(self, func=None, other_parents=None):
    '''Create a new Future whose completion depends on this one

    The new future will have a function that it calls once all its parents
    have completed, the return value of which will be its final value.

    There is a special case, however, in which the dependent future's
    callback returns a future or list of futures. In those cases, waiting
    on the dependent will also wait for all those futures, and the result
    (or list of results) of those future(s) will then be the final value.

    :param function func:
        The function to determine the value of the dependent future. It
        will take as many arguments as it has parents, and they will be the
        results of those futures.
    :param other_parents:
        A list of futures, all of which (along with this one) must be
        complete before the dependent's function runs.
    :type other_parents: list or None

    :returns:
        a :class:`Dependent`, which is a subclass of :class:`Future` and
        has all its capabilities.
    '''
    parents = [self]
    if other_parents is not None:
        parents += other_parents
    return after(parents, func)
[ "def", "after", "(", "self", ",", "func", "=", "None", ",", "other_parents", "=", "None", ")", ":", "parents", "=", "[", "self", "]", "if", "other_parents", "is", "not", "None", ":", "parents", "+=", "other_parents", "return", "after", "(", "parents", ",", "func", ")" ]
Create a new Future whose completion depends on this one The new future will have a function that it calls once all its parents have completed, the return value of which will be its final value. There is a special case, however, in which the dependent future's callback returns a future or list of futures. In those cases, waiting on the dependent will also wait for all those futures, and the result (or list of results) of those future(s) will then be the final value. :param function func: The function to determine the value of the dependent future. It will take as many arguments as it has parents, and they will be the results of those futures. :param other_parents: A list of futures, all of which (along with this one) must be complete before the dependent's function runs. :type other_parents: list or None :returns: a :class:`Dependent`, which is a subclass of :class:`Future` and has all its capabilities.
[ "Create", "a", "new", "Future", "whose", "completion", "depends", "on", "this", "one" ]
train
https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/futures.py#L161-L188
teepark/junction
junction/futures.py
RPC.partial_results
def partial_results(self):
    '''The results that the RPC has received *so far*

    This may also be the complete results if :attr:`complete` is ``True``.
    '''
    results = []
    for r in self._results:
        if isinstance(r, Exception):
            results.append(type(r)(*deepcopy(r.args)))
        elif hasattr(r, "__iter__") and not hasattr(r, "__len__"):
            # pass generators straight through
            results.append(r)
        else:
            results.append(deepcopy(r))
    return results
python
def partial_results(self):
    '''The results that the RPC has received *so far*

    This may also be the complete results if :attr:`complete` is ``True``.
    '''
    results = []
    for r in self._results:
        if isinstance(r, Exception):
            results.append(type(r)(*deepcopy(r.args)))
        elif hasattr(r, "__iter__") and not hasattr(r, "__len__"):
            # pass generators straight through
            results.append(r)
        else:
            results.append(deepcopy(r))
    return results
[ "def", "partial_results", "(", "self", ")", ":", "results", "=", "[", "]", "for", "r", "in", "self", ".", "_results", ":", "if", "isinstance", "(", "r", ",", "Exception", ")", ":", "results", ".", "append", "(", "type", "(", "r", ")", "(", "*", "deepcopy", "(", "r", ".", "args", ")", ")", ")", "elif", "hasattr", "(", "r", ",", "\"__iter__\"", ")", "and", "not", "hasattr", "(", "r", ",", "\"__len__\"", ")", ":", "# pass generators straight through", "results", ".", "append", "(", "r", ")", "else", ":", "results", ".", "append", "(", "deepcopy", "(", "r", ")", ")", "return", "results" ]
The results that the RPC has received *so far* This may also be the complete results if :attr:`complete` is ``True``.
[ "The", "results", "that", "the", "RPC", "has", "received", "*", "so", "far", "*" ]
train
https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/futures.py#L236-L250
20c/facsimile
facsimile/install.py
Install.os_path_transform
def os_path_transform(self, s, sep=os.path.sep):
    """transforms any os path into unix style"""
    if sep == '/':
        return s
    else:
        return s.replace(sep, '/')
python
def os_path_transform(self, s, sep=os.path.sep):
    """transforms any os path into unix style"""
    if sep == '/':
        return s
    else:
        return s.replace(sep, '/')
[ "def", "os_path_transform", "(", "self", ",", "s", ",", "sep", "=", "os", ".", "path", ".", "sep", ")", ":", "if", "sep", "==", "'/'", ":", "return", "s", "else", ":", "return", "s", ".", "replace", "(", "sep", ",", "'/'", ")" ]
transforms any os path into unix style
[ "transforms", "any", "os", "path", "into", "unix", "style" ]
train
https://github.com/20c/facsimile/blob/570e28568475d5be1b1a2c95b8e941fbfbc167eb/facsimile/install.py#L96-L101
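The sep parameter only matters off-POSIX; a standalone check that forces the Windows separator regardless of host OS (the path is made up):

import ntpath

# mirror of os_path_transform with the Windows separator passed explicitly
def os_path_transform(s, sep=ntpath.sep):
    return s if sep == '/' else s.replace(sep, '/')

print(os_path_transform('src\\pkg\\mod.py'))  # src/pkg/mod.py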
20c/facsimile
facsimile/install.py
Install.transform
def transform(self, tr_list, files):
    """replaces $tokens$ with values; will be replaced with config rendering"""
    singular = False
    if not isinstance(files, list) and not isinstance(files, tuple):
        singular = True
        files = [files]

    for _find, _replace in tr_list:
        files = [opt.replace(_find, _replace) for opt in files]

    if singular:
        return files[0]
    else:
        return files
python
def transform(self, tr_list, files):
    """replaces $tokens$ with values; will be replaced with config rendering"""
    singular = False
    if not isinstance(files, list) and not isinstance(files, tuple):
        singular = True
        files = [files]

    for _find, _replace in tr_list:
        files = [opt.replace(_find, _replace) for opt in files]

    if singular:
        return files[0]
    else:
        return files
[ "def", "transform", "(", "self", ",", "tr_list", ",", "files", ")", ":", "singular", "=", "False", "if", "not", "isinstance", "(", "files", ",", "list", ")", "and", "not", "isinstance", "(", "files", ",", "tuple", ")", ":", "singular", "=", "True", "files", "=", "[", "files", "]", "for", "_find", ",", "_replace", "in", "tr_list", ":", "files", "=", "[", "opt", ".", "replace", "(", "_find", ",", "_replace", ")", "for", "opt", "in", "files", "]", "if", "singular", ":", "return", "files", "[", "0", "]", "else", ":", "return", "files" ]
replaces $tokens$ with values; will be replaced with config rendering
[ "replaces", "$tokens$", "with", "values", "will", "be", "replaced", "with", "config", "rendering" ]
train
https://github.com/20c/facsimile/blob/570e28568475d5be1b1a2c95b8e941fbfbc167eb/facsimile/install.py#L103-L119
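A standalone run of the same find/replace loop makes the $token$ behavior concrete; the token names below are hypothetical, not facsimile's real config keys:

def transform(tr_list, files):
    # same find/replace pass as Install.transform, minus the class
    singular = not isinstance(files, (list, tuple))
    if singular:
        files = [files]
    for _find, _replace in tr_list:
        files = [opt.replace(_find, _replace) for opt in files]
    return files[0] if singular else files

tr_list = [('$prefix$', '/usr/local'), ('$name$', 'facsimile')]
print(transform(tr_list, '$prefix$/share/$name$/config.yml'))
# /usr/local/share/facsimile/config.yml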
20c/facsimile
facsimile/install.py
Install.resolve_dst
def resolve_dst(self, dst_dir, src):
    """finds the destination based on source

    if source is an absolute path, and there's no pattern, it copies the
    file to base dst_dir
    """
    if os.path.isabs(src):
        return os.path.join(dst_dir, os.path.basename(src))
    return os.path.join(dst_dir, src)
python
def resolve_dst(self, dst_dir, src):
    """finds the destination based on source

    if source is an absolute path, and there's no pattern, it copies the
    file to base dst_dir
    """
    if os.path.isabs(src):
        return os.path.join(dst_dir, os.path.basename(src))
    return os.path.join(dst_dir, src)
[ "def", "resolve_dst", "(", "self", ",", "dst_dir", ",", "src", ")", ":", "if", "os", ".", "path", ".", "isabs", "(", "src", ")", ":", "return", "os", ".", "path", ".", "join", "(", "dst_dir", ",", "os", ".", "path", ".", "basename", "(", "src", ")", ")", "return", "os", ".", "path", ".", "join", "(", "dst_dir", ",", "src", ")" ]
finds the destination based on source if source is an absolute path, and there's no pattern, it copies the file to base dst_dir
[ "finds", "the", "destination", "based", "on", "source", "if", "source", "is", "an", "absolute", "path", "and", "there", "s", "no", "pattern", "it", "copies", "the", "file", "to", "base", "dst_dir" ]
train
https://github.com/20c/facsimile/blob/570e28568475d5be1b1a2c95b8e941fbfbc167eb/facsimile/install.py#L133-L140
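The two branches reduce to a pair of os.path.join calls; a standalone check (paths illustrative, POSIX separators assumed):

import os

# absolute source: flattened to its basename under dst_dir
print(os.path.join('/opt/app', os.path.basename('/etc/config.yml')))
# /opt/app/config.yml

# relative source: kept as a subpath of dst_dir
print(os.path.join('/opt/app', 'conf/config.yml'))
# /opt/app/conf/config.yml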
BlackEarth/bl
bl/zip.py
ZIP.write
def write(self, fn=None):
    """copy the zip file from its filename to the given filename."""
    fn = fn or self.fn
    if not os.path.exists(os.path.dirname(fn)):
        os.makedirs(os.path.dirname(fn))
    f = open(self.fn, 'rb')
    b = f.read()
    f.close()
    f = open(fn, 'wb')
    f.write(b)
    f.close()
python
def write(self, fn=None):
    """copy the zip file from its filename to the given filename."""
    fn = fn or self.fn
    if not os.path.exists(os.path.dirname(fn)):
        os.makedirs(os.path.dirname(fn))
    f = open(self.fn, 'rb')
    b = f.read()
    f.close()
    f = open(fn, 'wb')
    f.write(b)
    f.close()
[ "def", "write", "(", "self", ",", "fn", "=", "None", ")", ":", "fn", "=", "fn", "or", "self", ".", "fn", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "dirname", "(", "fn", ")", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "dirname", "(", "fn", ")", ")", "f", "=", "open", "(", "self", ".", "fn", ",", "'rb'", ")", "b", "=", "f", ".", "read", "(", ")", "f", ".", "close", "(", ")", "f", "=", "open", "(", "fn", ",", "'wb'", ")", "f", ".", "write", "(", "b", ")", "f", ".", "close", "(", ")" ]
copy the zip file from its filename to the given filename.
[ "copy", "the", "zip", "file", "from", "its", "filename", "to", "the", "given", "filename", "." ]
train
https://github.com/BlackEarth/bl/blob/edf6f37dac718987260b90ad0e7f7fe084a7c1a3/bl/zip.py#L20-L30
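The manual read/close/write/close sequence is equivalent to a shutil copy; a minimal sketch of the same operation as a free function (filenames hypothetical):

import os
import shutil

def write_copy(src_fn, dst_fn):
    # create the destination directory if needed, then copy bytes across
    os.makedirs(os.path.dirname(dst_fn), exist_ok=True)
    shutil.copyfile(src_fn, dst_fn)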
michaeltcoelho/pagarme.py
pagarme/resources/resource.py
Resource.assign
def assign(self, attrs):
    """Merge new attributes"""
    for k, v in attrs.items():
        setattr(self, k, v)
python
def assign(self, attrs):
    """Merge new attributes"""
    for k, v in attrs.items():
        setattr(self, k, v)
[ "def", "assign", "(", "self", ",", "attrs", ")", ":", "for", "k", ",", "v", "in", "attrs", ".", "items", "(", ")", ":", "setattr", "(", "self", ",", "k", ",", "v", ")" ]
Merge new attributes
[ "Merge", "new", "attributes" ]
train
https://github.com/michaeltcoelho/pagarme.py/blob/469fdd6e61e7c24a9eaf23d474d25316c3b5450b/pagarme/resources/resource.py#L25-L29
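The merge is plain setattr in a loop; a standalone equivalent with made-up attribute names:

class Resource:
    def assign(self, attrs):
        # merge each key/value pair into the instance namespace
        for k, v in attrs.items():
            setattr(self, k, v)

r = Resource()
r.assign({'id': 1, 'status': 'paid'})
print(r.id, r.status)  # 1 paid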
willkg/socorro-siggen
siggen/rules.py
CSignatureTool.normalize_rust_function
def normalize_rust_function(self, function, line):
    """Normalizes a single rust frame with a function"""
    # Drop the prefix and return type if there is any
    function = drop_prefix_and_return_type(function)

    # Collapse types
    function = collapse(
        function,
        open_string='<',
        close_string='>',
        replacement='<T>',
        exceptions=(' as ',)
    )

    # Collapse arguments
    if self.collapse_arguments:
        function = collapse(
            function,
            open_string='(',
            close_string=')',
            replacement=''
        )

    if self.signatures_with_line_numbers_re.match(function):
        function = '{}:{}'.format(function, line)

    # Remove spaces before all stars, ampersands, and commas
    function = self.fixup_space.sub('', function)

    # Ensure a space after commas
    function = self.fixup_comma.sub(', ', function)

    # Remove rust-generated uniqueness hashes
    function = self.fixup_hash.sub('', function)

    return function
python
def normalize_rust_function(self, function, line):
    """Normalizes a single rust frame with a function"""
    # Drop the prefix and return type if there is any
    function = drop_prefix_and_return_type(function)

    # Collapse types
    function = collapse(
        function,
        open_string='<',
        close_string='>',
        replacement='<T>',
        exceptions=(' as ',)
    )

    # Collapse arguments
    if self.collapse_arguments:
        function = collapse(
            function,
            open_string='(',
            close_string=')',
            replacement=''
        )

    if self.signatures_with_line_numbers_re.match(function):
        function = '{}:{}'.format(function, line)

    # Remove spaces before all stars, ampersands, and commas
    function = self.fixup_space.sub('', function)

    # Ensure a space after commas
    function = self.fixup_comma.sub(', ', function)

    # Remove rust-generated uniqueness hashes
    function = self.fixup_hash.sub('', function)

    return function
[ "def", "normalize_rust_function", "(", "self", ",", "function", ",", "line", ")", ":", "# Drop the prefix and return type if there is any", "function", "=", "drop_prefix_and_return_type", "(", "function", ")", "# Collapse types", "function", "=", "collapse", "(", "function", ",", "open_string", "=", "'<'", ",", "close_string", "=", "'>'", ",", "replacement", "=", "'<T>'", ",", "exceptions", "=", "(", "' as '", ",", ")", ")", "# Collapse arguments", "if", "self", ".", "collapse_arguments", ":", "function", "=", "collapse", "(", "function", ",", "open_string", "=", "'('", ",", "close_string", "=", "')'", ",", "replacement", "=", "''", ")", "if", "self", ".", "signatures_with_line_numbers_re", ".", "match", "(", "function", ")", ":", "function", "=", "'{}:{}'", ".", "format", "(", "function", ",", "line", ")", "# Remove spaces before all stars, ampersands, and commas", "function", "=", "self", ".", "fixup_space", ".", "sub", "(", "''", ",", "function", ")", "# Ensure a space after commas", "function", "=", "self", ".", "fixup_comma", ".", "sub", "(", "', '", ",", "function", ")", "# Remove rust-generated uniqueness hashes", "function", "=", "self", ".", "fixup_hash", ".", "sub", "(", "''", ",", "function", ")", "return", "function" ]
Normalizes a single rust frame with a function
[ "Normalizes", "a", "single", "rust", "frame", "with", "a", "function" ]
train
https://github.com/willkg/socorro-siggen/blob/db7e3233e665a458a961c48da22e93a69b1d08d6/siggen/rules.py#L121-L156
willkg/socorro-siggen
siggen/rules.py
CSignatureTool.normalize_cpp_function
def normalize_cpp_function(self, function, line):
    """Normalizes a single cpp frame with a function"""
    # Drop member function cv/ref qualifiers like const, const&, &, and &&
    for ref in ('const', 'const&', '&&', '&'):
        if function.endswith(ref):
            function = function[:-len(ref)].strip()

    # Drop the prefix and return type if there is any if it's not operator
    # overloading--operator overloading syntax doesn't have the things
    # we're dropping here and can look curious, so don't try
    if '::operator' not in function:
        function = drop_prefix_and_return_type(function)

    # Collapse types
    function = collapse(
        function,
        open_string='<',
        close_string='>',
        replacement='<T>',
        exceptions=('name omitted', 'IPC::ParamTraits')
    )

    # Collapse arguments
    if self.collapse_arguments:
        function = collapse(
            function,
            open_string='(',
            close_string=')',
            replacement='',
            exceptions=('anonymous namespace', 'operator')
        )

    # Remove PGO cold block labels like "[clone .cold.222]". bug #1397926
    if 'clone .cold' in function:
        function = collapse(
            function,
            open_string='[',
            close_string=']',
            replacement=''
        )

    if self.signatures_with_line_numbers_re.match(function):
        function = '{}:{}'.format(function, line)

    # Remove spaces before all stars, ampersands, and commas
    function = self.fixup_space.sub('', function)

    # Ensure a space after commas
    function = self.fixup_comma.sub(', ', function)

    return function
python
def normalize_cpp_function(self, function, line):
    """Normalizes a single cpp frame with a function"""
    # Drop member function cv/ref qualifiers like const, const&, &, and &&
    for ref in ('const', 'const&', '&&', '&'):
        if function.endswith(ref):
            function = function[:-len(ref)].strip()

    # Drop the prefix and return type if there is any if it's not operator
    # overloading--operator overloading syntax doesn't have the things
    # we're dropping here and can look curious, so don't try
    if '::operator' not in function:
        function = drop_prefix_and_return_type(function)

    # Collapse types
    function = collapse(
        function,
        open_string='<',
        close_string='>',
        replacement='<T>',
        exceptions=('name omitted', 'IPC::ParamTraits')
    )

    # Collapse arguments
    if self.collapse_arguments:
        function = collapse(
            function,
            open_string='(',
            close_string=')',
            replacement='',
            exceptions=('anonymous namespace', 'operator')
        )

    # Remove PGO cold block labels like "[clone .cold.222]". bug #1397926
    if 'clone .cold' in function:
        function = collapse(
            function,
            open_string='[',
            close_string=']',
            replacement=''
        )

    if self.signatures_with_line_numbers_re.match(function):
        function = '{}:{}'.format(function, line)

    # Remove spaces before all stars, ampersands, and commas
    function = self.fixup_space.sub('', function)

    # Ensure a space after commas
    function = self.fixup_comma.sub(', ', function)

    return function
[ "def", "normalize_cpp_function", "(", "self", ",", "function", ",", "line", ")", ":", "# Drop member function cv/ref qualifiers like const, const&, &, and &&", "for", "ref", "in", "(", "'const'", ",", "'const&'", ",", "'&&'", ",", "'&'", ")", ":", "if", "function", ".", "endswith", "(", "ref", ")", ":", "function", "=", "function", "[", ":", "-", "len", "(", "ref", ")", "]", ".", "strip", "(", ")", "# Drop the prefix and return type if there is any if it's not operator", "# overloading--operator overloading syntax doesn't have the things", "# we're dropping here and can look curious, so don't try", "if", "'::operator'", "not", "in", "function", ":", "function", "=", "drop_prefix_and_return_type", "(", "function", ")", "# Collapse types", "function", "=", "collapse", "(", "function", ",", "open_string", "=", "'<'", ",", "close_string", "=", "'>'", ",", "replacement", "=", "'<T>'", ",", "exceptions", "=", "(", "'name omitted'", ",", "'IPC::ParamTraits'", ")", ")", "# Collapse arguments", "if", "self", ".", "collapse_arguments", ":", "function", "=", "collapse", "(", "function", ",", "open_string", "=", "'('", ",", "close_string", "=", "')'", ",", "replacement", "=", "''", ",", "exceptions", "=", "(", "'anonymous namespace'", ",", "'operator'", ")", ")", "# Remove PGO cold block labels like \"[clone .cold.222]\". bug #1397926", "if", "'clone .cold'", "in", "function", ":", "function", "=", "collapse", "(", "function", ",", "open_string", "=", "'['", ",", "close_string", "=", "']'", ",", "replacement", "=", "''", ")", "if", "self", ".", "signatures_with_line_numbers_re", ".", "match", "(", "function", ")", ":", "function", "=", "'{}:{}'", ".", "format", "(", "function", ",", "line", ")", "# Remove spaces before all stars, ampersands, and commas", "function", "=", "self", ".", "fixup_space", ".", "sub", "(", "''", ",", "function", ")", "# Ensure a space after commas", "function", "=", "self", ".", "fixup_comma", ".", "sub", "(", "', '", ",", "function", ")", "return", "function" ]
Normalizes a single cpp frame with a function
[ "Normalizes", "a", "single", "cpp", "frame", "with", "a", "function" ]
train
https://github.com/willkg/socorro-siggen/blob/db7e3233e665a458a961c48da22e93a69b1d08d6/siggen/rules.py#L158-L208
willkg/socorro-siggen
siggen/rules.py
CSignatureTool.normalize_frame
def normalize_frame(
        self,
        module=None,
        function=None,
        file=None,
        line=None,
        module_offset=None,
        offset=None,
        normalized=None,
        **kwargs  # eat any extra kwargs passed in
):
    """Normalizes a single frame

    Returns a structured conglomeration of the input parameters to serve as
    a signature. The parameter names of this function reflect the exact
    names of the fields from the jsonMDSW frame output. This allows this
    function to be invoked by passing a frame as ``**a_frame``.

    Sometimes, a frame may already have a normalized version cached. If
    that exists, return it instead.
    """
    # If there's a cached normalized value, use that so we don't spend time
    # figuring it out again
    if normalized is not None:
        return normalized

    if function:
        # If there's a filename and it ends in .rs, then normalize using
        # Rust rules
        if file and (parse_source_file(file) or '').endswith('.rs'):
            return self.normalize_rust_function(
                function=function,
                line=line
            )

        # Otherwise normalize it with C/C++ rules
        return self.normalize_cpp_function(
            function=function,
            line=line
        )

    # If there's a file and line number, use that
    if file and line:
        filename = file.rstrip('/\\')
        if '\\' in filename:
            file = filename.rsplit('\\')[-1]
        else:
            file = filename.rsplit('/')[-1]
        return '{}#{}'.format(file, line)

    # If there's an offset and no module/module_offset, use that
    if not module and not module_offset and offset:
        return '@{}'.format(offset)

    # Return module/module_offset
    return '{}@{}'.format(module or '', module_offset)
python
def normalize_frame(
        self,
        module=None,
        function=None,
        file=None,
        line=None,
        module_offset=None,
        offset=None,
        normalized=None,
        **kwargs  # eat any extra kwargs passed in
):
    """Normalizes a single frame

    Returns a structured conglomeration of the input parameters to serve as
    a signature. The parameter names of this function reflect the exact
    names of the fields from the jsonMDSW frame output. This allows this
    function to be invoked by passing a frame as ``**a_frame``.

    Sometimes, a frame may already have a normalized version cached. If
    that exists, return it instead.
    """
    # If there's a cached normalized value, use that so we don't spend time
    # figuring it out again
    if normalized is not None:
        return normalized

    if function:
        # If there's a filename and it ends in .rs, then normalize using
        # Rust rules
        if file and (parse_source_file(file) or '').endswith('.rs'):
            return self.normalize_rust_function(
                function=function,
                line=line
            )

        # Otherwise normalize it with C/C++ rules
        return self.normalize_cpp_function(
            function=function,
            line=line
        )

    # If there's a file and line number, use that
    if file and line:
        filename = file.rstrip('/\\')
        if '\\' in filename:
            file = filename.rsplit('\\')[-1]
        else:
            file = filename.rsplit('/')[-1]
        return '{}#{}'.format(file, line)

    # If there's an offset and no module/module_offset, use that
    if not module and not module_offset and offset:
        return '@{}'.format(offset)

    # Return module/module_offset
    return '{}@{}'.format(module or '', module_offset)
[ "def", "normalize_frame", "(", "self", ",", "module", "=", "None", ",", "function", "=", "None", ",", "file", "=", "None", ",", "line", "=", "None", ",", "module_offset", "=", "None", ",", "offset", "=", "None", ",", "normalized", "=", "None", ",", "*", "*", "kwargs", "# eat any extra kwargs passed in", ")", ":", "# If there's a cached normalized value, use that so we don't spend time", "# figuring it out again", "if", "normalized", "is", "not", "None", ":", "return", "normalized", "if", "function", ":", "# If there's a filename and it ends in .rs, then normalize using", "# Rust rules", "if", "file", "and", "(", "parse_source_file", "(", "file", ")", "or", "''", ")", ".", "endswith", "(", "'.rs'", ")", ":", "return", "self", ".", "normalize_rust_function", "(", "function", "=", "function", ",", "line", "=", "line", ")", "# Otherwise normalize it with C/C++ rules", "return", "self", ".", "normalize_cpp_function", "(", "function", "=", "function", ",", "line", "=", "line", ")", "# If there's a file and line number, use that", "if", "file", "and", "line", ":", "filename", "=", "file", ".", "rstrip", "(", "'/\\\\'", ")", "if", "'\\\\'", "in", "filename", ":", "file", "=", "filename", ".", "rsplit", "(", "'\\\\'", ")", "[", "-", "1", "]", "else", ":", "file", "=", "filename", ".", "rsplit", "(", "'/'", ")", "[", "-", "1", "]", "return", "'{}#{}'", ".", "format", "(", "file", ",", "line", ")", "# If there's an offset and no module/module_offset, use that", "if", "not", "module", "and", "not", "module_offset", "and", "offset", ":", "return", "'@{}'", ".", "format", "(", "offset", ")", "# Return module/module_offset", "return", "'{}@{}'", ".", "format", "(", "module", "or", "''", ",", "module_offset", ")" ]
Normalizes a single frame Returns a structured conglomeration of the input parameters to serve as a signature. The parameter names of this function reflect the exact names of the fields from the jsonMDSW frame output. This allows this function to be invoked by passing a frame as ``**a_frame``. Sometimes, a frame may already have a normalized version cached. If that exists, return it instead.
[ "Normalizes", "a", "single", "frame" ]
train
https://github.com/willkg/socorro-siggen/blob/db7e3233e665a458a961c48da22e93a69b1d08d6/siggen/rules.py#L210-L266
willkg/socorro-siggen
siggen/rules.py
CSignatureTool._do_generate
def _do_generate(self, source_list, hang_type, crashed_thread, delimiter=' | '):
    """
    each element of signatureList names a frame in the crash stack; and is:
      - a prefix of a relevant frame: Append this element to the signature
      - a relevant frame: Append this element and stop looking
      - irrelevant: Append this element only after seeing a prefix frame

    The signature is a ' | ' separated string of frame names.
    """
    notes = []
    debug_notes = []

    # shorten source_list to the first signatureSentinel
    sentinel_locations = []
    for a_sentinel in self.signature_sentinels:
        if type(a_sentinel) == tuple:
            a_sentinel, condition_fn = a_sentinel
            if not condition_fn(source_list):
                continue
        try:
            sentinel_locations.append(source_list.index(a_sentinel))
        except ValueError:
            pass

    if sentinel_locations:
        min_index = min(sentinel_locations)
        debug_notes.append(
            'sentinel; starting at "{}" index {}'.format(source_list[min_index], min_index)
        )
        source_list = source_list[min_index:]

    # Get all the relevant frame signatures. Note that these function signatures
    # have already been normalized at this point.
    new_signature_list = []
    for a_signature in source_list:
        # If the signature matches the irrelevant signatures regex, skip to the next frame.
        if self.irrelevant_signature_re.match(a_signature):
            debug_notes.append('irrelevant; ignoring: "{}"'.format(a_signature))
            continue

        # If the frame signature is a dll, remove the @xxxxx part.
        if '.dll' in a_signature.lower():
            a_signature = a_signature.split('@')[0]

            # If this trimmed DLL signature is the same as the previous frame's, skip it.
            if new_signature_list and a_signature == new_signature_list[-1]:
                continue

        new_signature_list.append(a_signature)

        # If the signature does not match the prefix signatures regex, then it is the last
        # one we add to the list.
        if not self.prefix_signature_re.match(a_signature):
            debug_notes.append('not a prefix; stop: "{}"'.format(a_signature))
            break

        debug_notes.append('prefix; continue iterating: "{}"'.format(a_signature))

    # Add a special marker for hang crash reports.
    if hang_type:
        debug_notes.append(
            'hang_type {}: prepending {}'.format(hang_type, self.hang_prefixes[hang_type])
        )
        new_signature_list.insert(0, self.hang_prefixes[hang_type])

    signature = delimiter.join(new_signature_list)

    # Handle empty signatures to explain why we failed generating them.
    if signature == '' or signature is None:
        if crashed_thread is None:
            notes.append(
                "CSignatureTool: No signature could be created because we do not know which "
                "thread crashed"
            )
            signature = "EMPTY: no crashing thread identified"
        else:
            notes.append(
                "CSignatureTool: No proper signature could be created because no good data "
                "for the crashing thread ({}) was found".format(crashed_thread)
            )
            try:
                signature = source_list[0]
            except IndexError:
                signature = "EMPTY: no frame data available"

    return signature, notes, debug_notes
python
def _do_generate(self, source_list, hang_type, crashed_thread, delimiter=' | '):
    """
    each element of signatureList names a frame in the crash stack; and is:
      - a prefix of a relevant frame: Append this element to the signature
      - a relevant frame: Append this element and stop looking
      - irrelevant: Append this element only after seeing a prefix frame

    The signature is a ' | ' separated string of frame names.
    """
    notes = []
    debug_notes = []

    # shorten source_list to the first signatureSentinel
    sentinel_locations = []
    for a_sentinel in self.signature_sentinels:
        if type(a_sentinel) == tuple:
            a_sentinel, condition_fn = a_sentinel
            if not condition_fn(source_list):
                continue
        try:
            sentinel_locations.append(source_list.index(a_sentinel))
        except ValueError:
            pass

    if sentinel_locations:
        min_index = min(sentinel_locations)
        debug_notes.append(
            'sentinel; starting at "{}" index {}'.format(source_list[min_index], min_index)
        )
        source_list = source_list[min_index:]

    # Get all the relevant frame signatures. Note that these function signatures
    # have already been normalized at this point.
    new_signature_list = []
    for a_signature in source_list:
        # If the signature matches the irrelevant signatures regex, skip to the next frame.
        if self.irrelevant_signature_re.match(a_signature):
            debug_notes.append('irrelevant; ignoring: "{}"'.format(a_signature))
            continue

        # If the frame signature is a dll, remove the @xxxxx part.
        if '.dll' in a_signature.lower():
            a_signature = a_signature.split('@')[0]

            # If this trimmed DLL signature is the same as the previous frame's, skip it.
            if new_signature_list and a_signature == new_signature_list[-1]:
                continue

        new_signature_list.append(a_signature)

        # If the signature does not match the prefix signatures regex, then it is the last
        # one we add to the list.
        if not self.prefix_signature_re.match(a_signature):
            debug_notes.append('not a prefix; stop: "{}"'.format(a_signature))
            break

        debug_notes.append('prefix; continue iterating: "{}"'.format(a_signature))

    # Add a special marker for hang crash reports.
    if hang_type:
        debug_notes.append(
            'hang_type {}: prepending {}'.format(hang_type, self.hang_prefixes[hang_type])
        )
        new_signature_list.insert(0, self.hang_prefixes[hang_type])

    signature = delimiter.join(new_signature_list)

    # Handle empty signatures to explain why we failed generating them.
    if signature == '' or signature is None:
        if crashed_thread is None:
            notes.append(
                "CSignatureTool: No signature could be created because we do not know which "
                "thread crashed"
            )
            signature = "EMPTY: no crashing thread identified"
        else:
            notes.append(
                "CSignatureTool: No proper signature could be created because no good data "
                "for the crashing thread ({}) was found".format(crashed_thread)
            )
            try:
                signature = source_list[0]
            except IndexError:
                signature = "EMPTY: no frame data available"

    return signature, notes, debug_notes
[ "def", "_do_generate", "(", "self", ",", "source_list", ",", "hang_type", ",", "crashed_thread", ",", "delimiter", "=", "' | '", ")", ":", "notes", "=", "[", "]", "debug_notes", "=", "[", "]", "# shorten source_list to the first signatureSentinel", "sentinel_locations", "=", "[", "]", "for", "a_sentinel", "in", "self", ".", "signature_sentinels", ":", "if", "type", "(", "a_sentinel", ")", "==", "tuple", ":", "a_sentinel", ",", "condition_fn", "=", "a_sentinel", "if", "not", "condition_fn", "(", "source_list", ")", ":", "continue", "try", ":", "sentinel_locations", ".", "append", "(", "source_list", ".", "index", "(", "a_sentinel", ")", ")", "except", "ValueError", ":", "pass", "if", "sentinel_locations", ":", "min_index", "=", "min", "(", "sentinel_locations", ")", "debug_notes", ".", "append", "(", "'sentinel; starting at \"{}\" index {}'", ".", "format", "(", "source_list", "[", "min_index", "]", ",", "min_index", ")", ")", "source_list", "=", "source_list", "[", "min_index", ":", "]", "# Get all the relevant frame signatures. Note that these function signatures", "# have already been normalized at this point.", "new_signature_list", "=", "[", "]", "for", "a_signature", "in", "source_list", ":", "# If the signature matches the irrelevant signatures regex, skip to the next frame.", "if", "self", ".", "irrelevant_signature_re", ".", "match", "(", "a_signature", ")", ":", "debug_notes", ".", "append", "(", "'irrelevant; ignoring: \"{}\"'", ".", "format", "(", "a_signature", ")", ")", "continue", "# If the frame signature is a dll, remove the @xxxxx part.", "if", "'.dll'", "in", "a_signature", ".", "lower", "(", ")", ":", "a_signature", "=", "a_signature", ".", "split", "(", "'@'", ")", "[", "0", "]", "# If this trimmed DLL signature is the same as the previous frame's, skip it.", "if", "new_signature_list", "and", "a_signature", "==", "new_signature_list", "[", "-", "1", "]", ":", "continue", "new_signature_list", ".", "append", "(", "a_signature", ")", "# If the signature does not match the prefix signatures regex, then it is the last", "# one we add to the list.", "if", "not", "self", ".", "prefix_signature_re", ".", "match", "(", "a_signature", ")", ":", "debug_notes", ".", "append", "(", "'not a prefix; stop: \"{}\"'", ".", "format", "(", "a_signature", ")", ")", "break", "debug_notes", ".", "append", "(", "'prefix; continue iterating: \"{}\"'", ".", "format", "(", "a_signature", ")", ")", "# Add a special marker for hang crash reports.", "if", "hang_type", ":", "debug_notes", ".", "append", "(", "'hang_type {}: prepending {}'", ".", "format", "(", "hang_type", ",", "self", ".", "hang_prefixes", "[", "hang_type", "]", ")", ")", "new_signature_list", ".", "insert", "(", "0", ",", "self", ".", "hang_prefixes", "[", "hang_type", "]", ")", "signature", "=", "delimiter", ".", "join", "(", "new_signature_list", ")", "# Handle empty signatures to explain why we failed generating them.", "if", "signature", "==", "''", "or", "signature", "is", "None", ":", "if", "crashed_thread", "is", "None", ":", "notes", ".", "append", "(", "\"CSignatureTool: No signature could be created because we do not know which \"", "\"thread crashed\"", ")", "signature", "=", "\"EMPTY: no crashing thread identified\"", "else", ":", "notes", ".", "append", "(", "\"CSignatureTool: No proper signature could be created because no good data \"", "\"for the crashing thread ({}) was found\"", ".", "format", "(", "crashed_thread", ")", ")", "try", ":", "signature", "=", "source_list", "[", "0", "]", "except", "IndexError", 
":", "signature", "=", "\"EMPTY: no frame data available\"", "return", "signature", ",", "notes", ",", "debug_notes" ]
each element of signatureList names a frame in the crash stack; and is: - a prefix of a relevant frame: Append this element to the signature - a relevant frame: Append this element and stop looking - irrelevant: Append this element only after seeing a prefix frame The signature is a ' | ' separated string of frame names.
[ "each", "element", "of", "signatureList", "names", "a", "frame", "in", "the", "crash", "stack", ";", "and", "is", ":", "-", "a", "prefix", "of", "a", "relevant", "frame", ":", "Append", "this", "element", "to", "the", "signature", "-", "a", "relevant", "frame", ":", "Append", "this", "element", "and", "stop", "looking", "-", "irrelevant", ":", "Append", "this", "element", "only", "after", "seeing", "a", "prefix", "frame", "The", "signature", "is", "a", "|", "separated", "string", "of", "frame", "names", "." ]
train
https://github.com/willkg/socorro-siggen/blob/db7e3233e665a458a961c48da22e93a69b1d08d6/siggen/rules.py#L268-L351
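A minimal standalone sketch of the frame-walking loop recorded above; the two regexes are invented stand-ins, since the real CSignatureTool compiles them from its configured irrelevant- and prefix-signature lists:

import re

IRRELEVANT_RE = re.compile(r'^(KiFastSystemCallRet|RaiseException)$')  # assumed examples
PREFIX_RE = re.compile(r'^(NS_DebugBreak|mozalloc_abort)$')            # assumed examples

def sketch_signature(frames, delimiter=' | '):
    picked = []
    for frame in frames:
        if IRRELEVANT_RE.match(frame):
            continue                      # irrelevant frames are skipped outright
        if '.dll' in frame.lower():
            frame = frame.split('@')[0]   # drop the @xxxxx offset from DLL frames
        if picked and frame == picked[-1]:
            continue                      # collapse duplicate trimmed DLL frames
        picked.append(frame)
        if not PREFIX_RE.match(frame):
            break                         # first non-prefix relevant frame ends it
    return delimiter.join(picked)

frames = ['KiFastSystemCallRet', 'NS_DebugBreak', 'mozalloc_abort', 'js::RunScript']
print(sketch_signature(frames))  # NS_DebugBreak | mozalloc_abort | js::RunScript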
jkahn/islex
islex/tokens.py
_clean_tag
def _clean_tag(t): """Fix up some garbage errors.""" # TODO: when score present, include info. t = _scored_patt.sub(string=t, repl='') if t == '_country_' or t.startswith('_country:'): t = 'nnp_country' elif t == 'vpb': t = 'vb' # "carjack" is listed with vpb tag. elif t == 'nnd': t = 'nns' # "abbes" is listed with nnd tag. elif t == 'nns_root:': t = 'nns' # 'micros' is listed as nns_root. elif t == 'root:zygote': t = 'nn' # 'root:zygote' for zygote. :-/ elif t.startswith('root:'): t = 'uh' # Don't know why, but these are all UH tokens. elif t in ('abbr_united_states_marine_corps', 'abbr_orange_juice'): t = "abbreviation" elif t == '+abbreviation': t = 'abbreviation' elif t.startswith('fw_misspelling:'): t = 'fw' return t
python
def _clean_tag(t): """Fix up some garbage errors.""" # TODO: when score present, include info. t = _scored_patt.sub(string=t, repl='') if t == '_country_' or t.startswith('_country:'): t = 'nnp_country' elif t == 'vpb': t = 'vb' # "carjack" is listed with vpb tag. elif t == 'nnd': t = 'nns' # "abbes" is listed with nnd tag. elif t == 'nns_root:': t = 'nns' # 'micros' is listed as nns_root. elif t == 'root:zygote': t = 'nn' # 'root:zygote' for zygote. :-/ elif t.startswith('root:'): t = 'uh' # Don't know why, but these are all UH tokens. elif t in ('abbr_united_states_marine_corps', 'abbr_orange_juice'): t = "abbreviation" elif t == '+abbreviation': t = 'abbreviation' elif t.startswith('fw_misspelling:'): t = 'fw' return t
[ "def", "_clean_tag", "(", "t", ")", ":", "# TODO: when score present, include info.", "t", "=", "_scored_patt", ".", "sub", "(", "string", "=", "t", ",", "repl", "=", "''", ")", "if", "t", "==", "'_country_'", "or", "t", ".", "startswith", "(", "'_country:'", ")", ":", "t", "=", "'nnp_country'", "elif", "t", "==", "'vpb'", ":", "t", "=", "'vb'", "# \"carjack\" is listed with vpb tag.", "elif", "t", "==", "'nnd'", ":", "t", "=", "'nns'", "# \"abbes\" is listed with nnd tag.", "elif", "t", "==", "'nns_root:'", ":", "t", "=", "'nns'", "# 'micros' is listed as nns_root.", "elif", "t", "==", "'root:zygote'", ":", "t", "=", "'nn'", "# 'root:zygote' for zygote. :-/", "elif", "t", ".", "startswith", "(", "'root:'", ")", ":", "t", "=", "'uh'", "# Don't know why, but these are all UH tokens.", "elif", "t", "in", "(", "'abbr_united_states_marine_corps'", ",", "'abbr_orange_juice'", ")", ":", "t", "=", "\"abbreviation\"", "elif", "t", "==", "'+abbreviation'", ":", "t", "=", "'abbreviation'", "elif", "t", ".", "startswith", "(", "'fw_misspelling:'", ")", ":", "t", "=", "'fw'", "return", "t" ]
Fix up some garbage errors.
[ "Fix", "up", "some", "garbage", "errors", "." ]
train
https://github.com/jkahn/islex/blob/c60fee062e9ebe34a3bef338539749463e47faf0/islex/tokens.py#L91-L113
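_clean_tag depends on a module-level _scored_patt regex that is not part of this record; assuming it strips a trailing ':<score>' suffix, a quick session might look like:

import re

_scored_patt = re.compile(r':\d+(\.\d+)?$')  # hypothetical stand-in for islex's pattern

print(_clean_tag('vpb'))               # 'vb'   -- "carjack" fixup
print(_clean_tag('nns_root:'))         # 'nns'  -- "micros" fixup
print(_clean_tag('fw_misspelling:x'))  # 'fw'
print(_clean_tag('_country:france'))   # 'nnp_country'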
openstax/pyramid_sawing
pyramid_sawing/utils.py
local_settings
def local_settings(settings, prefix): """Localizes the settings for the dotted prefix. For example, if the prefix were 'xyz':: {'xyz.foo': 'bar', 'other': 'something'} Would become:: {'foo': 'bar'} Note that non-prefixed items are left out and the prefix is dropped. """ prefix = "{}.".format(prefix) new_settings = {k[len(prefix):]: v for k, v in settings.items() if k.startswith(prefix)} return new_settings
python
def local_settings(settings, prefix): """Localizes the settings for the dotted prefix. For example, if the prefix were 'xyz':: {'xyz.foo': 'bar', 'other': 'something'} Would become:: {'foo': 'bar'} Note that non-prefixed items are left out and the prefix is dropped. """ prefix = "{}.".format(prefix) new_settings = {k[len(prefix):]: v for k, v in settings.items() if k.startswith(prefix)} return new_settings
[ "def", "local_settings", "(", "settings", ",", "prefix", ")", ":", "prefix", "=", "\"{}.\"", ".", "format", "(", "prefix", ")", "new_settings", "=", "{", "k", "[", "len", "(", "prefix", ")", ":", "]", ":", "v", "for", "k", ",", "v", "in", "settings", ".", "items", "(", ")", "if", "k", ".", "startswith", "(", "prefix", ")", "}", "return", "new_settings" ]
Localizes the settings for the dotted prefix. For example, if the prefix were 'xyz':: {'xyz.foo': 'bar', 'other': 'something'} Would become:: {'foo': 'bar'} Note that non-prefixed items are left out and the prefix is dropped.
[ "Localizes", "the", "settings", "for", "the", "dotted", "prefix", ".", "For", "example", "if", "the", "prefix", "where", "xyz", "::" ]
train
https://github.com/openstax/pyramid_sawing/blob/d2ac7faf30c1517ed4621b8e62b7848e926078b9/pyramid_sawing/utils.py#L13-L28
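Reading the docstring's example back as runnable code (with one extra key to show that nested dots survive):

settings = {'xyz.foo': 'bar', 'xyz.a.b': 1, 'other': 'something'}
print(local_settings(settings, 'xyz'))
# {'foo': 'bar', 'a.b': 1} -- non-prefixed items dropped, prefix stripped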
JohnDoee/thomas
thomas/humanize.py
humanize_bytes
def humanize_bytes(bytes, precision=1): """Return a humanized string representation of a number of bytes. Assumes `from __future__ import division`. >>> humanize_bytes(1) '1 byte' >>> humanize_bytes(1024) '1.0 kB' >>> humanize_bytes(1024*123) '123.0 kB' >>> humanize_bytes(1024*12342) '12.1 MB' >>> humanize_bytes(1024*12342,2) '12.05 MB' >>> humanize_bytes(1024*1234,2) '1.21 MB' >>> humanize_bytes(1024*1234*1111,2) '1.31 GB' >>> humanize_bytes(1024*1234*1111,1) '1.3 GB' """ abbrevs = ( (1<<50, 'PB'), (1<<40, 'TB'), (1<<30, 'GB'), (1<<20, 'MB'), (1<<10, 'kB'), (1, 'bytes') ) if bytes == 1: return '1 byte' for factor, suffix in abbrevs: if bytes >= factor: break return '%.*f %s' % (precision, bytes / factor, suffix)
python
def humanize_bytes(bytes, precision=1): """Return a humanized string representation of a number of bytes. Assumes `from __future__ import division`. >>> humanize_bytes(1) '1 byte' >>> humanize_bytes(1024) '1.0 kB' >>> humanize_bytes(1024*123) '123.0 kB' >>> humanize_bytes(1024*12342) '12.1 MB' >>> humanize_bytes(1024*12342,2) '12.05 MB' >>> humanize_bytes(1024*1234,2) '1.21 MB' >>> humanize_bytes(1024*1234*1111,2) '1.31 GB' >>> humanize_bytes(1024*1234*1111,1) '1.3 GB' """ abbrevs = ( (1<<50, 'PB'), (1<<40, 'TB'), (1<<30, 'GB'), (1<<20, 'MB'), (1<<10, 'kB'), (1, 'bytes') ) if bytes == 1: return '1 byte' for factor, suffix in abbrevs: if bytes >= factor: break return '%.*f %s' % (precision, bytes / factor, suffix)
[ "def", "humanize_bytes", "(", "bytes", ",", "precision", "=", "1", ")", ":", "abbrevs", "=", "(", "(", "1", "<<", "50", ",", "'PB'", ")", ",", "(", "1", "<<", "40", ",", "'TB'", ")", ",", "(", "1", "<<", "30", ",", "'GB'", ")", ",", "(", "1", "<<", "20", ",", "'MB'", ")", ",", "(", "1", "<<", "10", ",", "'kB'", ")", ",", "(", "1", ",", "'bytes'", ")", ")", "if", "bytes", "==", "1", ":", "return", "'1 byte'", "for", "factor", ",", "suffix", "in", "abbrevs", ":", "if", "bytes", ">=", "factor", ":", "break", "return", "'%.*f %s'", "%", "(", "precision", ",", "bytes", "/", "factor", ",", "suffix", ")" ]
Return a humanized string representation of a number of bytes. Assumes `from __future__ import division`. >>> humanize_bytes(1) '1 byte' >>> humanize_bytes(1024) '1.0 kB' >>> humanize_bytes(1024*123) '123.0 kB' >>> humanize_bytes(1024*12342) '12.1 MB' >>> humanize_bytes(1024*12342,2) '12.05 MB' >>> humanize_bytes(1024*1234,2) '1.21 MB' >>> humanize_bytes(1024*1234*1111,2) '1.31 GB' >>> humanize_bytes(1024*1234*1111,1) '1.3 GB'
[ "Return", "a", "humanized", "string", "representation", "of", "a", "number", "of", "bytes", "." ]
train
https://github.com/JohnDoee/thomas/blob/51916dd110098b189a1c2fbcb71794fd9ec94832/thomas/humanize.py#L7-L42
BlackEarth/bl
bl/url.py
URL.parent
def parent(self): """return the parent URL, with params, query, and fragment in place""" path = '/'.join(self.path.split('/')[:-1]) s = path.strip('/').split(':') if len(s)==2 and s[1]=='': return None else: return self.__class__(self, path=path)
python
def parent(self): """return the parent URL, with params, query, and fragment in place""" path = '/'.join(self.path.split('/')[:-1]) s = path.strip('/').split(':') if len(s)==2 and s[1]=='': return None else: return self.__class__(self, path=path)
[ "def", "parent", "(", "self", ")", ":", "path", "=", "'/'", ".", "join", "(", "self", ".", "path", ".", "split", "(", "'/'", ")", "[", ":", "-", "1", "]", ")", "s", "=", "path", ".", "strip", "(", "'/'", ")", ".", "split", "(", "':'", ")", "if", "len", "(", "s", ")", "==", "2", "and", "s", "[", "1", "]", "==", "''", ":", "return", "None", "else", ":", "return", "self", ".", "__class__", "(", "self", ",", "path", "=", "path", ")" ]
return the parent URL, with params, query, and fragment in place
[ "return", "the", "parent", "URL", "with", "params", "query", "and", "fragment", "in", "place" ]
train
https://github.com/BlackEarth/bl/blob/edf6f37dac718987260b90ad0e7f7fe084a7c1a3/bl/url.py#L100-L107
BlackEarth/bl
bl/url.py
URL.join
def join(C, *args, **kwargs): """join a list of url elements, and include any keyword arguments, as a new URL""" u = C('/'.join([str(arg).strip('/') for arg in args]), **kwargs) return u
python
def join(C, *args, **kwargs): """join a list of url elements, and include any keyword arguments, as a new URL""" u = C('/'.join([str(arg).strip('/') for arg in args]), **kwargs) return u
[ "def", "join", "(", "C", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "u", "=", "C", "(", "'/'", ".", "join", "(", "[", "str", "(", "arg", ")", ".", "strip", "(", "'/'", ")", "for", "arg", "in", "args", "]", ")", ",", "*", "*", "kwargs", ")", "return", "u" ]
join a list of url elements, and include any keyword arguments, as a new URL
[ "join", "a", "list", "of", "url", "elements", "and", "include", "any", "keyword", "arguments", "as", "a", "new", "URL" ]
train
https://github.com/BlackEarth/bl/blob/edf6f37dac718987260b90ad0e7f7fe084a7c1a3/bl/url.py#L150-L153
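A hypothetical session combining join() and parent(); the exact URL constructor signature is not shown in these records, so treat this as a sketch:

u = URL.join('http://example.com', '/docs/', 'guide')  # stray '/' stripped per piece
# u is roughly URL('http://example.com/docs/guide')
p = u.parent()  # roughly URL('http://example.com/docs'); params/query/fragment kept
# parent() returns None once only the scheme root (e.g. 'http://') would remain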
teepark/junction
junction/hooks.py
select_peer
def select_peer(peer_addrs, service, routing_id, method): '''Choose a target from the available peers for a singular message :param peer_addrs: the ``(host, port)``s of the peers eligible to handle the RPC, and possibly a ``None`` entry if this hub can handle it locally :type peer_addrs: list :param service: the service of the message :type service: anything hash-able :param routing_id: the routing_id of the message :type routing_id: int :param method: the message method name :type method: string :returns: one of the provided peer_addrs There is no reason to call this method directly, but it may be useful to override it in a Hub subclass. This default implementation uses ``None`` if it is available (prefer local handling), then falls back to a random selection. ''' if any(p is None for p in peer_addrs): return None return random.choice(peer_addrs)
python
def select_peer(peer_addrs, service, routing_id, method): '''Choose a target from the available peers for a singular message :param peer_addrs: the ``(host, port)``s of the peers eligible to handle the RPC, and possibly a ``None`` entry if this hub can handle it locally :type peer_addrs: list :param service: the service of the message :type service: anything hash-able :param routing_id: the routing_id of the message :type routing_id: int :param method: the message method name :type method: string :returns: one of the provided peer_addrs There is no reason to call this method directly, but it may be useful to override it in a Hub subclass. This default implementation uses ``None`` if it is available (prefer local handling), then falls back to a random selection. ''' if any(p is None for p in peer_addrs): return None return random.choice(peer_addrs)
[ "def", "select_peer", "(", "peer_addrs", ",", "service", ",", "routing_id", ",", "method", ")", ":", "if", "any", "(", "p", "is", "None", "for", "p", "in", "peer_addrs", ")", ":", "return", "None", "return", "random", ".", "choice", "(", "peer_addrs", ")" ]
Choose a target from the available peers for a singular message :param peer_addrs: the ``(host, port)``s of the peers eligible to handle the RPC, and possibly a ``None`` entry if this hub can handle it locally :type peer_addrs: list :param service: the service of the message :type service: anything hash-able :param routing_id: the routing_id of the message :type routing_id: int :param method: the message method name :type method: string :returns: one of the provided peer_addrs There is no reason to call this method directly, but it may be useful to override it in a Hub subclass. This default implementation uses ``None`` if it is available (prefer local handling), then falls back to a random selection.
[ "Choose", "a", "target", "from", "the", "available", "peers", "for", "a", "singular", "message" ]
train
https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/hooks.py#L8-L32
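Both branches are easy to exercise directly, since select_peer is a pure function of its arguments:

peers = [('10.0.0.1', 5000), ('10.0.0.2', 5000)]
select_peer(peers + [None], 'echo', 42, 'ping')   # None: prefer local handling
select_peer(peers, 'echo', 42, 'ping')            # a random peer from the list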
storax/jinjaapidoc
src/jinjaapidoc/__init__.py
setup
def setup(app): """Setup the sphinx extension This will setup autodoc and autosummary. Add the :class:`ext.ModDocstringDocumenter`. Add the config values. Connect builder-inited event to :func:`gendoc.main`. :param app: the sphinx app :type app: :class:`sphinx.application.Sphinx` :returns: None :rtype: None :raises: None """ # Connect before autosummary app.connect('builder-inited', gendoc.main) app.setup_extension('sphinx.ext.autodoc') app.setup_extension('sphinx.ext.autosummary') app.add_autodocumenter(ext.ModDocstringDocumenter) app.add_config_value('jinjaapi_outputdir', '', 'env') app.add_config_value('jinjaapi_nodelete', True, 'env') app.add_config_value('jinjaapi_srcdir', '', 'env') app.add_config_value('jinjaapi_exclude_paths', [], 'env') app.add_config_value('jinjaapi_force', True, 'env') app.add_config_value('jinjaapi_followlinks', True, 'env') app.add_config_value('jinjaapi_dryrun', False, 'env') app.add_config_value('jinjaapi_includeprivate', True, 'env') app.add_config_value('jinjaapi_addsummarytemplate', True, 'env') app.add_config_value('jinjaapi_include_from_all', True, 'env') return {'version': __version__, 'parallel_read_safe': True}
python
def setup(app): """Setup the sphinx extension This will setup autodoc and autosummary. Add the :class:`ext.ModDocstringDocumenter`. Add the config values. Connect builder-inited event to :func:`gendoc.main`. :param app: the sphinx app :type app: :class:`sphinx.application.Sphinx` :returns: None :rtype: None :raises: None """ # Connect before autosummary app.connect('builder-inited', gendoc.main) app.setup_extension('sphinx.ext.autodoc') app.setup_extension('sphinx.ext.autosummary') app.add_autodocumenter(ext.ModDocstringDocumenter) app.add_config_value('jinjaapi_outputdir', '', 'env') app.add_config_value('jinjaapi_nodelete', True, 'env') app.add_config_value('jinjaapi_srcdir', '', 'env') app.add_config_value('jinjaapi_exclude_paths', [], 'env') app.add_config_value('jinjaapi_force', True, 'env') app.add_config_value('jinjaapi_followlinks', True, 'env') app.add_config_value('jinjaapi_dryrun', False, 'env') app.add_config_value('jinjaapi_includeprivate', True, 'env') app.add_config_value('jinjaapi_addsummarytemplate', True, 'env') app.add_config_value('jinjaapi_include_from_all', True, 'env') return {'version': __version__, 'parallel_read_safe': True}
[ "def", "setup", "(", "app", ")", ":", "# Connect before autosummary\r", "app", ".", "connect", "(", "'builder-inited'", ",", "gendoc", ".", "main", ")", "app", ".", "setup_extension", "(", "'sphinx.ext.autodoc'", ")", "app", ".", "setup_extension", "(", "'sphinx.ext.autosummary'", ")", "app", ".", "add_autodocumenter", "(", "ext", ".", "ModDocstringDocumenter", ")", "app", ".", "add_config_value", "(", "'jinjaapi_outputdir'", ",", "''", ",", "'env'", ")", "app", ".", "add_config_value", "(", "'jinjaapi_nodelete'", ",", "True", ",", "'env'", ")", "app", ".", "add_config_value", "(", "'jinjaapi_srcdir'", ",", "''", ",", "'env'", ")", "app", ".", "add_config_value", "(", "'jinjaapi_exclude_paths'", ",", "[", "]", ",", "'env'", ")", "app", ".", "add_config_value", "(", "'jinjaapi_force'", ",", "True", ",", "'env'", ")", "app", ".", "add_config_value", "(", "'jinjaapi_followlinks'", ",", "True", ",", "'env'", ")", "app", ".", "add_config_value", "(", "'jinjaapi_dryrun'", ",", "False", ",", "'env'", ")", "app", ".", "add_config_value", "(", "'jinjaapi_includeprivate'", ",", "True", ",", "'env'", ")", "app", ".", "add_config_value", "(", "'jinjaapi_addsummarytemplate'", ",", "True", ",", "'env'", ")", "app", ".", "add_config_value", "(", "'jinjaapi_include_from_all'", ",", "True", ",", "'env'", ")", "return", "{", "'version'", ":", "__version__", ",", "'parallel_read_safe'", ":", "True", "}" ]
Setup the sphinx extension This will setup autodoc and autosummary. Add the :class:`ext.ModDocstringDocumenter`. Add the config values. Connect builder-inited event to :func:`gendoc.main`. :param app: the sphinx app :type app: :class:`sphinx.application.Sphinx` :returns: None :rtype: None :raises: None
[ "Setup", "the", "sphinx", "extension", "This", "will", "setup", "autodoc", "and", "autosummary", ".", "Add", "the", ":", "class", ":", "ext", ".", "ModDocstringDocumenter", ".", "Add", "the", "config", "values", ".", "Connect", "builder", "-", "inited", "event", "to", ":", "func", ":", "gendoc", ".", "main", ".", ":", "param", "app", ":", "the", "sphinx", "app", ":", "type", "app", ":", ":", "class", ":", "sphinx", ".", "application", ".", "Sphinx", ":", "returns", ":", "None", ":", "rtype", ":", "None", ":", "raises", ":", "None" ]
train
https://github.com/storax/jinjaapidoc/blob/f1eeb6ab5bd1a96c4130306718c6423f37c76856/src/jinjaapidoc/__init__.py#L9-L42
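A minimal Sphinx conf.py using this extension might look as follows; the option names come directly from the add_config_value() calls above, but the paths are hypothetical:

extensions = ['jinjaapidoc']
jinjaapi_srcdir = '../src'            # package sources to scan
jinjaapi_outputdir = 'reference'      # where generated .rst files are written
jinjaapi_exclude_paths = ['../src/mypkg/_vendor']  # hypothetical exclusion
jinjaapi_includeprivate = False       # override the True default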
BlackEarth/bl
bl/progress.py
Progress.start
def start(self, key=None, **params): """initialize process timing for the current stack""" self.params.update(**params) key = key or self.stack_key if key is not None: self.current_times[key] = time()
python
def start(self, key=None, **params): """initialize process timing for the current stack""" self.params.update(**params) key = key or self.stack_key if key is not None: self.current_times[key] = time()
[ "def", "start", "(", "self", ",", "key", "=", "None", ",", "*", "*", "params", ")", ":", "self", ".", "params", ".", "update", "(", "*", "*", "params", ")", "key", "=", "key", "or", "self", ".", "stack_key", "if", "key", "is", "not", "None", ":", "self", ".", "current_times", "[", "key", "]", "=", "time", "(", ")" ]
initialize process timing for the current stack
[ "initialize", "process", "timing", "for", "the", "current", "stack" ]
train
https://github.com/BlackEarth/bl/blob/edf6f37dac718987260b90ad0e7f7fe084a7c1a3/bl/progress.py#L19-L24
BlackEarth/bl
bl/progress.py
Progress.report
def report(self, fraction=None): """report the total progress for the current stack, optionally given the local fraction completed. fraction=None: if given, used as the fraction of the local method so far completed. runtimes=None: if given, used as the expected runtimes for the current stack. """ r = Dict() local_key = self.stack_key if local_key is None: return {} runtimes = self.runtimes() for key in self.stack_keys: if self.current_times.get(key) is None: self.start(key=key) runtime = runtimes.get(key) or self.runtime(key) if key == local_key and fraction is not None: r[key] = fraction elif runtime is not None: r[key] = (time() - self.current_times[key]) / runtime return r
python
def report(self, fraction=None): """report the total progress for the current stack, optionally given the local fraction completed. fraction=None: if given, used as the fraction of the local method so far completed. runtimes=None: if given, used as the expected runtimes for the current stack. """ r = Dict() local_key = self.stack_key if local_key is None: return {} runtimes = self.runtimes() for key in self.stack_keys: if self.current_times.get(key) is None: self.start(key=key) runtime = runtimes.get(key) or self.runtime(key) if key == local_key and fraction is not None: r[key] = fraction elif runtime is not None: r[key] = (time() - self.current_times[key]) / runtime return r
[ "def", "report", "(", "self", ",", "fraction", "=", "None", ")", ":", "r", "=", "Dict", "(", ")", "local_key", "=", "self", ".", "stack_key", "if", "local_key", "is", "None", ":", "return", "{", "}", "runtimes", "=", "self", ".", "runtimes", "(", ")", "for", "key", "in", "self", ".", "stack_keys", ":", "if", "self", ".", "current_times", ".", "get", "(", "key", ")", "is", "None", ":", "self", ".", "start", "(", "key", "=", "key", ")", "runtime", "=", "runtimes", ".", "get", "(", "key", ")", "or", "self", ".", "runtime", "(", "key", ")", "if", "key", "==", "local_key", "and", "fraction", "is", "not", "None", ":", "r", "[", "key", "]", "=", "fraction", "elif", "runtime", "is", "not", "None", ":", "r", "[", "key", "]", "=", "(", "time", "(", ")", "-", "self", ".", "current_times", "[", "key", "]", ")", "/", "runtime", "return", "r" ]
report the total progress for the current stack, optionally given the local fraction completed. fraction=None: if given, used as the fraction of the local method so far completed. runtimes=None: if given, used as the expected runtimes for the current stack.
[ "report", "the", "total", "progress", "for", "the", "current", "stack", "optionally", "given", "the", "local", "fraction", "completed", ".", "fraction", "=", "None", ":", "if", "given", "used", "as", "the", "fraction", "of", "the", "local", "method", "so", "far", "completed", ".", "runtimes", "=", "None", ":", "if", "given", "used", "as", "the", "expected", "runtimes", "for", "the", "current", "stack", "." ]
train
https://github.com/BlackEarth/bl/blob/edf6f37dac718987260b90ad0e7f7fe084a7c1a3/bl/progress.py#L34-L51
BlackEarth/bl
bl/progress.py
Progress.finish
def finish(self): """record the current stack process as finished""" self.report(fraction=1.0) key = self.stack_key if key is not None: if self.data.get(key) is None: self.data[key] = [] start_time = self.current_times.get(key) or time() self.data[key].append(Dict(runtime=time()-start_time, **self.params))
python
def finish(self): """record the current stack process as finished""" self.report(fraction=1.0) key = self.stack_key if key is not None: if self.data.get(key) is None: self.data[key] = [] start_time = self.current_times.get(key) or time() self.data[key].append(Dict(runtime=time()-start_time, **self.params))
[ "def", "finish", "(", "self", ")", ":", "self", ".", "report", "(", "fraction", "=", "1.0", ")", "key", "=", "self", ".", "stack_key", "if", "key", "is", "not", "None", ":", "if", "self", ".", "data", ".", "get", "(", "key", ")", "is", "None", ":", "self", ".", "data", "[", "key", "]", "=", "[", "]", "start_time", "=", "self", ".", "current_times", ".", "get", "(", "key", ")", "or", "time", "(", ")", "self", ".", "data", "[", "key", "]", ".", "append", "(", "Dict", "(", "runtime", "=", "time", "(", ")", "-", "start_time", ",", "*", "*", "self", ".", "params", ")", ")" ]
record the current stack process as finished
[ "record", "the", "current", "stack", "process", "as", "finished" ]
train
https://github.com/BlackEarth/bl/blob/edf6f37dac718987260b90ad0e7f7fe084a7c1a3/bl/progress.py#L53-L61
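Taken together, start()/report()/finish() suggest a usage pattern like the following sketch; the constructor and the stack-derived keys are assumptions, since they are not shown in these records:

progress = Progress()  # assumed no-arg constructor

def convert(filename):
    progress.start(filename=filename)         # stamp the start time for this stack key
    for frac in (0.25, 0.5, 0.75):
        print(progress.report(fraction=frac)) # per-key progress estimates
    progress.finish()                         # store this runtime sample for next time

convert('book.xml')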
storax/jinjaapidoc
src/jinjaapidoc/ext.py
ModDocstringDocumenter.add_directive_header
def add_directive_header(self, sig): """Add the directive header and options to the generated content.""" domain = getattr(self, 'domain', 'py') directive = getattr(self, 'directivetype', "module") name = self.format_name() self.add_line(u'.. %s:%s:: %s%s' % (domain, directive, name, sig), '<autodoc>') if self.options.noindex: self.add_line(u' :noindex:', '<autodoc>') if self.objpath: # Be explicit about the module, this is necessary since .. class:: # etc. don't support a prepended module name self.add_line(u' :module: %s' % self.modname, '<autodoc>')
python
def add_directive_header(self, sig): """Add the directive header and options to the generated content.""" domain = getattr(self, 'domain', 'py') directive = getattr(self, 'directivetype', "module") name = self.format_name() self.add_line(u'.. %s:%s:: %s%s' % (domain, directive, name, sig), '<autodoc>') if self.options.noindex: self.add_line(u' :noindex:', '<autodoc>') if self.objpath: # Be explicit about the module, this is necessary since .. class:: # etc. don't support a prepended module name self.add_line(u' :module: %s' % self.modname, '<autodoc>')
[ "def", "add_directive_header", "(", "self", ",", "sig", ")", ":", "domain", "=", "getattr", "(", "self", ",", "'domain'", ",", "'py'", ")", "directive", "=", "getattr", "(", "self", ",", "'directivetype'", ",", "\"module\"", ")", "name", "=", "self", ".", "format_name", "(", ")", "self", ".", "add_line", "(", "u'.. %s:%s:: %s%s'", "%", "(", "domain", ",", "directive", ",", "name", ",", "sig", ")", ",", "'<autodoc>'", ")", "if", "self", ".", "options", ".", "noindex", ":", "self", ".", "add_line", "(", "u' :noindex:'", ",", "'<autodoc>'", ")", "if", "self", ".", "objpath", ":", "# Be explicit about the module, this is necessary since .. class::", "# etc. don't support a prepended module name", "self", ".", "add_line", "(", "u' :module: %s'", "%", "self", ".", "modname", ",", "'<autodoc>'", ")" ]
Add the directive header and options to the generated content.
[ "Add", "the", "directive", "header", "and", "options", "to", "the", "generated", "content", "." ]
train
https://github.com/storax/jinjaapidoc/blob/f1eeb6ab5bd1a96c4130306718c6423f37c76856/src/jinjaapidoc/ext.py#L13-L25
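For a hypothetical module mypkg.mymod documented with the noindex option set, the format strings above emit these lines into the generated reST:

.. py:module:: mypkg.mymod
   :noindex: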
teepark/junction
junction/client.py
Client.connect
def connect(self): "Initiate the connection to a proxying hub" log.info("connecting") # don't have the connection attempt reconnects, because when it goes # down we are going to cycle to the next potential peer from the Client self._peer = connection.Peer( None, self._dispatcher, self._addrs.popleft(), backend.Socket(), reconnect=False) self._peer.start()
python
def connect(self): "Initiate the connection to a proxying hub" log.info("connecting") # don't have the connection attempt reconnects, because when it goes # down we are going to cycle to the next potential peer from the Client self._peer = connection.Peer( None, self._dispatcher, self._addrs.popleft(), backend.Socket(), reconnect=False) self._peer.start()
[ "def", "connect", "(", "self", ")", ":", "log", ".", "info", "(", "\"connecting\"", ")", "# don't have the connection attempt reconnects, because when it goes", "# down we are going to cycle to the next potential peer from the Client", "self", ".", "_peer", "=", "connection", ".", "Peer", "(", "None", ",", "self", ".", "_dispatcher", ",", "self", ".", "_addrs", ".", "popleft", "(", ")", ",", "backend", ".", "Socket", "(", ")", ",", "reconnect", "=", "False", ")", "self", ".", "_peer", ".", "start", "(", ")" ]
Initiate the connection to a proxying hub
[ "Initiate", "the", "connection", "to", "a", "proxying", "hub" ]
train
https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/client.py#L29-L38
teepark/junction
junction/client.py
Client.wait_connected
def wait_connected(self, timeout=None): '''Wait for connections to be made and their handshakes to finish :param timeout: maximum time to wait in seconds. with None, there is no timeout. :type timeout: float or None :returns: ``True`` if all connections were made, ``False`` if one or more failed. ''' result = self._peer.wait_connected(timeout) if not result: if timeout is not None: log.warn("connect wait timed out after %.2f seconds" % timeout) return result
python
def wait_connected(self, timeout=None): '''Wait for connections to be made and their handshakes to finish :param timeout: maximum time to wait in seconds. with None, there is no timeout. :type timeout: float or None :returns: ``True`` if all connections were made, ``False`` if one or more failed. ''' result = self._peer.wait_connected(timeout) if not result: if timeout is not None: log.warn("connect wait timed out after %.2f seconds" % timeout) return result
[ "def", "wait_connected", "(", "self", ",", "timeout", "=", "None", ")", ":", "result", "=", "self", ".", "_peer", ".", "wait_connected", "(", "timeout", ")", "if", "not", "result", ":", "if", "timeout", "is", "not", "None", ":", "log", ".", "warn", "(", "\"connect wait timed out after %.2f seconds\"", "%", "timeout", ")", "return", "result" ]
Wait for connections to be made and their handshakes to finish :param timeout: maximum time to wait in seconds. with None, there is no timeout. :type timeout: float or None :returns: ``True`` if all connections were made, ``False`` if one or more failed.
[ "Wait", "for", "connections", "to", "be", "made", "and", "their", "handshakes", "to", "finish" ]
train
https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/client.py#L40-L55
teepark/junction
junction/client.py
Client.reset
def reset(self): "Close the current failed connection and prepare for a new one" log.info("resetting client") rpc_client = self._rpc_client self._addrs.append(self._peer.addr) self.__init__(self._addrs) self._rpc_client = rpc_client self._dispatcher.rpc_client = rpc_client rpc_client._client = weakref.ref(self)
python
def reset(self): "Close the current failed connection and prepare for a new one" log.info("resetting client") rpc_client = self._rpc_client self._addrs.append(self._peer.addr) self.__init__(self._addrs) self._rpc_client = rpc_client self._dispatcher.rpc_client = rpc_client rpc_client._client = weakref.ref(self)
[ "def", "reset", "(", "self", ")", ":", "log", ".", "info", "(", "\"resetting client\"", ")", "rpc_client", "=", "self", ".", "_rpc_client", "self", ".", "_addrs", ".", "append", "(", "self", ".", "_peer", ".", "addr", ")", "self", ".", "__init__", "(", "self", ".", "_addrs", ")", "self", ".", "_rpc_client", "=", "rpc_client", "self", ".", "_dispatcher", ".", "rpc_client", "=", "rpc_client", "rpc_client", ".", "_client", "=", "weakref", ".", "ref", "(", "self", ")" ]
Close the current failed connection and prepare for a new one
[ "Close", "the", "current", "failed", "connection", "and", "prepare", "for", "a", "new", "one" ]
train
https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/client.py#L57-L65
teepark/junction
junction/client.py
Client.shutdown
def shutdown(self): 'Close the hub connection' log.info("shutting down") self._peer.go_down(reconnect=False, expected=True)
python
def shutdown(self): 'Close the hub connection' log.info("shutting down") self._peer.go_down(reconnect=False, expected=True)
[ "def", "shutdown", "(", "self", ")", ":", "log", ".", "info", "(", "\"shutting down\"", ")", "self", ".", "_peer", ".", "go_down", "(", "reconnect", "=", "False", ",", "expected", "=", "True", ")" ]
Close the hub connection
[ "Close", "the", "hub", "connection" ]
train
https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/client.py#L67-L70
teepark/junction
junction/client.py
Client.publish
def publish(self, service, routing_id, method, args=None, kwargs=None, broadcast=False): '''Send a 1-way message :param service: the service name (the routing top level) :type service: anything hash-able :param routing_id: The id used for routing within the registered handlers of the service. :type routing_id: int :param method: the method name to call :type method: string :param args: the positional arguments to send along with the request :type args: tuple :param kwargs: keyword arguments to send along with the request :type kwargs: dict :param broadcast: if ``True``, send to every peer with a matching subscription :type broadcast: bool :returns: None. use 'rpc' methods for requests with responses. :raises: :class:`Unroutable <junction.errors.Unroutable>` if the client doesn't have a connection to a hub ''' if not self._peer.up: raise errors.Unroutable() self._dispatcher.send_proxied_publish(service, routing_id, method, args or (), kwargs or {}, singular=not broadcast)
python
def publish(self, service, routing_id, method, args=None, kwargs=None, broadcast=False): '''Send a 1-way message :param service: the service name (the routing top level) :type service: anything hash-able :param routing_id: The id used for routing within the registered handlers of the service. :type routing_id: int :param method: the method name to call :type method: string :param args: the positional arguments to send along with the request :type args: tuple :param kwargs: keyword arguments to send along with the request :type kwargs: dict :param broadcast: if ``True``, send to every peer with a matching subscription :type broadcast: bool :returns: None. use 'rpc' methods for requests with responses. :raises: :class:`Unroutable <junction.errors.Unroutable>` if the client doesn't have a connection to a hub ''' if not self._peer.up: raise errors.Unroutable() self._dispatcher.send_proxied_publish(service, routing_id, method, args or (), kwargs or {}, singular=not broadcast)
[ "def", "publish", "(", "self", ",", "service", ",", "routing_id", ",", "method", ",", "args", "=", "None", ",", "kwargs", "=", "None", ",", "broadcast", "=", "False", ")", ":", "if", "not", "self", ".", "_peer", ".", "up", ":", "raise", "errors", ".", "Unroutable", "(", ")", "self", ".", "_dispatcher", ".", "send_proxied_publish", "(", "service", ",", "routing_id", ",", "method", ",", "args", "or", "(", ")", ",", "kwargs", "or", "{", "}", ",", "singular", "=", "not", "broadcast", ")" ]
Send a 1-way message :param service: the service name (the routing top level) :type service: anything hash-able :param routing_id: The id used for routing within the registered handlers of the service. :type routing_id: int :param method: the method name to call :type method: string :param args: the positional arguments to send along with the request :type args: tuple :param kwargs: keyword arguments to send along with the request :type kwargs: dict :param broadcast: if ``True``, send to every peer with a matching subscription :type broadcast: bool :returns: None. use 'rpc' methods for requests with responses. :raises: :class:`Unroutable <junction.errors.Unroutable>` if the client doesn't have a connection to a hub
[ "Send", "a", "1", "-", "way", "message" ]
train
https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/client.py#L72-L102
teepark/junction
junction/client.py
Client.publish_receiver_count
def publish_receiver_count( self, service, routing_id, method, timeout=None): '''Get the number of peers that would handle a particular publish This method will block until a response arrives :param service: the service name :type service: anything hash-able :param routing_id: the id used for narrowing within the service handlers :type routing_id: int :param method: the method name :type method: string :param timeout: maximum time to wait for the response :type timeout: int, float or None :raises: - :class:`Unroutable <junction.errors.Unroutable>` if no peers are registered to receive the message - :class:`WaitTimeout <junction.errors.WaitTimeout>` if a timeout was provided and it expires ''' if not self._peer.up: raise errors.Unroutable() return self._rpc_client.recipient_count(self._peer, const.MSG_TYPE_PUBLISH, service, routing_id, method).wait( timeout)[0]
python
def publish_receiver_count( self, service, routing_id, method, timeout=None): '''Get the number of peers that would handle a particular publish This method will block until a response arrives :param service: the service name :type service: anything hash-able :param routing_id: the id used for narrowing within the service handlers :type routing_id: int :param method: the method name :type method: string :param timeout: maximum time to wait for the response :type timeout: int, float or None :raises: - :class:`Unroutable <junction.errors.Unroutable>` if no peers are registered to receive the message - :class:`WaitTimeout <junction.errors.WaitTimeout>` if a timeout was provided and it expires ''' if not self._peer.up: raise errors.Unroutable() return self._rpc_client.recipient_count(self._peer, const.MSG_TYPE_PUBLISH, service, routing_id, method).wait( timeout)[0]
[ "def", "publish_receiver_count", "(", "self", ",", "service", ",", "routing_id", ",", "method", ",", "timeout", "=", "None", ")", ":", "if", "not", "self", ".", "_peer", ".", "up", ":", "raise", "errors", ".", "Unroutable", "(", ")", "return", "self", ".", "_rpc_client", ".", "recipient_count", "(", "self", ".", "_peer", ",", "const", ".", "MSG_TYPE_PUBLISH", ",", "service", ",", "routing_id", ",", "method", ")", ".", "wait", "(", "timeout", ")", "[", "0", "]" ]
Get the number of peers that would handle a particular publish This method will block until a response arrives :param service: the service name :type service: anything hash-able :param routing_id: the id used for narrowing within the service handlers :type routing_id: int :param method: the method name :type method: string :param timeout: maximum time to wait for the response :type timeout: int, float or None :raises: - :class:`Unroutable <junction.errors.Unroutable>` if no peers are registered to receive the message - :class:`WaitTimeout <junction.errors.WaitTimeout>` if a timeout was provided and it expires
[ "Get", "the", "number", "of", "peers", "that", "would", "handle", "a", "particular", "publish" ]
train
https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/client.py#L104-L131
teepark/junction
junction/client.py
Client.send_rpc
def send_rpc(self, service, routing_id, method, args=None, kwargs=None, broadcast=False): '''Send out an RPC request :param service: the service name (the routing top level) :type service: anything hash-able :param routing_id: The id used for routing within the registered handlers of the service. :type routing_id: int :param method: the method name to call :type method: string :param args: the positional arguments to send along with the request :type args: tuple :param kwargs: keyword arguments to send along with the request :type kwargs: dict :param broadcast: if ``True``, send to all peers with matching subscriptions :type broadcast: bool :returns: a :class:`RPC <junction.futures.RPC>` object representing the RPC and its future response. :raises: :class:`Unroutable <junction.errors.Unroutable>` if the client doesn't have a connection to a hub ''' if not self._peer.up: raise errors.Unroutable() return self._dispatcher.send_proxied_rpc(service, routing_id, method, args or (), kwargs or {}, not broadcast)
python
def send_rpc(self, service, routing_id, method, args=None, kwargs=None, broadcast=False): '''Send out an RPC request :param service: the service name (the routing top level) :type service: anything hash-able :param routing_id: The id used for routing within the registered handlers of the service. :type routing_id: int :param method: the method name to call :type method: string :param args: the positional arguments to send along with the request :type args: tuple :param kwargs: keyword arguments to send along with the request :type kwargs: dict :param broadcast: if ``True``, send to all peers with matching subscriptions :type broadcast: bool :returns: a :class:`RPC <junction.futures.RPC>` object representing the RPC and its future response. :raises: :class:`Unroutable <junction.errors.Unroutable>` if the client doesn't have a connection to a hub ''' if not self._peer.up: raise errors.Unroutable() return self._dispatcher.send_proxied_rpc(service, routing_id, method, args or (), kwargs or {}, not broadcast)
[ "def", "send_rpc", "(", "self", ",", "service", ",", "routing_id", ",", "method", ",", "args", "=", "None", ",", "kwargs", "=", "None", ",", "broadcast", "=", "False", ")", ":", "if", "not", "self", ".", "_peer", ".", "up", ":", "raise", "errors", ".", "Unroutable", "(", ")", "return", "self", ".", "_dispatcher", ".", "send_proxied_rpc", "(", "service", ",", "routing_id", ",", "method", ",", "args", "or", "(", ")", ",", "kwargs", "or", "{", "}", ",", "not", "broadcast", ")" ]
Send out an RPC request :param service: the service name (the routing top level) :type service: anything hash-able :param routing_id: The id used for routing within the registered handlers of the service. :type routing_id: int :param method: the method name to call :type method: string :param args: the positional arguments to send along with the request :type args: tuple :param kwargs: keyword arguments to send along with the request :type kwargs: dict :param broadcast: if ``True``, send to all peers with matching subscriptions :type broadcast: bool :returns: a :class:`RPC <junction.futures.RPC>` object representing the RPC and its future response. :raises: :class:`Unroutable <junction.errors.Unroutable>` if the client doesn't have a connection to a hub
[ "Send", "out", "an", "RPC", "request" ]
train
https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/client.py#L133-L165
teepark/junction
junction/client.py
Client.rpc_receiver_count
def rpc_receiver_count(self, service, routing_id, method, timeout=None): '''Get the number of peers that would handle a particular RPC This method will block until a response arrives :param service: the service name :type service: anything hash-able :param routing_id: the id used for narrowing within the service handlers :type routing_id: int :param method: the method name :type method: string :returns: the integer number of peers that would receive the described RPC :raises: - :class:`Unroutable <junction.errors.Unroutable>` if no peers are registered to receive the message - :class:`WaitTimeout <junction.errors.WaitTimeout>` if a timeout was provided and it expires ''' if not self._peer.up: raise errors.Unroutable() return self._rpc_client.recipient_count(self._peer, const.MSG_TYPE_RPC_REQUEST, service, routing_id, method).wait( timeout)[0]
python
def rpc_receiver_count(self, service, routing_id, method, timeout=None): '''Get the number of peers that would handle a particular RPC This method will block until a response arrives :param service: the service name :type service: anything hash-able :param routing_id: the id used for narrowing within the service handlers :type routing_id: int :param method: the method name :type method: string :returns: the integer number of peers that would receive the described RPC :raises: - :class:`Unroutable <junction.errors.Unroutable>` if no peers are registered to receive the message - :class:`WaitTimeout <junction.errors.WaitTimeout>` if a timeout was provided and it expires ''' if not self._peer.up: raise errors.Unroutable() return self._rpc_client.recipient_count(self._peer, const.MSG_TYPE_RPC_REQUEST, service, routing_id, method).wait( timeout)[0]
[ "def", "rpc_receiver_count", "(", "self", ",", "service", ",", "routing_id", ",", "method", ",", "timeout", "=", "None", ")", ":", "if", "not", "self", ".", "_peer", ".", "up", ":", "raise", "errors", ".", "Unroutable", "(", ")", "return", "self", ".", "_rpc_client", ".", "recipient_count", "(", "self", ".", "_peer", ",", "const", ".", "MSG_TYPE_RPC_REQUEST", ",", "service", ",", "routing_id", ",", "method", ")", ".", "wait", "(", "timeout", ")", "[", "0", "]" ]
Get the number of peers that would handle a particular RPC This method will block until a response arrives :param service: the service name :type service: anything hash-able :param routing_id: the id used for narrowing within the service handlers :type routing_id: int :param method: the method name :type method: string :returns: the integer number of peers that would receive the described RPC :raises: - :class:`Unroutable <junction.errors.Unroutable>` if no peers are registered to receive the message - :class:`WaitTimeout <junction.errors.WaitTimeout>` if a timeout was provided and it expires
[ "Get", "the", "number", "of", "peers", "that", "would", "handle", "a", "particular", "RPC" ]
train
https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/client.py#L207-L234
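Pulling the Client pieces together into one hypothetical flow; the constructor's address-list argument is inferred from reset() above, and the RPC future's wait() is an assumed API of junction.futures:

client = Client([('127.0.0.1', 9870)])   # addresses of candidate hubs (assumed ctor)
client.connect()
if not client.wait_connected(timeout=5.0):
    client.reset()                       # cycle to the next hub address
    client.connect()

client.publish('logs', 0, 'write', args=('hello',))       # fire-and-forget
n = client.rpc_receiver_count('math', 7, 'add', timeout=2.0)  # peers that would answer
rpc = client.send_rpc('math', 7, 'add', args=(1, 2))      # returns an RPC future
result = rpc.wait(timeout=2.0)           # hypothetical: RPC wait API assumed
client.shutdown()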
berlotto/restapi-client
restapi/__init__.py
Endpoint._headers
def _headers(self, others={}): """Return the default headers and others as necessary""" headers = { 'Content-Type': 'application/json' } for p in others.keys(): headers[p] = others[p] return headers
python
def _headers(self, others={}): """Return the default headers and others as necessary""" headers = { 'Content-Type': 'application/json' } for p in others.keys(): headers[p] = others[p] return headers
[ "def", "_headers", "(", "self", ",", "others", "=", "{", "}", ")", ":", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", "}", "for", "p", "in", "others", ".", "keys", "(", ")", ":", "headers", "[", "p", "]", "=", "others", "[", "p", "]", "return", "headers" ]
Return the default headers and others as necessary
[ "Return", "the", "default", "headers", "and", "others", "as", "necessary" ]
train
https://github.com/berlotto/restapi-client/blob/c35406c3fa8163fadb2e25ef5c2604d03569a6a1/restapi/__init__.py#L64-L72
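Since others is only read, the mutable default dict is harmless here. From an Endpoint instance the merge behaves like this (the token value is made up):

endpoint._headers({'Authorization': 'Token abc123'})
# {'Content-Type': 'application/json', 'Authorization': 'Token abc123'}
endpoint._headers()
# {'Content-Type': 'application/json'}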
Eyepea/tanto
monitoring_agent/configurator.py
read
def read(config_file, configspec, server_mode=False, default_section='default_settings', list_values=True): ''' Read the config file with spec validation ''' # configspec = ConfigObj(path.join(path.abspath(path.dirname(__file__)), configspec), # encoding='UTF8', # interpolation='Template', # list_values=False, # _inspec=True) config = ConfigObj(config_file, configspec=path.join(path.abspath(path.dirname(__file__)), configspec), list_values=list_values) validation = config.validate(validate.Validator(), preserve_errors=True) if validation == True: config = dict(config) for section in config: if section != default_section: if server_mode: # When it's a servers config file, retrieve the correct fqdn config[section]['availability'] = True if config[section]['custom_fqdn'] == None: config[section]['custom_fqdn'] = socket.getfqdn() for option in config[section]: # retrieve default configuration for missing values if config[section][option] == None: config[section][option] = config[default_section][option] del(config[default_section]) return config else: raise ConfiguratorException(config_file, validation)
python
def read(config_file, configspec, server_mode=False, default_section='default_settings', list_values=True): ''' Read the config file with spec validation ''' # configspec = ConfigObj(path.join(path.abspath(path.dirname(__file__)), configspec), # encoding='UTF8', # interpolation='Template', # list_values=False, # _inspec=True) config = ConfigObj(config_file, configspec=path.join(path.abspath(path.dirname(__file__)), configspec), list_values=list_values) validation = config.validate(validate.Validator(), preserve_errors=True) if validation == True: config = dict(config) for section in config: if section != default_section: if server_mode: # When it's a servers config file, retrieve the correct fqdn config[section]['availability'] = True if config[section]['custom_fqdn'] == None: config[section]['custom_fqdn'] = socket.getfqdn() for option in config[section]: # retrieve default configuration for missing values if config[section][option] == None: config[section][option] = config[default_section][option] del(config[default_section]) return config else: raise ConfiguratorException(config_file, validation)
[ "def", "read", "(", "config_file", ",", "configspec", ",", "server_mode", "=", "False", ",", "default_section", "=", "'default_settings'", ",", "list_values", "=", "True", ")", ":", "# configspec = ConfigObj(path.join(path.abspath(path.dirname(__file__)), configspec),", "# encoding='UTF8',", "# interpolation='Template',", "# list_values=False,", "# _inspec=True)", "config", "=", "ConfigObj", "(", "config_file", ",", "configspec", "=", "path", ".", "join", "(", "path", ".", "abspath", "(", "path", ".", "dirname", "(", "__file__", ")", ")", ",", "configspec", ")", ",", "list_values", "=", "list_values", ")", "validation", "=", "config", ".", "validate", "(", "validate", ".", "Validator", "(", ")", ",", "preserve_errors", "=", "True", ")", "if", "validation", "==", "True", ":", "config", "=", "dict", "(", "config", ")", "for", "section", "in", "config", ":", "if", "section", "!=", "default_section", ":", "if", "server_mode", ":", "# When it's a servers config file, retrieve the correct fqdn", "config", "[", "section", "]", "[", "'availability'", "]", "=", "True", "if", "config", "[", "section", "]", "[", "'custom_fqdn'", "]", "==", "None", ":", "config", "[", "section", "]", "[", "'custom_fqdn'", "]", "=", "socket", ".", "getfqdn", "(", ")", "for", "option", "in", "config", "[", "section", "]", ":", "# retrieve default configuration for missing values", "if", "config", "[", "section", "]", "[", "option", "]", "==", "None", ":", "config", "[", "section", "]", "[", "option", "]", "=", "config", "[", "default_section", "]", "[", "option", "]", "del", "(", "config", "[", "default_section", "]", ")", "return", "config", "else", ":", "raise", "ConfiguratorException", "(", "config_file", ",", "validation", ")" ]
Read the config file with spec validation
[ "Read", "the", "config", "file", "with", "spec", "validation" ]
train
https://github.com/Eyepea/tanto/blob/ad8fd32e0fd3b7bc3dee5dabb984a2567ae46fe9/monitoring_agent/configurator.py#L21-L52
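A hypothetical call for a servers config (both file names are invented); with server_mode=True each section gets availability=True and its custom_fqdn defaulted to the local FQDN:

servers = read('servers.cfg', 'servers.spec', server_mode=True)
for name, opts in servers.items():
    print(name, opts['custom_fqdn'], opts['availability'])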
pmacosta/pmisc
pmisc/strings.py
elapsed_time_string
def elapsed_time_string(start_time, stop_time): r""" Return a formatted string with the elapsed time between two time points. The string includes years (365 days), months (30 days), days (24 hours), hours (60 minutes), minutes (60 seconds) and seconds. If both arguments are equal, the string returned is :code:`'None'`; otherwise, the string returned is [YY year[s], [MM month[s], [DD day[s], [HH hour[s], [MM minute[s] [and SS second[s\]\]\]\]\]\]. Any part (year[s], month[s], etc.) is omitted if the value of that part is null/zero :param start_time: Starting time point :type start_time: `datetime <https://docs.python.org/3/library/ datetime.html#datetime-objects>`_ :param stop_time: Ending time point :type stop_time: `datetime` :rtype: string :raises: RuntimeError (Invalid time delta specification) For example: >>> import datetime, pmisc >>> start_time = datetime.datetime(2014, 1, 1, 1, 10, 1) >>> stop_time = datetime.datetime(2015, 1, 3, 1, 10, 3) >>> pmisc.elapsed_time_string(start_time, stop_time) '1 year, 2 days and 2 seconds' """ if start_time > stop_time: raise RuntimeError("Invalid time delta specification") delta_time = stop_time - start_time # Python 2.6 datetime objects do not have total_seconds() method tot_seconds = int( ( delta_time.microseconds + (delta_time.seconds + delta_time.days * 24 * 3600) * 10 ** 6 ) / 10 ** 6 ) years, remainder = divmod(tot_seconds, 365 * 24 * 60 * 60) months, remainder = divmod(remainder, 30 * 24 * 60 * 60) days, remainder = divmod(remainder, 24 * 60 * 60) hours, remainder = divmod(remainder, 60 * 60) minutes, seconds = divmod(remainder, 60) token_iter = zip( [years, months, days, hours, minutes, seconds], ["year", "month", "day", "hour", "minute", "second"], ) ret_list = [ "{token} {token_name}{plural}".format( token=num, token_name=desc, plural="s" if num > 1 else "" ) for num, desc in token_iter if num > 0 ] if not ret_list: return "None" if len(ret_list) == 1: return ret_list[0] if len(ret_list) == 2: return ret_list[0] + " and " + ret_list[1] return (", ".join(ret_list[0:-1])) + " and " + ret_list[-1]
python
def elapsed_time_string(start_time, stop_time): r""" Return a formatted string with the elapsed time between two time points. The string includes years (365 days), months (30 days), days (24 hours), hours (60 minutes), minutes (60 seconds) and seconds. If both arguments are equal, the string returned is :code:`'None'`; otherwise, the string returned is [YY year[s], [MM month[s], [DD day[s], [HH hour[s], [MM minute[s] [and SS second[s\]\]\]\]\]\]. Any part (year[s], month[s], etc.) is omitted if the value of that part is null/zero :param start_time: Starting time point :type start_time: `datetime <https://docs.python.org/3/library/ datetime.html#datetime-objects>`_ :param stop_time: Ending time point :type stop_time: `datetime` :rtype: string :raises: RuntimeError (Invalid time delta specification) For example: >>> import datetime, pmisc >>> start_time = datetime.datetime(2014, 1, 1, 1, 10, 1) >>> stop_time = datetime.datetime(2015, 1, 3, 1, 10, 3) >>> pmisc.elapsed_time_string(start_time, stop_time) '1 year, 2 days and 2 seconds' """ if start_time > stop_time: raise RuntimeError("Invalid time delta specification") delta_time = stop_time - start_time # Python 2.6 datetime objects do not have total_seconds() method tot_seconds = int( ( delta_time.microseconds + (delta_time.seconds + delta_time.days * 24 * 3600) * 10 ** 6 ) / 10 ** 6 ) years, remainder = divmod(tot_seconds, 365 * 24 * 60 * 60) months, remainder = divmod(remainder, 30 * 24 * 60 * 60) days, remainder = divmod(remainder, 24 * 60 * 60) hours, remainder = divmod(remainder, 60 * 60) minutes, seconds = divmod(remainder, 60) token_iter = zip( [years, months, days, hours, minutes, seconds], ["year", "month", "day", "hour", "minute", "second"], ) ret_list = [ "{token} {token_name}{plural}".format( token=num, token_name=desc, plural="s" if num > 1 else "" ) for num, desc in token_iter if num > 0 ] if not ret_list: return "None" if len(ret_list) == 1: return ret_list[0] if len(ret_list) == 2: return ret_list[0] + " and " + ret_list[1] return (", ".join(ret_list[0:-1])) + " and " + ret_list[-1]
[ "def", "elapsed_time_string", "(", "start_time", ",", "stop_time", ")", ":", "if", "start_time", ">", "stop_time", ":", "raise", "RuntimeError", "(", "\"Invalid time delta specification\"", ")", "delta_time", "=", "stop_time", "-", "start_time", "# Python 2.6 datetime objects do not have total_seconds() method", "tot_seconds", "=", "int", "(", "(", "delta_time", ".", "microseconds", "+", "(", "delta_time", ".", "seconds", "+", "delta_time", ".", "days", "*", "24", "*", "3600", ")", "*", "10", "**", "6", ")", "/", "10", "**", "6", ")", "years", ",", "remainder", "=", "divmod", "(", "tot_seconds", ",", "365", "*", "24", "*", "60", "*", "60", ")", "months", ",", "remainder", "=", "divmod", "(", "remainder", ",", "30", "*", "24", "*", "60", "*", "60", ")", "days", ",", "remainder", "=", "divmod", "(", "remainder", ",", "24", "*", "60", "*", "60", ")", "hours", ",", "remainder", "=", "divmod", "(", "remainder", ",", "60", "*", "60", ")", "minutes", ",", "seconds", "=", "divmod", "(", "remainder", ",", "60", ")", "token_iter", "=", "zip", "(", "[", "years", ",", "months", ",", "days", ",", "hours", ",", "minutes", ",", "seconds", "]", ",", "[", "\"year\"", ",", "\"month\"", ",", "\"day\"", ",", "\"hour\"", ",", "\"minute\"", ",", "\"second\"", "]", ",", ")", "ret_list", "=", "[", "\"{token} {token_name}{plural}\"", ".", "format", "(", "token", "=", "num", ",", "token_name", "=", "desc", ",", "plural", "=", "\"s\"", "if", "num", ">", "1", "else", "\"\"", ")", "for", "num", ",", "desc", "in", "token_iter", "if", "num", ">", "0", "]", "if", "not", "ret_list", ":", "return", "\"None\"", "if", "len", "(", "ret_list", ")", "==", "1", ":", "return", "ret_list", "[", "0", "]", "if", "len", "(", "ret_list", ")", "==", "2", ":", "return", "ret_list", "[", "0", "]", "+", "\" and \"", "+", "ret_list", "[", "1", "]", "return", "(", "\", \"", ".", "join", "(", "ret_list", "[", "0", ":", "-", "1", "]", ")", ")", "+", "\" and \"", "+", "ret_list", "[", "-", "1", "]" ]
r""" Return a formatted string with the elapsed time between two time points. The string includes years (365 days), months (30 days), days (24 hours), hours (60 minutes), minutes (60 seconds) and seconds. If both arguments are equal, the string returned is :code:`'None'`; otherwise, the string returned is [YY year[s], [MM month[s], [DD day[s], [HH hour[s], [MM minute[s] [and SS second[s\]\]\]\]\]\]. Any part (year[s], month[s], etc.) is omitted if the value of that part is null/zero :param start_time: Starting time point :type start_time: `datetime <https://docs.python.org/3/library/ datetime.html#datetime-objects>`_ :param stop_time: Ending time point :type stop_time: `datetime` :rtype: string :raises: RuntimeError (Invalid time delta specification) For example: >>> import datetime, pmisc >>> start_time = datetime.datetime(2014, 1, 1, 1, 10, 1) >>> stop_time = datetime.datetime(2015, 1, 3, 1, 10, 3) >>> pmisc.elapsed_time_string(start_time, stop_time) '1 year, 2 days and 2 seconds'
[ "r", "Return", "a", "formatted", "string", "with", "the", "elapsed", "time", "between", "two", "time", "points", "." ]
train
https://github.com/pmacosta/pmisc/blob/dd2bb32e59eee872f1ef2db2d9921a396ab9f50b/pmisc/strings.py#L95-L158
pmacosta/pmisc
pmisc/strings.py
pcolor
def pcolor(text, color, indent=0): r""" Return a string that once printed is colorized. :param text: Text to colorize :type text: string :param color: Color to use, one of :code:`'black'`, :code:`'red'`, :code:`'green'`, :code:`'yellow'`, :code:`'blue'`, :code:`'magenta'`, :code:`'cyan'`, :code:`'white'` or :code:`'none'` (case insensitive) :type color: string :param indent: Number of spaces to prefix the output with :type indent: integer :rtype: string :raises: * RuntimeError (Argument \`color\` is not valid) * RuntimeError (Argument \`indent\` is not valid) * RuntimeError (Argument \`text\` is not valid) * ValueError (Unknown color *[color]*) """ esc_dict = { "black": 30, "red": 31, "green": 32, "yellow": 33, "blue": 34, "magenta": 35, "cyan": 36, "white": 37, "none": -1, } if not isinstance(text, str): raise RuntimeError("Argument `text` is not valid") if not isinstance(color, str): raise RuntimeError("Argument `color` is not valid") if not isinstance(indent, int): raise RuntimeError("Argument `indent` is not valid") color = color.lower() if color not in esc_dict: raise ValueError("Unknown color {color}".format(color=color)) if esc_dict[color] != -1: return "\033[{color_code}m{indent}{text}\033[0m".format( color_code=esc_dict[color], indent=" " * indent, text=text ) return "{indent}{text}".format(indent=" " * indent, text=text)
python
def pcolor(text, color, indent=0): r""" Return a string that once printed is colorized. :param text: Text to colorize :type text: string :param color: Color to use, one of :code:`'black'`, :code:`'red'`, :code:`'green'`, :code:`'yellow'`, :code:`'blue'`, :code:`'magenta'`, :code:`'cyan'`, :code:`'white'` or :code:`'none'` (case insensitive) :type color: string :param indent: Number of spaces to prefix the output with :type indent: integer :rtype: string :raises: * RuntimeError (Argument \`color\` is not valid) * RuntimeError (Argument \`indent\` is not valid) * RuntimeError (Argument \`text\` is not valid) * ValueError (Unknown color *[color]*) """ esc_dict = { "black": 30, "red": 31, "green": 32, "yellow": 33, "blue": 34, "magenta": 35, "cyan": 36, "white": 37, "none": -1, } if not isinstance(text, str): raise RuntimeError("Argument `text` is not valid") if not isinstance(color, str): raise RuntimeError("Argument `color` is not valid") if not isinstance(indent, int): raise RuntimeError("Argument `indent` is not valid") color = color.lower() if color not in esc_dict: raise ValueError("Unknown color {color}".format(color=color)) if esc_dict[color] != -1: return "\033[{color_code}m{indent}{text}\033[0m".format( color_code=esc_dict[color], indent=" " * indent, text=text ) return "{indent}{text}".format(indent=" " * indent, text=text)
[ "def", "pcolor", "(", "text", ",", "color", ",", "indent", "=", "0", ")", ":", "esc_dict", "=", "{", "\"black\"", ":", "30", ",", "\"red\"", ":", "31", ",", "\"green\"", ":", "32", ",", "\"yellow\"", ":", "33", ",", "\"blue\"", ":", "34", ",", "\"magenta\"", ":", "35", ",", "\"cyan\"", ":", "36", ",", "\"white\"", ":", "37", ",", "\"none\"", ":", "-", "1", ",", "}", "if", "not", "isinstance", "(", "text", ",", "str", ")", ":", "raise", "RuntimeError", "(", "\"Argument `text` is not valid\"", ")", "if", "not", "isinstance", "(", "color", ",", "str", ")", ":", "raise", "RuntimeError", "(", "\"Argument `color` is not valid\"", ")", "if", "not", "isinstance", "(", "indent", ",", "int", ")", ":", "raise", "RuntimeError", "(", "\"Argument `indent` is not valid\"", ")", "color", "=", "color", ".", "lower", "(", ")", "if", "color", "not", "in", "esc_dict", ":", "raise", "ValueError", "(", "\"Unknown color {color}\"", ".", "format", "(", "color", "=", "color", ")", ")", "if", "esc_dict", "[", "color", "]", "!=", "-", "1", ":", "return", "\"\\033[{color_code}m{indent}{text}\\033[0m\"", ".", "format", "(", "color_code", "=", "esc_dict", "[", "color", "]", ",", "indent", "=", "\" \"", "*", "indent", ",", "text", "=", "text", ")", "return", "\"{indent}{text}\"", ".", "format", "(", "indent", "=", "\" \"", "*", "indent", ",", "text", "=", "text", ")" ]
r""" Return a string that once printed is colorized. :param text: Text to colorize :type text: string :param color: Color to use, one of :code:`'black'`, :code:`'red'`, :code:`'green'`, :code:`'yellow'`, :code:`'blue'`, :code:`'magenta'`, :code:`'cyan'`, :code:`'white'` or :code:`'none'` (case insensitive) :type color: string :param indent: Number of spaces to prefix the output with :type indent: integer :rtype: string :raises: * RuntimeError (Argument \`color\` is not valid) * RuntimeError (Argument \`indent\` is not valid) * RuntimeError (Argument \`text\` is not valid) * ValueError (Unknown color *[color]*)
[ "r", "Return", "a", "string", "that", "once", "printed", "is", "colorized", "." ]
train
https://github.com/pmacosta/pmisc/blob/dd2bb32e59eee872f1ef2db2d9921a396ab9f50b/pmisc/strings.py#L161-L212
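A short usage sketch for pcolor above. It only relies on behavior visible in the function itself, assumes a terminal that honors ANSI escape sequences, and assumes pmisc re-exports the function at package level, as the other docstrings in this module do.

import pmisc

# "red" maps to ANSI code 31, so the output is wrapped in "\033[31m...\033[0m".
print(pmisc.pcolor("error: disk full", "red", indent=4))

# The color lookup is case-insensitive, and "none" skips the escape codes
# entirely, leaving only the indent prefix.
print(pmisc.pcolor("plain text", "NONE"))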
pmacosta/pmisc
pmisc/strings.py
quote_str
def quote_str(obj): r""" Add extra quotes to a string. If the argument is not a string it is returned unmodified. :param obj: Object :type obj: any :rtype: Same as argument For example: >>> import pmisc >>> pmisc.quote_str(5) 5 >>> pmisc.quote_str('Hello!') '"Hello!"' >>> pmisc.quote_str('He said "hello!"') '\'He said "hello!"\'' """ if not isinstance(obj, str): return obj return "'{obj}'".format(obj=obj) if '"' in obj else '"{obj}"'.format(obj=obj)
python
def quote_str(obj): r""" Add extra quotes to a string. If the argument is not a string it is returned unmodified. :param obj: Object :type obj: any :rtype: Same as argument For example: >>> import pmisc >>> pmisc.quote_str(5) 5 >>> pmisc.quote_str('Hello!') '"Hello!"' >>> pmisc.quote_str('He said "hello!"') '\'He said "hello!"\'' """ if not isinstance(obj, str): return obj return "'{obj}'".format(obj=obj) if '"' in obj else '"{obj}"'.format(obj=obj)
[ "def", "quote_str", "(", "obj", ")", ":", "if", "not", "isinstance", "(", "obj", ",", "str", ")", ":", "return", "obj", "return", "\"'{obj}'\"", ".", "format", "(", "obj", "=", "obj", ")", "if", "'\"'", "in", "obj", "else", "'\"{obj}\"'", ".", "format", "(", "obj", "=", "obj", ")" ]
r""" Add extra quotes to a string. If the argument is not a string it is returned unmodified. :param obj: Object :type obj: any :rtype: Same as argument For example: >>> import pmisc >>> pmisc.quote_str(5) 5 >>> pmisc.quote_str('Hello!') '"Hello!"' >>> pmisc.quote_str('He said "hello!"') '\'He said "hello!"\''
[ "r", "Add", "extra", "quotes", "to", "a", "string", "." ]
train
https://github.com/pmacosta/pmisc/blob/dd2bb32e59eee872f1ef2db2d9921a396ab9f50b/pmisc/strings.py#L215-L238
pmacosta/pmisc
pmisc/strings.py
strframe
def strframe(obj, extended=False):
    """
    Return a string with a frame record pretty-formatted.

    The record is typically an item in a list generated by `inspect.stack()
    <https://docs.python.org/3/library/inspect.html#inspect.stack>`_.

    :param obj: Frame record
    :type obj: tuple

    :param extended: Flag that indicates whether contents of the frame object
                     are printed (True) or not (False)
    :type extended: boolean

    :rtype: string
    """
    # Stack frame -> (frame object [0], filename [1], line number of current
    # line [2], function name [3], list of lines of context from source
    # code [4], index of current line within list [5])
    fname = normalize_windows_fname(obj[1])
    ret = list()
    ret.append(pcolor("Frame object ID: {0}".format(hex(id(obj[0]))), "yellow"))
    ret.append("File name......: {0}".format(fname))
    ret.append("Line number....: {0}".format(obj[2]))
    ret.append("Function name..: {0}".format(obj[3]))
    ret.append("Context........: {0}".format(obj[4]))
    ret.append("Index..........: {0}".format(obj[5]))
    if extended:
        ret.append("f_back ID......: {0}".format(hex(id(obj[0].f_back))))
        ret.append("f_builtins.....: {0}".format(obj[0].f_builtins))
        ret.append("f_code.........: {0}".format(obj[0].f_code))
        ret.append("f_globals......: {0}".format(obj[0].f_globals))
        ret.append("f_lasti........: {0}".format(obj[0].f_lasti))
        ret.append("f_lineno.......: {0}".format(obj[0].f_lineno))
        ret.append("f_locals.......: {0}".format(obj[0].f_locals))
        if hasattr(obj[0], "f_restricted"):  # pragma: no cover
            ret.append("f_restricted...: {0}".format(obj[0].f_restricted))
        ret.append("f_trace........: {0}".format(obj[0].f_trace))
    return "\n".join(ret)
python
def strframe(obj, extended=False):
    """
    Return a string with a frame record pretty-formatted.

    The record is typically an item in a list generated by `inspect.stack()
    <https://docs.python.org/3/library/inspect.html#inspect.stack>`_.

    :param obj: Frame record
    :type obj: tuple

    :param extended: Flag that indicates whether contents of the frame object
                     are printed (True) or not (False)
    :type extended: boolean

    :rtype: string
    """
    # Stack frame -> (frame object [0], filename [1], line number of current
    # line [2], function name [3], list of lines of context from source
    # code [4], index of current line within list [5])
    fname = normalize_windows_fname(obj[1])
    ret = list()
    ret.append(pcolor("Frame object ID: {0}".format(hex(id(obj[0]))), "yellow"))
    ret.append("File name......: {0}".format(fname))
    ret.append("Line number....: {0}".format(obj[2]))
    ret.append("Function name..: {0}".format(obj[3]))
    ret.append("Context........: {0}".format(obj[4]))
    ret.append("Index..........: {0}".format(obj[5]))
    if extended:
        ret.append("f_back ID......: {0}".format(hex(id(obj[0].f_back))))
        ret.append("f_builtins.....: {0}".format(obj[0].f_builtins))
        ret.append("f_code.........: {0}".format(obj[0].f_code))
        ret.append("f_globals......: {0}".format(obj[0].f_globals))
        ret.append("f_lasti........: {0}".format(obj[0].f_lasti))
        ret.append("f_lineno.......: {0}".format(obj[0].f_lineno))
        ret.append("f_locals.......: {0}".format(obj[0].f_locals))
        if hasattr(obj[0], "f_restricted"):  # pragma: no cover
            ret.append("f_restricted...: {0}".format(obj[0].f_restricted))
        ret.append("f_trace........: {0}".format(obj[0].f_trace))
    return "\n".join(ret)
[ "def", "strframe", "(", "obj", ",", "extended", "=", "False", ")", ":", "# Stack frame -> (frame object [0], filename [1], line number of current", "# line [2], function name [3], list of lines of context from source", "# code [4], index of current line within list [5])", "fname", "=", "normalize_windows_fname", "(", "obj", "[", "1", "]", ")", "ret", "=", "list", "(", ")", "ret", ".", "append", "(", "pcolor", "(", "\"Frame object ID: {0}\"", ".", "format", "(", "hex", "(", "id", "(", "obj", "[", "0", "]", ")", ")", ")", ",", "\"yellow\"", ")", ")", "ret", ".", "append", "(", "\"File name......: {0}\"", ".", "format", "(", "fname", ")", ")", "ret", ".", "append", "(", "\"Line number....: {0}\"", ".", "format", "(", "obj", "[", "2", "]", ")", ")", "ret", ".", "append", "(", "\"Function name..: {0}\"", ".", "format", "(", "obj", "[", "3", "]", ")", ")", "ret", ".", "append", "(", "\"Context........: {0}\"", ".", "format", "(", "obj", "[", "4", "]", ")", ")", "ret", ".", "append", "(", "\"Index..........: {0}\"", ".", "format", "(", "obj", "[", "5", "]", ")", ")", "if", "extended", ":", "ret", ".", "append", "(", "\"f_back ID......: {0}\"", ".", "format", "(", "hex", "(", "id", "(", "obj", "[", "0", "]", ".", "f_back", ")", ")", ")", ")", "ret", ".", "append", "(", "\"f_builtins.....: {0}\"", ".", "format", "(", "obj", "[", "0", "]", ".", "f_builtins", ")", ")", "ret", ".", "append", "(", "\"f_code.........: {0}\"", ".", "format", "(", "obj", "[", "0", "]", ".", "f_code", ")", ")", "ret", ".", "append", "(", "\"f_globals......: {0}\"", ".", "format", "(", "obj", "[", "0", "]", ".", "f_globals", ")", ")", "ret", ".", "append", "(", "\"f_lasti........: {0}\"", ".", "format", "(", "obj", "[", "0", "]", ".", "f_lasti", ")", ")", "ret", ".", "append", "(", "\"f_lineno.......: {0}\"", ".", "format", "(", "obj", "[", "0", "]", ".", "f_lineno", ")", ")", "ret", ".", "append", "(", "\"f_locals.......: {0}\"", ".", "format", "(", "obj", "[", "0", "]", ".", "f_locals", ")", ")", "if", "hasattr", "(", "obj", "[", "0", "]", ",", "\"f_restricted\"", ")", ":", "# pragma: no cover", "ret", ".", "append", "(", "\"f_restricted...: {0}\"", ".", "format", "(", "obj", "[", "0", "]", ".", "f_restricted", ")", ")", "ret", ".", "append", "(", "\"f_trace........: {0}\"", ".", "format", "(", "obj", "[", "0", "]", ".", "f_trace", ")", ")", "return", "\"\\n\"", ".", "join", "(", "ret", ")" ]
Return a string with a frame record pretty-formatted.

    The record is typically an item in a list generated by `inspect.stack()
    <https://docs.python.org/3/library/inspect.html#inspect.stack>`_.

    :param obj: Frame record
    :type obj: tuple

    :param extended: Flag that indicates whether contents of the frame object
                     are printed (True) or not (False)
    :type extended: boolean

    :rtype: string
[ "Return", "a", "string", "with", "a", "frame", "record", "pretty", "-", "formatted", "." ]
train
https://github.com/pmacosta/pmisc/blob/dd2bb32e59eee872f1ef2db2d9921a396ab9f50b/pmisc/strings.py#L241-L279
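A hedged sketch of feeding strframe a live frame record. inspect.stack() returns records that index like the 6-tuple the function expects; the package-level import is assumed, matching the other docstrings in this module.

import inspect

import pmisc

def demo():
    # stack()[0] is the record for the current frame:
    # (frame, filename, lineno, function, context lines, index).
    print(pmisc.strframe(inspect.stack()[0], extended=False))

demo()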
limix/optimix
optimix/_variables.py
Variables.set
def set(self, x): """ Set variable values via a dictionary mapping name to value. """ for name, value in iter(x.items()): if hasattr(value, "ndim"): if self[name].value.ndim < value.ndim: self[name].value.itemset(value.squeeze()) else: self[name].value = value else: self[name].value.itemset(value)
python
def set(self, x): """ Set variable values via a dictionary mapping name to value. """ for name, value in iter(x.items()): if hasattr(value, "ndim"): if self[name].value.ndim < value.ndim: self[name].value.itemset(value.squeeze()) else: self[name].value = value else: self[name].value.itemset(value)
[ "def", "set", "(", "self", ",", "x", ")", ":", "for", "name", ",", "value", "in", "iter", "(", "x", ".", "items", "(", ")", ")", ":", "if", "hasattr", "(", "value", ",", "\"ndim\"", ")", ":", "if", "self", "[", "name", "]", ".", "value", ".", "ndim", "<", "value", ".", "ndim", ":", "self", "[", "name", "]", ".", "value", ".", "itemset", "(", "value", ".", "squeeze", "(", ")", ")", "else", ":", "self", "[", "name", "]", ".", "value", "=", "value", "else", ":", "self", "[", "name", "]", ".", "value", ".", "itemset", "(", "value", ")" ]
Set variable values via a dictionary mapping name to value.
[ "Set", "variable", "values", "via", "a", "dictionary", "mapping", "name", "to", "value", "." ]
train
https://github.com/limix/optimix/blob/d7b1356df259c9f6ee0d658258fb47d0074fc416/optimix/_variables.py#L6-L17
limix/optimix
optimix/_variables.py
Variables.select
def select(self, fixed): """ Return a subset of variables according to ``fixed``. """ names = [n for n in self.names() if self[n].isfixed == fixed] return Variables({n: self[n] for n in names})
python
def select(self, fixed): """ Return a subset of variables according to ``fixed``. """ names = [n for n in self.names() if self[n].isfixed == fixed] return Variables({n: self[n] for n in names})
[ "def", "select", "(", "self", ",", "fixed", ")", ":", "names", "=", "[", "n", "for", "n", "in", "self", ".", "names", "(", ")", "if", "self", "[", "n", "]", ".", "isfixed", "==", "fixed", "]", "return", "Variables", "(", "{", "n", ":", "self", "[", "n", "]", "for", "n", "in", "names", "}", ")" ]
Return a subset of variables according to ``fixed``.
[ "Return", "a", "subset", "of", "variables", "according", "to", "fixed", "." ]
train
https://github.com/limix/optimix/blob/d7b1356df259c9f6ee0d658258fb47d0074fc416/optimix/_variables.py#L19-L24
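A minimal sketch of the two Variables methods above. The Stub class is hypothetical and only mimics the .value/.isfixed attributes the methods actually touch; it assumes Variables can be constructed from a plain name-to-variable mapping (select itself does exactly that) and, for the scalar branch of set, that ndarray.itemset is available (it was removed in NumPy 2.0, so run this under NumPy < 2).

import numpy as np

from optimix._variables import Variables

class Stub:  # hypothetical stand-in exposing only what set()/select() read
    def __init__(self, value, isfixed=False):
        self.value = np.asarray(value, dtype=float)
        self.isfixed = isfixed

v = Variables({"scale": Stub(1.0), "mean": Stub([0.0, 0.0], isfixed=True)})
v.set({"scale": 2.5})          # scalar path: ndarray.itemset on a 0-d array
print(v["scale"].value)        # 2.5
free = v.select(fixed=False)   # keeps only the entries with isfixed == False
print(list(free.names()))      # only "scale" remains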
storborg/packagetrack
packagetrack/usps.py
USPSInterface.validate
def validate(self, tracking_number): "Return True if this is a valid USPS tracking number." tracking_num = tracking_number[:-1].replace(' ', '') odd_total = 0 even_total = 0 for ii, digit in enumerate(tracking_num): if ii % 2: odd_total += int(digit) else: even_total += int(digit) total = odd_total + even_total * 3 check = ((total - (total % 10) + 10) - total) % 10 return (check == int(tracking_number[-1:]))
python
def validate(self, tracking_number): "Return True if this is a valid USPS tracking number." tracking_num = tracking_number[:-1].replace(' ', '') odd_total = 0 even_total = 0 for ii, digit in enumerate(tracking_num): if ii % 2: odd_total += int(digit) else: even_total += int(digit) total = odd_total + even_total * 3 check = ((total - (total % 10) + 10) - total) % 10 return (check == int(tracking_number[-1:]))
[ "def", "validate", "(", "self", ",", "tracking_number", ")", ":", "tracking_num", "=", "tracking_number", "[", ":", "-", "1", "]", ".", "replace", "(", "' '", ",", "''", ")", "odd_total", "=", "0", "even_total", "=", "0", "for", "ii", ",", "digit", "in", "enumerate", "(", "tracking_num", ")", ":", "if", "ii", "%", "2", ":", "odd_total", "+=", "int", "(", "digit", ")", "else", ":", "even_total", "+=", "int", "(", "digit", ")", "total", "=", "odd_total", "+", "even_total", "*", "3", "check", "=", "(", "(", "total", "-", "(", "total", "%", "10", ")", "+", "10", ")", "-", "total", ")", "%", "10", "return", "(", "check", "==", "int", "(", "tracking_number", "[", "-", "1", ":", "]", ")", ")" ]
Return True if this is a valid USPS tracking number.
[ "Return", "True", "if", "this", "is", "a", "valid", "USPS", "tracking", "number", "." ]
train
https://github.com/storborg/packagetrack/blob/e1e5417565b8e2a919936713e1939db5aa895e56/packagetrack/usps.py#L11-L23
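To make the checksum arithmetic above concrete, here it is unrolled for a made-up number (illustrative only, not a real USPS tracking code).

body = "1234567"                        # hypothetical digits before the check digit
odd = sum(int(d) for d in body[1::2])   # odd indices 1, 3, 5 -> 2 + 4 + 6 = 12
even = sum(int(d) for d in body[0::2])  # even indices 0, 2, 4, 6 -> 1 + 3 + 5 + 7 = 16
total = odd + even * 3                  # 12 + 48 = 60
check = ((total - (total % 10) + 10) - total) % 10  # (60 + 10 - 60) % 10 = 0
print(body + str(check))                # "12345670" -> validate() returns True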
storborg/packagetrack
packagetrack/ups.py
UPSInterface.validate
def validate(self, tracking_number): "Return True if this is a valid UPS tracking number." tracking_num = tracking_number[2:-1] odd_total = 0 even_total = 0 for ii, digit in enumerate(tracking_num.upper()): try: value = int(digit) except ValueError: value = int((ord(digit) - 63) % 10) if (ii + 1) % 2: odd_total += value else: even_total += value total = odd_total + even_total * 2 check = ((total - (total % 10) + 10) - total) % 10 return (check == int(tracking_number[-1:]))
python
def validate(self, tracking_number): "Return True if this is a valid UPS tracking number." tracking_num = tracking_number[2:-1] odd_total = 0 even_total = 0 for ii, digit in enumerate(tracking_num.upper()): try: value = int(digit) except ValueError: value = int((ord(digit) - 63) % 10) if (ii + 1) % 2: odd_total += value else: even_total += value total = odd_total + even_total * 2 check = ((total - (total % 10) + 10) - total) % 10 return (check == int(tracking_number[-1:]))
[ "def", "validate", "(", "self", ",", "tracking_number", ")", ":", "tracking_num", "=", "tracking_number", "[", "2", ":", "-", "1", "]", "odd_total", "=", "0", "even_total", "=", "0", "for", "ii", ",", "digit", "in", "enumerate", "(", "tracking_num", ".", "upper", "(", ")", ")", ":", "try", ":", "value", "=", "int", "(", "digit", ")", "except", "ValueError", ":", "value", "=", "int", "(", "(", "ord", "(", "digit", ")", "-", "63", ")", "%", "10", ")", "if", "(", "ii", "+", "1", ")", "%", "2", ":", "odd_total", "+=", "value", "else", ":", "even_total", "+=", "value", "total", "=", "odd_total", "+", "even_total", "*", "2", "check", "=", "(", "(", "total", "-", "(", "total", "%", "10", ")", "+", "10", ")", "-", "total", ")", "%", "10", "return", "(", "check", "==", "int", "(", "tracking_number", "[", "-", "1", ":", "]", ")", ")" ]
Return True if this is a valid UPS tracking number.
[ "Return", "True", "if", "this", "is", "a", "valid", "UPS", "tracking", "number", "." ]
train
https://github.com/storborg/packagetrack/blob/e1e5417565b8e2a919936713e1939db5aa895e56/packagetrack/ups.py#L18-L36
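The UPS variant skips the leading "1Z", weights the even positions by two, and folds letters in via (ord(c) - 63) % 10. Unrolled for a made-up body (not a real UPS number):

core = "12345678"                       # hypothetical digits between "1Z" and the check digit
odd = sum(int(d) for d in core[0::2])   # (ii + 1) odd -> indices 0, 2, 4, 6 -> 16
even = sum(int(d) for d in core[1::2])  # indices 1, 3, 5, 7 -> 20
total = odd + even * 2                  # 16 + 40 = 56
check = ((total - (total % 10) + 10) - total) % 10  # (60 - 56) % 10 = 4
print("1Z" + core + str(check))         # "1Z123456784" -> validate() returns True
# A letter, e.g. "C", would contribute (ord("C") - 63) % 10 == 4 instead of int("C").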
storborg/packagetrack
packagetrack/ups.py
UPSInterface.track
def track(self, tracking_number): "Track a UPS package by number. Returns just a delivery date." resp = self.send_request(tracking_number) return self.parse_response(resp)
python
def track(self, tracking_number): "Track a UPS package by number. Returns just a delivery date." resp = self.send_request(tracking_number) return self.parse_response(resp)
[ "def", "track", "(", "self", ",", "tracking_number", ")", ":", "resp", "=", "self", ".", "send_request", "(", "tracking_number", ")", "return", "self", ".", "parse_response", "(", "resp", ")" ]
Track a UPS package by number. Returns just a delivery date.
[ "Track", "a", "UPS", "package", "by", "number", ".", "Returns", "just", "a", "delivery", "date", "." ]
train
https://github.com/storborg/packagetrack/blob/e1e5417565b8e2a919936713e1939db5aa895e56/packagetrack/ups.py#L86-L89
stephantul/reach
reach/reach.py
Reach.load
def load(pathtovector, wordlist=(), num_to_load=None,
             truncate_embeddings=None, unk_word=None, sep=" "):
        r"""
        Read a file in word2vec .txt format.

        The load function will raise a ValueError when trying to load items
        which do not conform to line lengths.

        Parameters
        ----------
        pathtovector : string
            The path to the vector file.
        wordlist : iterable, optional, default ()
            A list of words you want loaded from the vector file. If this is
            empty (the default), all words will be loaded.
        num_to_load : int, optional, default None
            The number of items to load from the file. Because loading can take
            some time, it is sometimes useful to only load the first n items
            from a vector file for quick inspection.
        truncate_embeddings : int, optional, default None
            If this value is not None, the vectors in the vector space will
            be truncated to the number of dimensions indicated by this value.
        unk_word : object
            The object to treat as UNK in your vector space. If this is not
            in your items dictionary after loading, we add it with a zero
            vector.

        Returns
        -------
        r : Reach
            An initialized Reach instance.

        """
        vectors, items = Reach._load(pathtovector,
                                     wordlist,
                                     num_to_load,
                                     truncate_embeddings,
                                     sep)
        if unk_word is not None:
            if unk_word not in set(items):
                unk_vec = np.zeros((1, vectors.shape[1]))
                vectors = np.concatenate([unk_vec, vectors], 0)
                items = [unk_word] + items
                unk_index = 0
            else:
                unk_index = items.index(unk_word)
        else:
            unk_index = None

        return Reach(vectors,
                     items,
                     name=os.path.split(pathtovector)[-1],
                     unk_index=unk_index)
python
def load(pathtovector, wordlist=(), num_to_load=None,
             truncate_embeddings=None, unk_word=None, sep=" "):
        r"""
        Read a file in word2vec .txt format.

        The load function will raise a ValueError when trying to load items
        which do not conform to line lengths.

        Parameters
        ----------
        pathtovector : string
            The path to the vector file.
        wordlist : iterable, optional, default ()
            A list of words you want loaded from the vector file. If this is
            empty (the default), all words will be loaded.
        num_to_load : int, optional, default None
            The number of items to load from the file. Because loading can take
            some time, it is sometimes useful to only load the first n items
            from a vector file for quick inspection.
        truncate_embeddings : int, optional, default None
            If this value is not None, the vectors in the vector space will
            be truncated to the number of dimensions indicated by this value.
        unk_word : object
            The object to treat as UNK in your vector space. If this is not
            in your items dictionary after loading, we add it with a zero
            vector.

        Returns
        -------
        r : Reach
            An initialized Reach instance.

        """
        vectors, items = Reach._load(pathtovector,
                                     wordlist,
                                     num_to_load,
                                     truncate_embeddings,
                                     sep)
        if unk_word is not None:
            if unk_word not in set(items):
                unk_vec = np.zeros((1, vectors.shape[1]))
                vectors = np.concatenate([unk_vec, vectors], 0)
                items = [unk_word] + items
                unk_index = 0
            else:
                unk_index = items.index(unk_word)
        else:
            unk_index = None

        return Reach(vectors,
                     items,
                     name=os.path.split(pathtovector)[-1],
                     unk_index=unk_index)
[ "def", "load", "(", "pathtovector", ",", "wordlist", "=", "(", ")", ",", "num_to_load", "=", "None", ",", "truncate_embeddings", "=", "None", ",", "unk_word", "=", "None", ",", "sep", "=", "\" \"", ")", ":", "vectors", ",", "items", "=", "Reach", ".", "_load", "(", "pathtovector", ",", "wordlist", ",", "num_to_load", ",", "truncate_embeddings", ",", "sep", ")", "if", "unk_word", "is", "not", "None", ":", "if", "unk_word", "not", "in", "set", "(", "items", ")", ":", "unk_vec", "=", "np", ".", "zeros", "(", "(", "1", ",", "vectors", ".", "shape", "[", "1", "]", ")", ")", "vectors", "=", "np", ".", "concatenate", "(", "[", "unk_vec", ",", "vectors", "]", ",", "0", ")", "items", "=", "[", "unk_word", "]", "+", "items", "unk_index", "=", "0", "else", ":", "unk_index", "=", "items", ".", "index", "(", "unk_word", ")", "else", ":", "unk_index", "=", "None", "return", "Reach", "(", "vectors", ",", "items", ",", "name", "=", "os", ".", "path", ".", "split", "(", "pathtovector", ")", "[", "-", "1", "]", ",", "unk_index", "=", "unk_index", ")" ]
r""" Read a file in word2vec .txt format. The load function will raise a ValueError when trying to load items which do not conform to line lengths. Parameters ---------- pathtovector : string The path to the vector file. header : bool Whether the vector file has a header of the type (NUMBER OF ITEMS, SIZE OF VECTOR). wordlist : iterable, optional, default () A list of words you want loaded from the vector file. If this is None (default), all words will be loaded. num_to_load : int, optional, default None The number of items to load from the file. Because loading can take some time, it is sometimes useful to onlyl load the first n items from a vector file for quick inspection. truncate_embeddings : int, optional, default None If this value is not None, the vectors in the vector space will be truncated to the number of dimensions indicated by this value. unk_word : object The object to treat as UNK in your vector space. If this is not in your items dictionary after loading, we add it with a zero vector. Returns ------- r : Reach An initialized Reach instance.
[ "r", "Read", "a", "file", "in", "word2vec", ".", "txt", "format", "." ]
train
https://github.com/stephantul/reach/blob/e5ed0cc895d17429e797c6d7dd57bce82ff00d5d/reach/reach.py#L74-L133
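A usage sketch for the loader above. The file name is hypothetical, and the import path simply mirrors this file's location (reach/reach.py), since how the package re-exports Reach isn't shown here.

from reach.reach import Reach

# "vectors.txt" is a hypothetical word2vec-style text file: an optional
# "<num_items> <dim>" header followed by "word v1 v2 ... vN" lines.
r = Reach.load("vectors.txt",
               wordlist=("cat", "dog"),  # load only these two items
               unk_word="<UNK>")         # prepended with a zero vector if absent
print(r.vectors.shape)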
stephantul/reach
reach/reach.py
Reach._load
def _load(pathtovector, wordlist, num_to_load=None, truncate_embeddings=None, sep=" "): """Load a matrix and wordlist from a .vec file.""" vectors = [] addedwords = set() words = [] try: wordlist = set(wordlist) except ValueError: wordlist = set() logger.info("Loading {0}".format(pathtovector)) firstline = open(pathtovector).readline().strip() try: num, size = firstline.split(sep) num, size = int(num), int(size) logger.info("Vector space: {} by {}".format(num, size)) header = True except ValueError: size = len(firstline.split(sep)) - 1 logger.info("Vector space: {} dim, # items unknown".format(size)) word, rest = firstline.split(sep, 1) # If the first line is correctly parseable, set header to False. header = False if truncate_embeddings is None or truncate_embeddings == 0: truncate_embeddings = size for idx, line in enumerate(open(pathtovector, encoding='utf-8')): if header and idx == 0: continue word, rest = line.rstrip(" \n").split(sep, 1) if wordlist and word not in wordlist: continue if word in addedwords: raise ValueError("Duplicate: {} on line {} was in the " "vector space twice".format(word, idx)) if len(rest.split(sep)) != size: raise ValueError("Incorrect input at index {}, size " "is {}, expected " "{}".format(idx+1, len(rest.split(sep)), size)) words.append(word) addedwords.add(word) vectors.append(np.fromstring(rest, sep=sep)[:truncate_embeddings]) if num_to_load is not None and len(addedwords) >= num_to_load: break vectors = np.array(vectors).astype(np.float32) logger.info("Loading finished") if wordlist: diff = wordlist - addedwords if diff: logger.info("Not all items from your wordlist were in your " "vector space: {}.".format(diff)) return vectors, words
python
def _load(pathtovector, wordlist, num_to_load=None, truncate_embeddings=None, sep=" "): """Load a matrix and wordlist from a .vec file.""" vectors = [] addedwords = set() words = [] try: wordlist = set(wordlist) except ValueError: wordlist = set() logger.info("Loading {0}".format(pathtovector)) firstline = open(pathtovector).readline().strip() try: num, size = firstline.split(sep) num, size = int(num), int(size) logger.info("Vector space: {} by {}".format(num, size)) header = True except ValueError: size = len(firstline.split(sep)) - 1 logger.info("Vector space: {} dim, # items unknown".format(size)) word, rest = firstline.split(sep, 1) # If the first line is correctly parseable, set header to False. header = False if truncate_embeddings is None or truncate_embeddings == 0: truncate_embeddings = size for idx, line in enumerate(open(pathtovector, encoding='utf-8')): if header and idx == 0: continue word, rest = line.rstrip(" \n").split(sep, 1) if wordlist and word not in wordlist: continue if word in addedwords: raise ValueError("Duplicate: {} on line {} was in the " "vector space twice".format(word, idx)) if len(rest.split(sep)) != size: raise ValueError("Incorrect input at index {}, size " "is {}, expected " "{}".format(idx+1, len(rest.split(sep)), size)) words.append(word) addedwords.add(word) vectors.append(np.fromstring(rest, sep=sep)[:truncate_embeddings]) if num_to_load is not None and len(addedwords) >= num_to_load: break vectors = np.array(vectors).astype(np.float32) logger.info("Loading finished") if wordlist: diff = wordlist - addedwords if diff: logger.info("Not all items from your wordlist were in your " "vector space: {}.".format(diff)) return vectors, words
[ "def", "_load", "(", "pathtovector", ",", "wordlist", ",", "num_to_load", "=", "None", ",", "truncate_embeddings", "=", "None", ",", "sep", "=", "\" \"", ")", ":", "vectors", "=", "[", "]", "addedwords", "=", "set", "(", ")", "words", "=", "[", "]", "try", ":", "wordlist", "=", "set", "(", "wordlist", ")", "except", "ValueError", ":", "wordlist", "=", "set", "(", ")", "logger", ".", "info", "(", "\"Loading {0}\"", ".", "format", "(", "pathtovector", ")", ")", "firstline", "=", "open", "(", "pathtovector", ")", ".", "readline", "(", ")", ".", "strip", "(", ")", "try", ":", "num", ",", "size", "=", "firstline", ".", "split", "(", "sep", ")", "num", ",", "size", "=", "int", "(", "num", ")", ",", "int", "(", "size", ")", "logger", ".", "info", "(", "\"Vector space: {} by {}\"", ".", "format", "(", "num", ",", "size", ")", ")", "header", "=", "True", "except", "ValueError", ":", "size", "=", "len", "(", "firstline", ".", "split", "(", "sep", ")", ")", "-", "1", "logger", ".", "info", "(", "\"Vector space: {} dim, # items unknown\"", ".", "format", "(", "size", ")", ")", "word", ",", "rest", "=", "firstline", ".", "split", "(", "sep", ",", "1", ")", "# If the first line is correctly parseable, set header to False.", "header", "=", "False", "if", "truncate_embeddings", "is", "None", "or", "truncate_embeddings", "==", "0", ":", "truncate_embeddings", "=", "size", "for", "idx", ",", "line", "in", "enumerate", "(", "open", "(", "pathtovector", ",", "encoding", "=", "'utf-8'", ")", ")", ":", "if", "header", "and", "idx", "==", "0", ":", "continue", "word", ",", "rest", "=", "line", ".", "rstrip", "(", "\" \\n\"", ")", ".", "split", "(", "sep", ",", "1", ")", "if", "wordlist", "and", "word", "not", "in", "wordlist", ":", "continue", "if", "word", "in", "addedwords", ":", "raise", "ValueError", "(", "\"Duplicate: {} on line {} was in the \"", "\"vector space twice\"", ".", "format", "(", "word", ",", "idx", ")", ")", "if", "len", "(", "rest", ".", "split", "(", "sep", ")", ")", "!=", "size", ":", "raise", "ValueError", "(", "\"Incorrect input at index {}, size \"", "\"is {}, expected \"", "\"{}\"", ".", "format", "(", "idx", "+", "1", ",", "len", "(", "rest", ".", "split", "(", "sep", ")", ")", ",", "size", ")", ")", "words", ".", "append", "(", "word", ")", "addedwords", ".", "add", "(", "word", ")", "vectors", ".", "append", "(", "np", ".", "fromstring", "(", "rest", ",", "sep", "=", "sep", ")", "[", ":", "truncate_embeddings", "]", ")", "if", "num_to_load", "is", "not", "None", "and", "len", "(", "addedwords", ")", ">=", "num_to_load", ":", "break", "vectors", "=", "np", ".", "array", "(", "vectors", ")", ".", "astype", "(", "np", ".", "float32", ")", "logger", ".", "info", "(", "\"Loading finished\"", ")", "if", "wordlist", ":", "diff", "=", "wordlist", "-", "addedwords", "if", "diff", ":", "logger", ".", "info", "(", "\"Not all items from your wordlist were in your \"", "\"vector space: {}.\"", ".", "format", "(", "diff", ")", ")", "return", "vectors", ",", "words" ]
Load a matrix and wordlist from a .vec file.
[ "Load", "a", "matrix", "and", "wordlist", "from", "a", ".", "vec", "file", "." ]
train
https://github.com/stephantul/reach/blob/e5ed0cc895d17429e797c6d7dd57bce82ff00d5d/reach/reach.py#L136-L205
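The header autodetection in _load hinges on whether the first line parses as two integers. A small sketch showing both accepted layouts (file names hypothetical; np.fromstring's text mode may emit a deprecation warning on newer NumPy):

from reach.reach import Reach

# First line "2 3" parses as two ints -> treated as a header and skipped.
with open("with_header.txt", "w", encoding="utf-8") as f:
    f.write("2 3\ncat 0.1 0.2 0.3\ndog 0.4 0.5 0.6\n")

# First line fails the int conversion -> the dimensionality is inferred
# from it and it is parsed as a regular item.
with open("no_header.txt", "w", encoding="utf-8") as f:
    f.write("cat 0.1 0.2 0.3\ndog 0.4 0.5 0.6\n")

for path in ("with_header.txt", "no_header.txt"):
    vectors, words = Reach._load(path, wordlist=())
    print(words, vectors.shape)  # ['cat', 'dog'] (2, 3) both times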
stephantul/reach
reach/reach.py
Reach.vectorize
def vectorize(self, tokens, remove_oov=False, norm=False):
        """
        Vectorize a sentence by replacing all items with their vectors.

        Parameters
        ----------
        tokens : object or list of objects
            The tokens to vectorize.
        remove_oov : bool, optional, default False
            Whether to remove OOV items. If False, OOV items are replaced by
            the UNK glyph. If this is True, the returned sequence might have
            a different length than the original sequence.
        norm : bool, optional, default False
            Whether to return the unit vectors, or the regular vectors.

        Returns
        -------
        s : numpy array
            An M * N matrix, where every item has been replaced by
            its vector. OOV items are either removed, or replaced
            by the value of the UNK glyph.

        """
        if not tokens:
            raise ValueError("You supplied an empty list.")
        index = list(self.bow(tokens, remove_oov=remove_oov))
        if not index:
            raise ValueError("You supplied a list with only OOV tokens: {}, "
                             "which then got removed. Set remove_oov to False,"
                             " or filter your sentences to remove any in which"
                             " all items are OOV.".format(tokens))
        if norm:
            return np.stack([self.norm_vectors[x] for x in index])
        else:
            return np.stack([self.vectors[x] for x in index])
python
def vectorize(self, tokens, remove_oov=False, norm=False):
        """
        Vectorize a sentence by replacing all items with their vectors.

        Parameters
        ----------
        tokens : object or list of objects
            The tokens to vectorize.
        remove_oov : bool, optional, default False
            Whether to remove OOV items. If False, OOV items are replaced by
            the UNK glyph. If this is True, the returned sequence might have
            a different length than the original sequence.
        norm : bool, optional, default False
            Whether to return the unit vectors, or the regular vectors.

        Returns
        -------
        s : numpy array
            An M * N matrix, where every item has been replaced by
            its vector. OOV items are either removed, or replaced
            by the value of the UNK glyph.

        """
        if not tokens:
            raise ValueError("You supplied an empty list.")
        index = list(self.bow(tokens, remove_oov=remove_oov))
        if not index:
            raise ValueError("You supplied a list with only OOV tokens: {}, "
                             "which then got removed. Set remove_oov to False,"
                             " or filter your sentences to remove any in which"
                             " all items are OOV.".format(tokens))
        if norm:
            return np.stack([self.norm_vectors[x] for x in index])
        else:
            return np.stack([self.vectors[x] for x in index])
[ "def", "vectorize", "(", "self", ",", "tokens", ",", "remove_oov", "=", "False", ",", "norm", "=", "False", ")", ":", "if", "not", "tokens", ":", "raise", "ValueError", "(", "\"You supplied an empty list.\"", ")", "index", "=", "list", "(", "self", ".", "bow", "(", "tokens", ",", "remove_oov", "=", "remove_oov", ")", ")", "if", "not", "index", ":", "raise", "ValueError", "(", "\"You supplied a list with only OOV tokens: {}, \"", "\"which then got removed. Set remove_oov to False,\"", "\" or filter your sentences to remove any in which\"", "\" all items are OOV.\"", ")", "if", "norm", ":", "return", "np", ".", "stack", "(", "[", "self", ".", "norm_vectors", "[", "x", "]", "for", "x", "in", "index", "]", ")", "else", ":", "return", "np", ".", "stack", "(", "[", "self", ".", "vectors", "[", "x", "]", "for", "x", "in", "index", "]", ")" ]
Vectorize a sentence by replacing all items with their vectors. Parameters ---------- tokens : object or list of objects The tokens to vectorize. remove_oov : bool, optional, default False Whether to remove OOV items. If False, OOV items are replaced by the UNK glyph. If this is True, the returned sequence might have a different length than the original sequence. norm : bool, optional, default False Whether to return the unit vectors, or the regular vectors. Returns ------- s : numpy array An M * N matrix, where every item has been replaced by its vector. OOV items are either removed, or replaced by the value of the UNK glyph.
[ "Vectorize", "a", "sentence", "by", "replacing", "all", "items", "with", "their", "vectors", "." ]
train
https://github.com/stephantul/reach/blob/e5ed0cc895d17429e797c6d7dd57bce82ff00d5d/reach/reach.py#L211-L245
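A toy sketch of vectorize. Constructing Reach directly from a matrix and an item list mirrors what load() does internally, but treat that constructor call as an assumption.

import numpy as np

from reach.reach import Reach

r = Reach(np.array([[1.0, 0.0], [0.0, 1.0]], dtype=np.float32), ["cat", "dog"])
print(r.vectorize(["cat", "dog", "cat"]).shape)  # (3, 2): one row per token

# With remove_oov=True, unknown tokens are dropped before lookup, so the
# output can be shorter than the input.
print(r.vectorize(["cat", "bird"], remove_oov=True).shape)  # (1, 2)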
stephantul/reach
reach/reach.py
Reach.bow
def bow(self, tokens, remove_oov=False): """ Create a bow representation of a list of tokens. Parameters ---------- tokens : list. The list of items to change into a bag of words representation. remove_oov : bool. Whether to remove OOV items from the input. If this is True, the length of the returned BOW representation might not be the length of the original representation. Returns ------- bow : generator A BOW representation of the list of items. """ if remove_oov: tokens = [x for x in tokens if x in self.items] for t in tokens: try: yield self.items[t] except KeyError: if self.unk_index is None: raise ValueError("You supplied OOV items but didn't " "provide the index of the replacement " "glyph. Either set remove_oov to True, " "or set unk_index to the index of the " "item which replaces any OOV items.") yield self.unk_index
python
def bow(self, tokens, remove_oov=False): """ Create a bow representation of a list of tokens. Parameters ---------- tokens : list. The list of items to change into a bag of words representation. remove_oov : bool. Whether to remove OOV items from the input. If this is True, the length of the returned BOW representation might not be the length of the original representation. Returns ------- bow : generator A BOW representation of the list of items. """ if remove_oov: tokens = [x for x in tokens if x in self.items] for t in tokens: try: yield self.items[t] except KeyError: if self.unk_index is None: raise ValueError("You supplied OOV items but didn't " "provide the index of the replacement " "glyph. Either set remove_oov to True, " "or set unk_index to the index of the " "item which replaces any OOV items.") yield self.unk_index
[ "def", "bow", "(", "self", ",", "tokens", ",", "remove_oov", "=", "False", ")", ":", "if", "remove_oov", ":", "tokens", "=", "[", "x", "for", "x", "in", "tokens", "if", "x", "in", "self", ".", "items", "]", "for", "t", "in", "tokens", ":", "try", ":", "yield", "self", ".", "items", "[", "t", "]", "except", "KeyError", ":", "if", "self", ".", "unk_index", "is", "None", ":", "raise", "ValueError", "(", "\"You supplied OOV items but didn't \"", "\"provide the index of the replacement \"", "\"glyph. Either set remove_oov to True, \"", "\"or set unk_index to the index of the \"", "\"item which replaces any OOV items.\"", ")", "yield", "self", ".", "unk_index" ]
Create a bow representation of a list of tokens. Parameters ---------- tokens : list. The list of items to change into a bag of words representation. remove_oov : bool. Whether to remove OOV items from the input. If this is True, the length of the returned BOW representation might not be the length of the original representation. Returns ------- bow : generator A BOW representation of the list of items.
[ "Create", "a", "bow", "representation", "of", "a", "list", "of", "tokens", "." ]
train
https://github.com/stephantul/reach/blob/e5ed0cc895d17429e797c6d7dd57bce82ff00d5d/reach/reach.py#L247-L279
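bow() yields indices lazily. A sketch under the same assumed constructor, with unk_index set so OOV tokens degrade gracefully instead of raising:

import numpy as np

from reach.reach import Reach

r = Reach(np.eye(2, dtype=np.float32), ["cat", "dog"], unk_index=0)
# OOV tokens fall back to unk_index instead of raising ValueError.
print(list(r.bow(["dog", "bird", "cat"])))  # [1, 0, 0], assuming list-order indexing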
stephantul/reach
reach/reach.py
Reach.transform
def transform(self, corpus, remove_oov=False, norm=False): """ Transform a corpus by repeated calls to vectorize, defined above. Parameters ---------- corpus : A list of strings, list of list of strings. Represents a corpus as a list of sentences, where sentences can either be strings or lists of tokens. remove_oov : bool, optional, default False If True, removes OOV items from the input before vectorization. Returns ------- c : list A list of numpy arrays, where each array represents the transformed sentence in the original list. The list is guaranteed to be the same length as the input list, but the arrays in the list may be of different lengths, depending on whether remove_oov is True. """ return [self.vectorize(s, remove_oov=remove_oov, norm=norm) for s in corpus]
python
def transform(self, corpus, remove_oov=False, norm=False): """ Transform a corpus by repeated calls to vectorize, defined above. Parameters ---------- corpus : A list of strings, list of list of strings. Represents a corpus as a list of sentences, where sentences can either be strings or lists of tokens. remove_oov : bool, optional, default False If True, removes OOV items from the input before vectorization. Returns ------- c : list A list of numpy arrays, where each array represents the transformed sentence in the original list. The list is guaranteed to be the same length as the input list, but the arrays in the list may be of different lengths, depending on whether remove_oov is True. """ return [self.vectorize(s, remove_oov=remove_oov, norm=norm) for s in corpus]
[ "def", "transform", "(", "self", ",", "corpus", ",", "remove_oov", "=", "False", ",", "norm", "=", "False", ")", ":", "return", "[", "self", ".", "vectorize", "(", "s", ",", "remove_oov", "=", "remove_oov", ",", "norm", "=", "norm", ")", "for", "s", "in", "corpus", "]" ]
Transform a corpus by repeated calls to vectorize, defined above. Parameters ---------- corpus : A list of strings, list of list of strings. Represents a corpus as a list of sentences, where sentences can either be strings or lists of tokens. remove_oov : bool, optional, default False If True, removes OOV items from the input before vectorization. Returns ------- c : list A list of numpy arrays, where each array represents the transformed sentence in the original list. The list is guaranteed to be the same length as the input list, but the arrays in the list may be of different lengths, depending on whether remove_oov is True.
[ "Transform", "a", "corpus", "by", "repeated", "calls", "to", "vectorize", "defined", "above", "." ]
train
https://github.com/stephantul/reach/blob/e5ed0cc895d17429e797c6d7dd57bce82ff00d5d/reach/reach.py#L281-L303
stephantul/reach
reach/reach.py
Reach.most_similar
def most_similar(self, items, num=10, batch_size=100,
                     show_progressbar=False, return_names=True):
        """
        Return the num most similar items to a given list of items.

        Parameters
        ----------
        items : list of objects or a single object.
            The items to get the most similar items to.
        num : int, optional, default 10
            The number of most similar items to retrieve.
        batch_size : int, optional, default 100.
            The batch size to use. 100 is a good default option. Increasing
            the batch size may increase the speed.
        show_progressbar : bool, optional, default False
            Whether to show a progressbar.
        return_names : bool, optional, default True
            Whether to return the item names, or just the distances.

        Returns
        -------
        sim : array
            For each item in the input, the num most similar items are
            returned in the form of (NAME, DISTANCE) tuples. If return_names
            is false, the returned list just contains distances.

        """
        # This line allows users to input single items.
        # We used to rely on string identities, but we now also allow
        # anything hashable as keys.
        # Might fail if a list of passed items is also in the vocabulary.
        # but I can't think of cases when this would happen, and what
        # user expectations are.
        try:
            if items in self.items:
                items = [items]
        except TypeError:
            pass
        x = np.stack([self.norm_vectors[self.items[x]] for x in items])

        result = self._batch(x,
                             batch_size,
                             num+1,
                             show_progressbar,
                             return_names)

        # list call consumes the generator.
        return [x[1:] for x in result]
python
def most_similar(self, items, num=10, batch_size=100,
                     show_progressbar=False, return_names=True):
        """
        Return the num most similar items to a given list of items.

        Parameters
        ----------
        items : list of objects or a single object.
            The items to get the most similar items to.
        num : int, optional, default 10
            The number of most similar items to retrieve.
        batch_size : int, optional, default 100.
            The batch size to use. 100 is a good default option. Increasing
            the batch size may increase the speed.
        show_progressbar : bool, optional, default False
            Whether to show a progressbar.
        return_names : bool, optional, default True
            Whether to return the item names, or just the distances.

        Returns
        -------
        sim : array
            For each item in the input, the num most similar items are
            returned in the form of (NAME, DISTANCE) tuples. If return_names
            is false, the returned list just contains distances.

        """
        # This line allows users to input single items.
        # We used to rely on string identities, but we now also allow
        # anything hashable as keys.
        # Might fail if a list of passed items is also in the vocabulary.
        # but I can't think of cases when this would happen, and what
        # user expectations are.
        try:
            if items in self.items:
                items = [items]
        except TypeError:
            pass
        x = np.stack([self.norm_vectors[self.items[x]] for x in items])

        result = self._batch(x,
                             batch_size,
                             num+1,
                             show_progressbar,
                             return_names)

        # list call consumes the generator.
        return [x[1:] for x in result]
[ "def", "most_similar", "(", "self", ",", "items", ",", "num", "=", "10", ",", "batch_size", "=", "100", ",", "show_progressbar", "=", "False", ",", "return_names", "=", "True", ")", ":", "# This line allows users to input single items.", "# We used to rely on string identities, but we now also allow", "# anything hashable as keys.", "# Might fail if a list of passed items is also in the vocabulary.", "# but I can't think of cases when this would happen, and what", "# user expectations are.", "try", ":", "if", "items", "in", "self", ".", "items", ":", "items", "=", "[", "items", "]", "except", "TypeError", ":", "pass", "x", "=", "np", ".", "stack", "(", "[", "self", ".", "norm_vectors", "[", "self", ".", "items", "[", "x", "]", "]", "for", "x", "in", "items", "]", ")", "result", "=", "self", ".", "_batch", "(", "x", ",", "batch_size", ",", "num", "+", "1", ",", "show_progressbar", ",", "return_names", ")", "# list call consumes the generator.", "return", "[", "x", "[", "1", ":", "]", "for", "x", "in", "result", "]" ]
Return the num most similar items to a given list of items.

        Parameters
        ----------
        items : list of objects or a single object.
            The items to get the most similar items to.
        num : int, optional, default 10
            The number of most similar items to retrieve.
        batch_size : int, optional, default 100.
            The batch size to use. 100 is a good default option. Increasing
            the batch size may increase the speed.
        show_progressbar : bool, optional, default False
            Whether to show a progressbar.
        return_names : bool, optional, default True
            Whether to return the item names, or just the distances.

        Returns
        -------
        sim : array
            For each item in the input, the num most similar items are
            returned in the form of (NAME, DISTANCE) tuples. If return_names
            is false, the returned list just contains distances.
[ "Return", "the", "num", "most", "similar", "items", "to", "a", "given", "list", "of", "items", "." ]
train
https://github.com/stephantul/reach/blob/e5ed0cc895d17429e797c6d7dd57bce82ff00d5d/reach/reach.py#L305-L356
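A toy sketch of most_similar under the same assumed constructor. Because a query item is always its own nearest neighbor, the method fetches num + 1 hits internally and drops the first one.

import numpy as np

from reach.reach import Reach

vecs = np.array([[1.0, 0.0], [0.9, 0.1], [0.0, 1.0]], dtype=np.float32)
r = Reach(vecs, ["cat", "tiger", "submarine"])
print(r.most_similar("cat", num=2))
# One list per query item, e.g. [[('tiger', 0.99...), ('submarine', 0.0)]]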
stephantul/reach
reach/reach.py
Reach.threshold
def threshold(self, items, threshold=.5, batch_size=100,
                  show_progressbar=False, return_names=True):
        """
        Return all items whose similarity is higher than threshold.

        Parameters
        ----------
        items : list of objects or a single object.
            The items to get the most similar items to.
        threshold : float, optional, default .5
            The minimum similarity; only items whose similarity to the input
            is above this value are retrieved.
        batch_size : int, optional, default 100.
            The batch size to use. 100 is a good default option. Increasing
            the batch size may increase the speed.
        show_progressbar : bool, optional, default False
            Whether to show a progressbar.
        return_names : bool, optional, default True
            Whether to return the item names, or just the distances.

        Returns
        -------
        sim : array
            For each item in the input, all items whose similarity is above
            the threshold are returned in the form of (NAME, DISTANCE)
            tuples. If return_names is false, the returned list just
            contains distances.

        """
        # This line allows users to input single items.
        # We used to rely on string identities, but we now also allow
        # anything hashable as keys.
        # Might fail if a list of passed items is also in the vocabulary.
        # but I can't think of cases when this would happen, and what
        # user expectations are.
        try:
            if items in self.items:
                items = [items]
        except TypeError:
            pass
        x = np.stack([self.norm_vectors[self.items[x]] for x in items])

        result = self._threshold_batch(x,
                                       batch_size,
                                       threshold,
                                       show_progressbar,
                                       return_names)

        # list call consumes the generator.
        return [x[1:] for x in result]
python
def threshold(self, items, threshold=.5, batch_size=100,
                  show_progressbar=False, return_names=True):
        """
        Return all items whose similarity is higher than threshold.

        Parameters
        ----------
        items : list of objects or a single object.
            The items to get the most similar items to.
        threshold : float, optional, default .5
            The minimum similarity; only items whose similarity to the input
            is above this value are retrieved.
        batch_size : int, optional, default 100.
            The batch size to use. 100 is a good default option. Increasing
            the batch size may increase the speed.
        show_progressbar : bool, optional, default False
            Whether to show a progressbar.
        return_names : bool, optional, default True
            Whether to return the item names, or just the distances.

        Returns
        -------
        sim : array
            For each item in the input, all items whose similarity is above
            the threshold are returned in the form of (NAME, DISTANCE)
            tuples. If return_names is false, the returned list just
            contains distances.

        """
        # This line allows users to input single items.
        # We used to rely on string identities, but we now also allow
        # anything hashable as keys.
        # Might fail if a list of passed items is also in the vocabulary.
        # but I can't think of cases when this would happen, and what
        # user expectations are.
        try:
            if items in self.items:
                items = [items]
        except TypeError:
            pass
        x = np.stack([self.norm_vectors[self.items[x]] for x in items])

        result = self._threshold_batch(x,
                                       batch_size,
                                       threshold,
                                       show_progressbar,
                                       return_names)

        # list call consumes the generator.
        return [x[1:] for x in result]
[ "def", "threshold", "(", "self", ",", "items", ",", "threshold", "=", ".5", ",", "batch_size", "=", "100", ",", "show_progressbar", "=", "False", ",", "return_names", "=", "True", ")", ":", "# This line allows users to input single items.", "# We used to rely on string identities, but we now also allow", "# anything hashable as keys.", "# Might fail if a list of passed items is also in the vocabulary.", "# but I can't think of cases when this would happen, and what", "# user expectations are.", "try", ":", "if", "items", "in", "self", ".", "items", ":", "items", "=", "[", "items", "]", "except", "TypeError", ":", "pass", "x", "=", "np", ".", "stack", "(", "[", "self", ".", "norm_vectors", "[", "self", ".", "items", "[", "x", "]", "]", "for", "x", "in", "items", "]", ")", "result", "=", "self", ".", "_threshold_batch", "(", "x", ",", "batch_size", ",", "threshold", ",", "show_progressbar", ",", "return_names", ")", "# list call consumes the generator.", "return", "[", "x", "[", "1", ":", "]", "for", "x", "in", "result", "]" ]
Return all items whose similarity is higher than threshold.

        Parameters
        ----------
        items : list of objects or a single object.
            The items to get the most similar items to.
        threshold : float, optional, default .5
            The minimum similarity; only items whose similarity to the input
            is above this value are retrieved.
        batch_size : int, optional, default 100.
            The batch size to use. 100 is a good default option. Increasing
            the batch size may increase the speed.
        show_progressbar : bool, optional, default False
            Whether to show a progressbar.
        return_names : bool, optional, default True
            Whether to return the item names, or just the distances.

        Returns
        -------
        sim : array
            For each item in the input, all items whose similarity is above
            the threshold are returned in the form of (NAME, DISTANCE)
            tuples. If return_names is false, the returned list just
            contains distances.
[ "Return", "all", "items", "whose", "similarity", "is", "higher", "than", "threshold", "." ]
train
https://github.com/stephantul/reach/blob/e5ed0cc895d17429e797c6d7dd57bce82ff00d5d/reach/reach.py#L358-L409
stephantul/reach
reach/reach.py
Reach.nearest_neighbor
def nearest_neighbor(self, vectors, num=10, batch_size=100, show_progressbar=False, return_names=True): """ Find the nearest neighbors to some arbitrary vector. This function is meant to be used in composition operations. The most_similar function can only handle items that are in vocab, and looks up their vector through a dictionary. Compositions, e.g. "King - man + woman" are necessarily not in the vocabulary. Parameters ---------- vectors : list of arrays or numpy array The vectors to find the nearest neighbors to. num : int, optional, default 10 The number of most similar items to retrieve. batch_size : int, optional, default 100. The batch size to use. 100 is a good default option. Increasing the batch size may increase speed. show_progressbar : bool, optional, default False Whether to show a progressbar. return_names : bool, optional, default True Whether to return the item names, or just the distances. Returns ------- sim : list of tuples. For each item in the input the num most similar items are returned in the form of (NAME, DISTANCE) tuples. If return_names is set to false, only the distances are returned. """ vectors = np.array(vectors) if np.ndim(vectors) == 1: vectors = vectors[None, :] result = [] result = self._batch(vectors, batch_size, num+1, show_progressbar, return_names) return list(result)
python
def nearest_neighbor(self, vectors, num=10, batch_size=100, show_progressbar=False, return_names=True): """ Find the nearest neighbors to some arbitrary vector. This function is meant to be used in composition operations. The most_similar function can only handle items that are in vocab, and looks up their vector through a dictionary. Compositions, e.g. "King - man + woman" are necessarily not in the vocabulary. Parameters ---------- vectors : list of arrays or numpy array The vectors to find the nearest neighbors to. num : int, optional, default 10 The number of most similar items to retrieve. batch_size : int, optional, default 100. The batch size to use. 100 is a good default option. Increasing the batch size may increase speed. show_progressbar : bool, optional, default False Whether to show a progressbar. return_names : bool, optional, default True Whether to return the item names, or just the distances. Returns ------- sim : list of tuples. For each item in the input the num most similar items are returned in the form of (NAME, DISTANCE) tuples. If return_names is set to false, only the distances are returned. """ vectors = np.array(vectors) if np.ndim(vectors) == 1: vectors = vectors[None, :] result = [] result = self._batch(vectors, batch_size, num+1, show_progressbar, return_names) return list(result)
[ "def", "nearest_neighbor", "(", "self", ",", "vectors", ",", "num", "=", "10", ",", "batch_size", "=", "100", ",", "show_progressbar", "=", "False", ",", "return_names", "=", "True", ")", ":", "vectors", "=", "np", ".", "array", "(", "vectors", ")", "if", "np", ".", "ndim", "(", "vectors", ")", "==", "1", ":", "vectors", "=", "vectors", "[", "None", ",", ":", "]", "result", "=", "[", "]", "result", "=", "self", ".", "_batch", "(", "vectors", ",", "batch_size", ",", "num", "+", "1", ",", "show_progressbar", ",", "return_names", ")", "return", "list", "(", "result", ")" ]
Find the nearest neighbors to some arbitrary vector. This function is meant to be used in composition operations. The most_similar function can only handle items that are in vocab, and looks up their vector through a dictionary. Compositions, e.g. "King - man + woman" are necessarily not in the vocabulary. Parameters ---------- vectors : list of arrays or numpy array The vectors to find the nearest neighbors to. num : int, optional, default 10 The number of most similar items to retrieve. batch_size : int, optional, default 100. The batch size to use. 100 is a good default option. Increasing the batch size may increase speed. show_progressbar : bool, optional, default False Whether to show a progressbar. return_names : bool, optional, default True Whether to return the item names, or just the distances. Returns ------- sim : list of tuples. For each item in the input the num most similar items are returned in the form of (NAME, DISTANCE) tuples. If return_names is set to false, only the distances are returned.
[ "Find", "the", "nearest", "neighbors", "to", "some", "arbitrary", "vector", "." ]
train
https://github.com/stephantul/reach/blob/e5ed0cc895d17429e797c6d7dd57bce82ff00d5d/reach/reach.py#L411-L459
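A minimal usage sketch for the record above. The toy vectors and item names are illustrative assumptions, not part of this record; the Reach(vectors, items) constructor shape is the one used by load_fast_format further down.

import numpy as np
from reach import Reach

# Hypothetical 2-d toy space; names and values are made up.
r = Reach(np.array([[1.0, 0.0], [0.0, 1.0], [0.7, 0.7]]),
          ["king", "woman", "queen"])
v = r.vectors[r.items["king"]] + r.vectors[r.items["woman"]]  # composed query
print(r.nearest_neighbor(v, num=2))  # one (NAME, DISTANCE) list per query vector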
stephantul/reach
reach/reach.py
Reach.nearest_neighbor_threshold
def nearest_neighbor_threshold(self, vectors, threshold=.5, batch_size=100, show_progressbar=False, return_names=True):
    """
    Find the nearest neighbors to some arbitrary vector.

    This function is meant to be used in composition operations. The
    most_similar function can only handle items that are in vocab, and
    looks up their vector through a dictionary. Compositions, e.g.
    "King - man + woman" are necessarily not in the vocabulary.

    Parameters
    ----------
    vectors : list of arrays or numpy array
        The vectors to find the nearest neighbors to.
    threshold : float, optional, default .5
        The similarity threshold; all items at or above it are retrieved.
    batch_size : int, optional, default 100.
        The batch size to use. 100 is a good default option. Increasing
        the batch size may increase speed.
    show_progressbar : bool, optional, default False
        Whether to show a progressbar.
    return_names : bool, optional, default True
        Whether to return the item names, or just the distances.

    Returns
    -------
    sim : list of tuples.
        For each item in the input, all items within the threshold are
        returned in the form of (NAME, DISTANCE) tuples. If return_names
        is set to false, only the distances are returned.

    """
    vectors = np.array(vectors)
    if np.ndim(vectors) == 1:
        vectors = vectors[None, :]

    result = []

    result = self._threshold_batch(vectors,
                                   batch_size,
                                   threshold,
                                   show_progressbar,
                                   return_names)

    return list(result)
python
def nearest_neighbor_threshold(self, vectors, threshold=.5, batch_size=100, show_progressbar=False, return_names=True):
    """
    Find the nearest neighbors to some arbitrary vector.

    This function is meant to be used in composition operations. The
    most_similar function can only handle items that are in vocab, and
    looks up their vector through a dictionary. Compositions, e.g.
    "King - man + woman" are necessarily not in the vocabulary.

    Parameters
    ----------
    vectors : list of arrays or numpy array
        The vectors to find the nearest neighbors to.
    threshold : float, optional, default .5
        The similarity threshold; all items at or above it are retrieved.
    batch_size : int, optional, default 100.
        The batch size to use. 100 is a good default option. Increasing
        the batch size may increase speed.
    show_progressbar : bool, optional, default False
        Whether to show a progressbar.
    return_names : bool, optional, default True
        Whether to return the item names, or just the distances.

    Returns
    -------
    sim : list of tuples.
        For each item in the input, all items within the threshold are
        returned in the form of (NAME, DISTANCE) tuples. If return_names
        is set to false, only the distances are returned.

    """
    vectors = np.array(vectors)
    if np.ndim(vectors) == 1:
        vectors = vectors[None, :]

    result = []

    result = self._threshold_batch(vectors,
                                   batch_size,
                                   threshold,
                                   show_progressbar,
                                   return_names)

    return list(result)
[ "def", "nearest_neighbor_threshold", "(", "self", ",", "vectors", ",", "threshold", "=", ".5", ",", "batch_size", "=", "100", ",", "show_progressbar", "=", "False", ",", "return_names", "=", "True", ")", ":", "vectors", "=", "np", ".", "array", "(", "vectors", ")", "if", "np", ".", "ndim", "(", "vectors", ")", "==", "1", ":", "vectors", "=", "vectors", "[", "None", ",", ":", "]", "result", "=", "[", "]", "result", "=", "self", ".", "_threshold_batch", "(", "vectors", ",", "batch_size", ",", "threshold", ",", "show_progressbar", ",", "return_names", ")", "return", "list", "(", "result", ")" ]
Find the nearest neighbors to some arbitrary vector.

This function is meant to be used in composition operations. The most_similar function can only handle items that are in vocab, and looks up their vector through a dictionary. Compositions, e.g. "King - man + woman" are necessarily not in the vocabulary.

Parameters
----------
vectors : list of arrays or numpy array
    The vectors to find the nearest neighbors to.
threshold : float, optional, default .5
    The similarity threshold; all items at or above it are retrieved.
batch_size : int, optional, default 100.
    The batch size to use. 100 is a good default option. Increasing the batch size may increase speed.
show_progressbar : bool, optional, default False
    Whether to show a progressbar.
return_names : bool, optional, default True
    Whether to return the item names, or just the distances.

Returns
-------
sim : list of tuples.
    For each item in the input, all items within the threshold are returned in the form of (NAME, DISTANCE) tuples. If return_names is set to false, only the distances are returned.
[ "Find", "the", "nearest", "neighbors", "to", "some", "arbitrary", "vector", "." ]
train
https://github.com/stephantul/reach/blob/e5ed0cc895d17429e797c6d7dd57bce82ff00d5d/reach/reach.py#L461-L509
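The thresholded variant returns every stored item whose similarity with the query meets the cutoff, rather than a fixed top-k. A sketch under the same toy-space assumption as above:

hits = r.nearest_neighbor_threshold(v, threshold=.8)
for name, sim in hits[0]:  # one result list per query vector
    print(name, sim)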
stephantul/reach
reach/reach.py
Reach._threshold_batch
def _threshold_batch(self, vectors, batch_size, threshold, show_progressbar, return_names): """Batched cosine distance.""" vectors = self.normalize(vectors) # Single transpose, makes things faster. reference_transposed = self.norm_vectors.T for i in tqdm(range(0, len(vectors), batch_size), disable=not show_progressbar): distances = vectors[i: i+batch_size].dot(reference_transposed) # For safety we clip distances = np.clip(distances, a_min=.0, a_max=1.0) for lidx, dists in enumerate(distances): indices = np.flatnonzero(dists >= threshold) sorted_indices = indices[np.argsort(-dists[indices])] if return_names: yield [(self.indices[d], dists[d]) for d in sorted_indices] else: yield list(dists[sorted_indices])
python
def _threshold_batch(self, vectors, batch_size, threshold, show_progressbar, return_names): """Batched cosine distance.""" vectors = self.normalize(vectors) # Single transpose, makes things faster. reference_transposed = self.norm_vectors.T for i in tqdm(range(0, len(vectors), batch_size), disable=not show_progressbar): distances = vectors[i: i+batch_size].dot(reference_transposed) # For safety we clip distances = np.clip(distances, a_min=.0, a_max=1.0) for lidx, dists in enumerate(distances): indices = np.flatnonzero(dists >= threshold) sorted_indices = indices[np.argsort(-dists[indices])] if return_names: yield [(self.indices[d], dists[d]) for d in sorted_indices] else: yield list(dists[sorted_indices])
[ "def", "_threshold_batch", "(", "self", ",", "vectors", ",", "batch_size", ",", "threshold", ",", "show_progressbar", ",", "return_names", ")", ":", "vectors", "=", "self", ".", "normalize", "(", "vectors", ")", "# Single transpose, makes things faster.", "reference_transposed", "=", "self", ".", "norm_vectors", ".", "T", "for", "i", "in", "tqdm", "(", "range", "(", "0", ",", "len", "(", "vectors", ")", ",", "batch_size", ")", ",", "disable", "=", "not", "show_progressbar", ")", ":", "distances", "=", "vectors", "[", "i", ":", "i", "+", "batch_size", "]", ".", "dot", "(", "reference_transposed", ")", "# For safety we clip", "distances", "=", "np", ".", "clip", "(", "distances", ",", "a_min", "=", ".0", ",", "a_max", "=", "1.0", ")", "for", "lidx", ",", "dists", "in", "enumerate", "(", "distances", ")", ":", "indices", "=", "np", ".", "flatnonzero", "(", "dists", ">=", "threshold", ")", "sorted_indices", "=", "indices", "[", "np", ".", "argsort", "(", "-", "dists", "[", "indices", "]", ")", "]", "if", "return_names", ":", "yield", "[", "(", "self", ".", "indices", "[", "d", "]", ",", "dists", "[", "d", "]", ")", "for", "d", "in", "sorted_indices", "]", "else", ":", "yield", "list", "(", "dists", "[", "sorted_indices", "]", ")" ]
Batched cosine distance.
[ "Batched", "cosine", "distance", "." ]
train
https://github.com/stephantul/reach/blob/e5ed0cc895d17429e797c6d7dd57bce82ff00d5d/reach/reach.py#L511-L536
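The core of this generator is a plain NumPy pattern: mask one row of similarities by the threshold, then order the surviving indices by descending similarity. A standalone sketch of that step:

import numpy as np

dists = np.array([0.2, 0.9, 0.6, 0.95])                # one query row
indices = np.flatnonzero(dists >= 0.5)                 # -> [1 2 3]
sorted_indices = indices[np.argsort(-dists[indices])]  # -> [3 1 2]
print(sorted_indices, dists[sorted_indices])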
stephantul/reach
reach/reach.py
Reach._batch
def _batch(self, vectors, batch_size, num, show_progressbar, return_names): """Batched cosine distance.""" vectors = self.normalize(vectors) # Single transpose, makes things faster. reference_transposed = self.norm_vectors.T for i in tqdm(range(0, len(vectors), batch_size), disable=not show_progressbar): distances = vectors[i: i+batch_size].dot(reference_transposed) # For safety we clip distances = np.clip(distances, a_min=.0, a_max=1.0) if num == 1: sorted_indices = np.argmax(distances, 1)[:, None] else: sorted_indices = np.argpartition(-distances, kth=num, axis=1) sorted_indices = sorted_indices[:, :num] for lidx, indices in enumerate(sorted_indices): dists = distances[lidx, indices] if return_names: dindex = np.argsort(-dists) yield [(self.indices[indices[d]], dists[d]) for d in dindex] else: yield list(-1 * np.sort(-dists))
python
def _batch(self, vectors, batch_size, num, show_progressbar, return_names): """Batched cosine distance.""" vectors = self.normalize(vectors) # Single transpose, makes things faster. reference_transposed = self.norm_vectors.T for i in tqdm(range(0, len(vectors), batch_size), disable=not show_progressbar): distances = vectors[i: i+batch_size].dot(reference_transposed) # For safety we clip distances = np.clip(distances, a_min=.0, a_max=1.0) if num == 1: sorted_indices = np.argmax(distances, 1)[:, None] else: sorted_indices = np.argpartition(-distances, kth=num, axis=1) sorted_indices = sorted_indices[:, :num] for lidx, indices in enumerate(sorted_indices): dists = distances[lidx, indices] if return_names: dindex = np.argsort(-dists) yield [(self.indices[indices[d]], dists[d]) for d in dindex] else: yield list(-1 * np.sort(-dists))
[ "def", "_batch", "(", "self", ",", "vectors", ",", "batch_size", ",", "num", ",", "show_progressbar", ",", "return_names", ")", ":", "vectors", "=", "self", ".", "normalize", "(", "vectors", ")", "# Single transpose, makes things faster.", "reference_transposed", "=", "self", ".", "norm_vectors", ".", "T", "for", "i", "in", "tqdm", "(", "range", "(", "0", ",", "len", "(", "vectors", ")", ",", "batch_size", ")", ",", "disable", "=", "not", "show_progressbar", ")", ":", "distances", "=", "vectors", "[", "i", ":", "i", "+", "batch_size", "]", ".", "dot", "(", "reference_transposed", ")", "# For safety we clip", "distances", "=", "np", ".", "clip", "(", "distances", ",", "a_min", "=", ".0", ",", "a_max", "=", "1.0", ")", "if", "num", "==", "1", ":", "sorted_indices", "=", "np", ".", "argmax", "(", "distances", ",", "1", ")", "[", ":", ",", "None", "]", "else", ":", "sorted_indices", "=", "np", ".", "argpartition", "(", "-", "distances", ",", "kth", "=", "num", ",", "axis", "=", "1", ")", "sorted_indices", "=", "sorted_indices", "[", ":", ",", ":", "num", "]", "for", "lidx", ",", "indices", "in", "enumerate", "(", "sorted_indices", ")", ":", "dists", "=", "distances", "[", "lidx", ",", "indices", "]", "if", "return_names", ":", "dindex", "=", "np", ".", "argsort", "(", "-", "dists", ")", "yield", "[", "(", "self", ".", "indices", "[", "indices", "[", "d", "]", "]", ",", "dists", "[", "d", "]", ")", "for", "d", "in", "dindex", "]", "else", ":", "yield", "list", "(", "-", "1", "*", "np", ".", "sort", "(", "-", "dists", ")", ")" ]
Batched cosine distance.
[ "Batched", "cosine", "distance", "." ]
train
https://github.com/stephantul/reach/blob/e5ed0cc895d17429e797c6d7dd57bce82ff00d5d/reach/reach.py#L538-L568
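Apart from the num == 1 shortcut, the batched top-k above leans on np.argpartition, which is linear per row but leaves the k winners unsorted; the per-row argsort afterwards fixes the order. A standalone sketch:

import numpy as np

dists = np.array([0.2, 0.9, 0.6, 0.95, 0.1])
k = 2
top = np.argpartition(-dists, kth=k)[:k]  # k best indices, unordered
top = top[np.argsort(-dists[top])]        # now sorted by descending similarity
print(top, dists[top])                    # -> [3 1] [0.95 0.9]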
stephantul/reach
reach/reach.py
Reach.normalize
def normalize(vectors): """ Normalize a matrix of row vectors to unit length. Contains a shortcut if there are no zero vectors in the matrix. If there are zero vectors, we do some indexing tricks to avoid dividing by 0. Parameters ---------- vectors : np.array The vectors to normalize. Returns ------- vectors : np.array The input vectors, normalized to unit length. """ if np.ndim(vectors) == 1: norm = np.linalg.norm(vectors) if norm == 0: return np.zeros_like(vectors) return vectors / norm norm = np.linalg.norm(vectors, axis=1) if np.any(norm == 0): nonzero = norm > 0 result = np.zeros_like(vectors) n = norm[nonzero] p = vectors[nonzero] result[nonzero] = p / n[:, None] return result else: return vectors / norm[:, None]
python
def normalize(vectors): """ Normalize a matrix of row vectors to unit length. Contains a shortcut if there are no zero vectors in the matrix. If there are zero vectors, we do some indexing tricks to avoid dividing by 0. Parameters ---------- vectors : np.array The vectors to normalize. Returns ------- vectors : np.array The input vectors, normalized to unit length. """ if np.ndim(vectors) == 1: norm = np.linalg.norm(vectors) if norm == 0: return np.zeros_like(vectors) return vectors / norm norm = np.linalg.norm(vectors, axis=1) if np.any(norm == 0): nonzero = norm > 0 result = np.zeros_like(vectors) n = norm[nonzero] p = vectors[nonzero] result[nonzero] = p / n[:, None] return result else: return vectors / norm[:, None]
[ "def", "normalize", "(", "vectors", ")", ":", "if", "np", ".", "ndim", "(", "vectors", ")", "==", "1", ":", "norm", "=", "np", ".", "linalg", ".", "norm", "(", "vectors", ")", "if", "norm", "==", "0", ":", "return", "np", ".", "zeros_like", "(", "vectors", ")", "return", "vectors", "/", "norm", "norm", "=", "np", ".", "linalg", ".", "norm", "(", "vectors", ",", "axis", "=", "1", ")", "if", "np", ".", "any", "(", "norm", "==", "0", ")", ":", "nonzero", "=", "norm", ">", "0", "result", "=", "np", ".", "zeros_like", "(", "vectors", ")", "n", "=", "norm", "[", "nonzero", "]", "p", "=", "vectors", "[", "nonzero", "]", "result", "[", "nonzero", "]", "=", "p", "/", "n", "[", ":", ",", "None", "]", "return", "result", "else", ":", "return", "vectors", "/", "norm", "[", ":", ",", "None", "]" ]
Normalize a matrix of row vectors to unit length. Contains a shortcut if there are no zero vectors in the matrix. If there are zero vectors, we do some indexing tricks to avoid dividing by 0. Parameters ---------- vectors : np.array The vectors to normalize. Returns ------- vectors : np.array The input vectors, normalized to unit length.
[ "Normalize", "a", "matrix", "of", "row", "vectors", "to", "unit", "length", "." ]
train
https://github.com/stephantul/reach/blob/e5ed0cc895d17429e797c6d7dd57bce82ff00d5d/reach/reach.py#L571-L610
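A quick check of the zero-vector branch (assuming normalize is exposed as a static method, as its self-less signature suggests): all-zero rows come back as zeros instead of NaNs from a 0/0 division.

import numpy as np

m = np.array([[3.0, 4.0], [0.0, 0.0]])
print(Reach.normalize(m))
# [[0.6 0.8]
#  [0.  0. ]]   <- the zero row is returned untouched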
stephantul/reach
reach/reach.py
Reach.vector_similarity
def vector_similarity(self, vector, items): """Compute the similarity between a vector and a set of items.""" vector = self.normalize(vector) items_vec = np.stack([self.norm_vectors[self.items[x]] for x in items]) return vector.dot(items_vec.T)
python
def vector_similarity(self, vector, items): """Compute the similarity between a vector and a set of items.""" vector = self.normalize(vector) items_vec = np.stack([self.norm_vectors[self.items[x]] for x in items]) return vector.dot(items_vec.T)
[ "def", "vector_similarity", "(", "self", ",", "vector", ",", "items", ")", ":", "vector", "=", "self", ".", "normalize", "(", "vector", ")", "items_vec", "=", "np", ".", "stack", "(", "[", "self", ".", "norm_vectors", "[", "self", ".", "items", "[", "x", "]", "]", "for", "x", "in", "items", "]", ")", "return", "vector", ".", "dot", "(", "items_vec", ".", "T", ")" ]
Compute the similarity between a vector and a set of items.
[ "Compute", "the", "similarity", "between", "a", "vector", "and", "a", "set", "of", "items", "." ]
train
https://github.com/stephantul/reach/blob/e5ed0cc895d17429e797c6d7dd57bce82ff00d5d/reach/reach.py#L612-L616
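Sketch under the toy-space assumption from earlier; the result holds one cosine similarity per requested item:

q = r.vectors[r.items["queen"]]
print(r.vector_similarity(q, ["king", "woman"]))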
stephantul/reach
reach/reach.py
Reach.similarity
def similarity(self, i1, i2):
    """
    Compute the similarity between two sets of items.

    Parameters
    ----------
    i1 : object
        The first set of items.
    i2 : object
        The second set of items.

    Returns
    -------
    sim : array of floats
        An array of similarity scores between 1 and 0.

    """
    try:
        if i1 in self.items:
            i1 = [i1]
    except TypeError:
        pass
    try:
        if i2 in self.items:
            i2 = [i2]
    except TypeError:
        pass
    i1_vec = np.stack([self.norm_vectors[self.items[x]] for x in i1])
    i2_vec = np.stack([self.norm_vectors[self.items[x]] for x in i2])
    return i1_vec.dot(i2_vec.T)
python
def similarity(self, i1, i2):
    """
    Compute the similarity between two sets of items.

    Parameters
    ----------
    i1 : object
        The first set of items.
    i2 : object
        The second set of items.

    Returns
    -------
    sim : array of floats
        An array of similarity scores between 1 and 0.

    """
    try:
        if i1 in self.items:
            i1 = [i1]
    except TypeError:
        pass
    try:
        if i2 in self.items:
            i2 = [i2]
    except TypeError:
        pass
    i1_vec = np.stack([self.norm_vectors[self.items[x]] for x in i1])
    i2_vec = np.stack([self.norm_vectors[self.items[x]] for x in i2])
    return i1_vec.dot(i2_vec.T)
[ "def", "similarity", "(", "self", ",", "i1", ",", "i2", ")", ":", "try", ":", "if", "i1", "in", "self", ".", "items", ":", "i1", "=", "[", "i1", "]", "except", "TypeError", ":", "pass", "try", ":", "if", "i2", "in", "self", ".", "items", ":", "i2", "=", "[", "i2", "]", "except", "TypeError", ":", "pass", "i1_vec", "=", "np", ".", "stack", "(", "[", "self", ".", "norm_vectors", "[", "self", ".", "items", "[", "x", "]", "]", "for", "x", "in", "i1", "]", ")", "i2_vec", "=", "np", ".", "stack", "(", "[", "self", ".", "norm_vectors", "[", "self", ".", "items", "[", "x", "]", "]", "for", "x", "in", "i2", "]", ")", "return", "i1_vec", ".", "dot", "(", "i2_vec", ".", "T", ")" ]
Compute the similarity between two sets of items.

Parameters
----------
i1 : object
    The first set of items.
i2 : object
    The second set of items.

Returns
-------
sim : array of floats
    An array of similarity scores between 1 and 0.
[ "Compute", "the", "similarity", "between", "two", "sets", "of", "items", "." ]
train
https://github.com/stephantul/reach/blob/e5ed0cc895d17429e797c6d7dd57bce82ff00d5d/reach/reach.py#L618-L647
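Both arguments accept either a single item or an iterable of items; the `in self.items` probes promote scalars to singleton lists. Sketch:

print(r.similarity("king", "queen"))               # shape (1, 1)
print(r.similarity(["king", "woman"], ["queen"]))  # shape (2, 1)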
stephantul/reach
reach/reach.py
Reach.prune
def prune(self, wordlist): """ Prune the current reach instance by removing items. Parameters ---------- wordlist : list of str A list of words to keep. Note that this wordlist need not include all words in the Reach instance. Any words which are in the wordlist, but not in the reach instance are ignored. """ # Remove duplicates wordlist = set(wordlist).intersection(set(self.items.keys())) indices = [self.items[w] for w in wordlist if w in self.items] if self.unk_index is not None and self.unk_index not in indices: raise ValueError("Your unknown item is not in your list of items. " "Set it to None before pruning, or pass your " "unknown item.") self.vectors = self.vectors[indices] self.norm_vectors = self.norm_vectors[indices] self.items = {w: idx for idx, w in enumerate(wordlist)} self.indices = {v: k for k, v in self.items.items()} if self.unk_index is not None: self.unk_index = self.items[wordlist[self.unk_index]]
python
def prune(self, wordlist): """ Prune the current reach instance by removing items. Parameters ---------- wordlist : list of str A list of words to keep. Note that this wordlist need not include all words in the Reach instance. Any words which are in the wordlist, but not in the reach instance are ignored. """ # Remove duplicates wordlist = set(wordlist).intersection(set(self.items.keys())) indices = [self.items[w] for w in wordlist if w in self.items] if self.unk_index is not None and self.unk_index not in indices: raise ValueError("Your unknown item is not in your list of items. " "Set it to None before pruning, or pass your " "unknown item.") self.vectors = self.vectors[indices] self.norm_vectors = self.norm_vectors[indices] self.items = {w: idx for idx, w in enumerate(wordlist)} self.indices = {v: k for k, v in self.items.items()} if self.unk_index is not None: self.unk_index = self.items[wordlist[self.unk_index]]
[ "def", "prune", "(", "self", ",", "wordlist", ")", ":", "# Remove duplicates", "wordlist", "=", "set", "(", "wordlist", ")", ".", "intersection", "(", "set", "(", "self", ".", "items", ".", "keys", "(", ")", ")", ")", "indices", "=", "[", "self", ".", "items", "[", "w", "]", "for", "w", "in", "wordlist", "if", "w", "in", "self", ".", "items", "]", "if", "self", ".", "unk_index", "is", "not", "None", "and", "self", ".", "unk_index", "not", "in", "indices", ":", "raise", "ValueError", "(", "\"Your unknown item is not in your list of items. \"", "\"Set it to None before pruning, or pass your \"", "\"unknown item.\"", ")", "self", ".", "vectors", "=", "self", ".", "vectors", "[", "indices", "]", "self", ".", "norm_vectors", "=", "self", ".", "norm_vectors", "[", "indices", "]", "self", ".", "items", "=", "{", "w", ":", "idx", "for", "idx", ",", "w", "in", "enumerate", "(", "wordlist", ")", "}", "self", ".", "indices", "=", "{", "v", ":", "k", "for", "k", ",", "v", "in", "self", ".", "items", ".", "items", "(", ")", "}", "if", "self", ".", "unk_index", "is", "not", "None", ":", "self", ".", "unk_index", "=", "self", ".", "items", "[", "wordlist", "[", "self", ".", "unk_index", "]", "]" ]
Prune the current reach instance by removing items. Parameters ---------- wordlist : list of str A list of words to keep. Note that this wordlist need not include all words in the Reach instance. Any words which are in the wordlist, but not in the reach instance are ignored.
[ "Prune", "the", "current", "reach", "instance", "by", "removing", "items", "." ]
train
https://github.com/stephantul/reach/blob/e5ed0cc895d17429e797c6d7dd57bce82ff00d5d/reach/reach.py#L649-L673
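Usage sketch. Note that the set intersection leaves `wordlist` unordered before items are renumbered, and that, as written, the final `wordlist[self.unk_index]` lookup would raise a TypeError on a set whenever unk_index is not None:

r.unk_index = None  # see the caveat above
r.prune(["king", "queen"])
print(sorted(r.items))  # -> ['king', 'queen']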
stephantul/reach
reach/reach.py
Reach.save
def save(self, path, write_header=True): """ Save the current vector space in word2vec format. Parameters ---------- path : str The path to save the vector file to. write_header : bool, optional, default True Whether to write a word2vec-style header as the first line of the file """ with open(path, 'w') as f: if write_header: f.write(u"{0} {1}\n".format(str(self.vectors.shape[0]), str(self.vectors.shape[1]))) for i in range(len(self.items)): w = self.indices[i] vec = self.vectors[i] f.write(u"{0} {1}\n".format(w, " ".join([str(x) for x in vec])))
python
def save(self, path, write_header=True): """ Save the current vector space in word2vec format. Parameters ---------- path : str The path to save the vector file to. write_header : bool, optional, default True Whether to write a word2vec-style header as the first line of the file """ with open(path, 'w') as f: if write_header: f.write(u"{0} {1}\n".format(str(self.vectors.shape[0]), str(self.vectors.shape[1]))) for i in range(len(self.items)): w = self.indices[i] vec = self.vectors[i] f.write(u"{0} {1}\n".format(w, " ".join([str(x) for x in vec])))
[ "def", "save", "(", "self", ",", "path", ",", "write_header", "=", "True", ")", ":", "with", "open", "(", "path", ",", "'w'", ")", "as", "f", ":", "if", "write_header", ":", "f", ".", "write", "(", "u\"{0} {1}\\n\"", ".", "format", "(", "str", "(", "self", ".", "vectors", ".", "shape", "[", "0", "]", ")", ",", "str", "(", "self", ".", "vectors", ".", "shape", "[", "1", "]", ")", ")", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "items", ")", ")", ":", "w", "=", "self", ".", "indices", "[", "i", "]", "vec", "=", "self", ".", "vectors", "[", "i", "]", "f", ".", "write", "(", "u\"{0} {1}\\n\"", ".", "format", "(", "w", ",", "\" \"", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "vec", "]", ")", ")", ")" ]
Save the current vector space in word2vec format. Parameters ---------- path : str The path to save the vector file to. write_header : bool, optional, default True Whether to write a word2vec-style header as the first line of the file
[ "Save", "the", "current", "vector", "space", "in", "word2vec", "format", "." ]
train
https://github.com/stephantul/reach/blob/e5ed0cc895d17429e797c6d7dd57bce82ff00d5d/reach/reach.py#L675-L700
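The output is the plain word2vec text format: an optional "count dim" header line, then one "item v1 v2 ..." line per vector. Continuing the toy example:

r.save("toy.vec")
# toy.vec now contains something like:
# 2 2
# king 1.0 0.0
# queen 0.7 0.7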
stephantul/reach
reach/reach.py
Reach.save_fast_format
def save_fast_format(self, filename):
    """
    Save a reach instance in a fast format.

    The reach fast format stores the words and vectors of a Reach instance
    separately in a JSON and numpy format, respectively.

    Parameters
    ----------
    filename : str
        The prefix to add to the saved filename. Note that this is not the
        real filename under which these items are stored.
        The words and unk_index are stored under "{filename}_items.json",
        and the numpy matrix is saved under "{filename}_vectors.npy".

    """
    items, _ = zip(*sorted(self.items.items(), key=lambda x: x[1]))
    items = {"items": items,
             "unk_index": self.unk_index,
             "name": self.name}

    json.dump(items, open("{}_items.json".format(filename), 'w'))
    np.save(open("{}_vectors.npy".format(filename), 'wb'), self.vectors)
python
def save_fast_format(self, filename):
    """
    Save a reach instance in a fast format.

    The reach fast format stores the words and vectors of a Reach instance
    separately in a JSON and numpy format, respectively.

    Parameters
    ----------
    filename : str
        The prefix to add to the saved filename. Note that this is not the
        real filename under which these items are stored.
        The words and unk_index are stored under "{filename}_items.json",
        and the numpy matrix is saved under "{filename}_vectors.npy".

    """
    items, _ = zip(*sorted(self.items.items(), key=lambda x: x[1]))
    items = {"items": items,
             "unk_index": self.unk_index,
             "name": self.name}

    json.dump(items, open("{}_items.json".format(filename), 'w'))
    np.save(open("{}_vectors.npy".format(filename), 'wb'), self.vectors)
[ "def", "save_fast_format", "(", "self", ",", "filename", ")", ":", "items", ",", "_", "=", "zip", "(", "*", "sorted", "(", "self", ".", "items", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ")", ")", "items", "=", "{", "\"items\"", ":", "items", ",", "\"unk_index\"", ":", "self", ".", "unk_index", ",", "\"name\"", ":", "self", ".", "name", "}", "json", ".", "dump", "(", "items", ",", "open", "(", "\"{}_items.json\"", ".", "format", "(", "filename", ")", ",", "'w'", ")", ")", "np", ".", "save", "(", "open", "(", "\"{}_vectors.npy\"", ".", "format", "(", "filename", ")", ",", "'wb'", ")", ",", "self", ".", "vectors", ")" ]
Save a reach instance in a fast format.

The reach fast format stores the words and vectors of a Reach instance separately in a JSON and numpy format, respectively.

Parameters
----------
filename : str
    The prefix to add to the saved filename. Note that this is not the real filename under which these items are stored. The words and unk_index are stored under "{filename}_items.json", and the numpy matrix is saved under "{filename}_vectors.npy".
[ "Save", "a", "reach", "instance", "in", "a", "fast", "format", "." ]
train
https://github.com/stephantul/reach/blob/e5ed0cc895d17429e797c6d7dd57bce82ff00d5d/reach/reach.py#L702-L724
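The fast format writes two sibling files sharing a prefix:

r.save_fast_format("toy")
# writes toy_items.json  (items list, unk_index, name)
# and    toy_vectors.npy (the raw vector matrix)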
stephantul/reach
reach/reach.py
Reach.load_fast_format
def load_fast_format(filename):
    """
    Load a reach instance in fast format.

    As described above, the fast format stores the words and vectors of
    the Reach instance separately, and is drastically faster than loading
    from .txt files.

    Parameters
    ----------
    filename : str
        The filename prefix from which to load. Note that this is not a
        real filepath as such, but a shared prefix for both files. In
        order for this to work, both {filename}_items.json and
        {filename}_vectors.npy should be present.

    """
    words, unk_index, name, vectors = Reach._load_fast(filename)
    return Reach(vectors, words, unk_index=unk_index, name=name)
python
def load_fast_format(filename):
    """
    Load a reach instance in fast format.

    As described above, the fast format stores the words and vectors of
    the Reach instance separately, and is drastically faster than loading
    from .txt files.

    Parameters
    ----------
    filename : str
        The filename prefix from which to load. Note that this is not a
        real filepath as such, but a shared prefix for both files. In
        order for this to work, both {filename}_items.json and
        {filename}_vectors.npy should be present.

    """
    words, unk_index, name, vectors = Reach._load_fast(filename)
    return Reach(vectors, words, unk_index=unk_index, name=name)
[ "def", "load_fast_format", "(", "filename", ")", ":", "words", ",", "unk_index", ",", "name", ",", "vectors", "=", "Reach", ".", "_load_fast", "(", "filename", ")", "return", "Reach", "(", "vectors", ",", "words", ",", "unk_index", "=", "unk_index", ",", "name", "=", "name", ")" ]
Load a reach instance in fast format. As described above, the fast format stores the words and vectors of the Reach instance separately, and is drastically faster than loading from .txt files. Parameters ---------- filename : str The filename prefix from which to load. Note that this is not a real filepath as such, but a shared prefix for both files. In order for this to work, both {filename}_words.json and {filename}_vectors.npy should be present.
[ "Load", "a", "reach", "instance", "in", "fast", "format", "." ]
train
https://github.com/stephantul/reach/blob/e5ed0cc895d17429e797c6d7dd57bce82ff00d5d/reach/reach.py#L727-L745
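Round-trip sketch, assuming the pair of files written above exists:

r2 = Reach.load_fast_format("toy")
assert sorted(r2.items) == sorted(r.items)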
stephantul/reach
reach/reach.py
Reach._load_fast
def _load_fast(filename): """Sub for fast loader.""" it = json.load(open("{}_items.json".format(filename))) words, unk_index, name = it["items"], it["unk_index"], it["name"] vectors = np.load(open("{}_vectors.npy".format(filename), 'rb')) return words, unk_index, name, vectors
python
def _load_fast(filename): """Sub for fast loader.""" it = json.load(open("{}_items.json".format(filename))) words, unk_index, name = it["items"], it["unk_index"], it["name"] vectors = np.load(open("{}_vectors.npy".format(filename), 'rb')) return words, unk_index, name, vectors
[ "def", "_load_fast", "(", "filename", ")", ":", "it", "=", "json", ".", "load", "(", "open", "(", "\"{}_items.json\"", ".", "format", "(", "filename", ")", ")", ")", "words", ",", "unk_index", ",", "name", "=", "it", "[", "\"items\"", "]", ",", "it", "[", "\"unk_index\"", "]", ",", "it", "[", "\"name\"", "]", "vectors", "=", "np", ".", "load", "(", "open", "(", "\"{}_vectors.npy\"", ".", "format", "(", "filename", ")", ",", "'rb'", ")", ")", "return", "words", ",", "unk_index", ",", "name", ",", "vectors" ]
Sub for fast loader.
[ "Sub", "for", "fast", "loader", "." ]
train
https://github.com/stephantul/reach/blob/e5ed0cc895d17429e797c6d7dd57bce82ff00d5d/reach/reach.py#L748-L753
bjodah/finitediff
examples/sine.py
demo_usage
def demo_usage(n_data=50, n_fit=537, nhead=5, ntail=5, plot=False, alt=0): """ Plots a noisy sine curve and the fitting to it. Also presents the error and the error in the approximation of its first derivative (cosine curve) Usage example for benchmarking: $ time python sine.py --nhead 3 --ntail 3 --n-fit 500000 --n-data 50000 Usage example for plotting: $ python sine.py --nhead 1 --ntail 1 --plot """ x0, xend = 0, 5 # shaky linspace -5% to +5% noise x_data = (np.linspace(x0, xend, n_data) + np.random.rand(n_data)*(xend-x0)/n_data/1.5) y_data = np.sin(x_data) * (1.0+0.1*(np.random.rand(n_data)-0.5)) x_fit = np.linspace(x0, xend, n_fit) # Edges behave badly, work around: x_fit[0] = x_fit[0] + (x_fit[1]-x_fit[0])/2 x_fit[-1] = x_fit[-2]+(x_fit[-1]-x_fit[-2])/2 if alt: y_fit = np.empty(n_fit) dydx_fit = np.empty(n_fit) for i, xf in enumerate(x_fit): # get index j of first data point beyond xf j = np.where(x_data > xf)[0][0] lower_bound = max(0, j-alt) upper_bound = min(n_data-1, j+alt) y_fit[i] = derivatives_at_point_by_finite_diff( x_data[lower_bound:upper_bound], y_data[lower_bound:upper_bound], xf, 0) dydx_fit[i] = derivatives_at_point_by_finite_diff( x_data[lower_bound:upper_bound], y_data[lower_bound:upper_bound], xf, 1)[1] else: interp = interpolate_by_finite_diff(x_data, y_data, x_fit, 1, nhead, ntail) y_fit = interp[:, 0] dydx_fit = interp[:, 1] if plot: import matplotlib.pyplot as plt plt.subplot(221) plt.plot(x_data, y_data, 'x', label='Data points (sin)') plt.plot(x_fit, y_fit, '-', label='Fitted curve (order=0)') plt.plot(x_data, np.sin(x_data), '-', label='Analytic sin(x)') plt.legend() plt.subplot(222) plt.plot(x_fit, y_fit-np.sin(x_fit), label='Error in order=0') plt.legend() plt.subplot(223) plt.plot(x_fit, dydx_fit, '-', label='Fitted derivative (order=1)') plt.plot(x_data, np.cos(x_data), '-', label='Analytic cos(x)') plt.legend() plt.subplot(224) plt.plot(x_fit, dydx_fit-np.cos(x_fit), label='Error in order=1') plt.legend() plt.show()
python
def demo_usage(n_data=50, n_fit=537, nhead=5, ntail=5, plot=False, alt=0): """ Plots a noisy sine curve and the fitting to it. Also presents the error and the error in the approximation of its first derivative (cosine curve) Usage example for benchmarking: $ time python sine.py --nhead 3 --ntail 3 --n-fit 500000 --n-data 50000 Usage example for plotting: $ python sine.py --nhead 1 --ntail 1 --plot """ x0, xend = 0, 5 # shaky linspace -5% to +5% noise x_data = (np.linspace(x0, xend, n_data) + np.random.rand(n_data)*(xend-x0)/n_data/1.5) y_data = np.sin(x_data) * (1.0+0.1*(np.random.rand(n_data)-0.5)) x_fit = np.linspace(x0, xend, n_fit) # Edges behave badly, work around: x_fit[0] = x_fit[0] + (x_fit[1]-x_fit[0])/2 x_fit[-1] = x_fit[-2]+(x_fit[-1]-x_fit[-2])/2 if alt: y_fit = np.empty(n_fit) dydx_fit = np.empty(n_fit) for i, xf in enumerate(x_fit): # get index j of first data point beyond xf j = np.where(x_data > xf)[0][0] lower_bound = max(0, j-alt) upper_bound = min(n_data-1, j+alt) y_fit[i] = derivatives_at_point_by_finite_diff( x_data[lower_bound:upper_bound], y_data[lower_bound:upper_bound], xf, 0) dydx_fit[i] = derivatives_at_point_by_finite_diff( x_data[lower_bound:upper_bound], y_data[lower_bound:upper_bound], xf, 1)[1] else: interp = interpolate_by_finite_diff(x_data, y_data, x_fit, 1, nhead, ntail) y_fit = interp[:, 0] dydx_fit = interp[:, 1] if plot: import matplotlib.pyplot as plt plt.subplot(221) plt.plot(x_data, y_data, 'x', label='Data points (sin)') plt.plot(x_fit, y_fit, '-', label='Fitted curve (order=0)') plt.plot(x_data, np.sin(x_data), '-', label='Analytic sin(x)') plt.legend() plt.subplot(222) plt.plot(x_fit, y_fit-np.sin(x_fit), label='Error in order=0') plt.legend() plt.subplot(223) plt.plot(x_fit, dydx_fit, '-', label='Fitted derivative (order=1)') plt.plot(x_data, np.cos(x_data), '-', label='Analytic cos(x)') plt.legend() plt.subplot(224) plt.plot(x_fit, dydx_fit-np.cos(x_fit), label='Error in order=1') plt.legend() plt.show()
[ "def", "demo_usage", "(", "n_data", "=", "50", ",", "n_fit", "=", "537", ",", "nhead", "=", "5", ",", "ntail", "=", "5", ",", "plot", "=", "False", ",", "alt", "=", "0", ")", ":", "x0", ",", "xend", "=", "0", ",", "5", "# shaky linspace -5% to +5% noise", "x_data", "=", "(", "np", ".", "linspace", "(", "x0", ",", "xend", ",", "n_data", ")", "+", "np", ".", "random", ".", "rand", "(", "n_data", ")", "*", "(", "xend", "-", "x0", ")", "/", "n_data", "/", "1.5", ")", "y_data", "=", "np", ".", "sin", "(", "x_data", ")", "*", "(", "1.0", "+", "0.1", "*", "(", "np", ".", "random", ".", "rand", "(", "n_data", ")", "-", "0.5", ")", ")", "x_fit", "=", "np", ".", "linspace", "(", "x0", ",", "xend", ",", "n_fit", ")", "# Edges behave badly, work around:", "x_fit", "[", "0", "]", "=", "x_fit", "[", "0", "]", "+", "(", "x_fit", "[", "1", "]", "-", "x_fit", "[", "0", "]", ")", "/", "2", "x_fit", "[", "-", "1", "]", "=", "x_fit", "[", "-", "2", "]", "+", "(", "x_fit", "[", "-", "1", "]", "-", "x_fit", "[", "-", "2", "]", ")", "/", "2", "if", "alt", ":", "y_fit", "=", "np", ".", "empty", "(", "n_fit", ")", "dydx_fit", "=", "np", ".", "empty", "(", "n_fit", ")", "for", "i", ",", "xf", "in", "enumerate", "(", "x_fit", ")", ":", "# get index j of first data point beyond xf", "j", "=", "np", ".", "where", "(", "x_data", ">", "xf", ")", "[", "0", "]", "[", "0", "]", "lower_bound", "=", "max", "(", "0", ",", "j", "-", "alt", ")", "upper_bound", "=", "min", "(", "n_data", "-", "1", ",", "j", "+", "alt", ")", "y_fit", "[", "i", "]", "=", "derivatives_at_point_by_finite_diff", "(", "x_data", "[", "lower_bound", ":", "upper_bound", "]", ",", "y_data", "[", "lower_bound", ":", "upper_bound", "]", ",", "xf", ",", "0", ")", "dydx_fit", "[", "i", "]", "=", "derivatives_at_point_by_finite_diff", "(", "x_data", "[", "lower_bound", ":", "upper_bound", "]", ",", "y_data", "[", "lower_bound", ":", "upper_bound", "]", ",", "xf", ",", "1", ")", "[", "1", "]", "else", ":", "interp", "=", "interpolate_by_finite_diff", "(", "x_data", ",", "y_data", ",", "x_fit", ",", "1", ",", "nhead", ",", "ntail", ")", "y_fit", "=", "interp", "[", ":", ",", "0", "]", "dydx_fit", "=", "interp", "[", ":", ",", "1", "]", "if", "plot", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "plt", ".", "subplot", "(", "221", ")", "plt", ".", "plot", "(", "x_data", ",", "y_data", ",", "'x'", ",", "label", "=", "'Data points (sin)'", ")", "plt", ".", "plot", "(", "x_fit", ",", "y_fit", ",", "'-'", ",", "label", "=", "'Fitted curve (order=0)'", ")", "plt", ".", "plot", "(", "x_data", ",", "np", ".", "sin", "(", "x_data", ")", ",", "'-'", ",", "label", "=", "'Analytic sin(x)'", ")", "plt", ".", "legend", "(", ")", "plt", ".", "subplot", "(", "222", ")", "plt", ".", "plot", "(", "x_fit", ",", "y_fit", "-", "np", ".", "sin", "(", "x_fit", ")", ",", "label", "=", "'Error in order=0'", ")", "plt", ".", "legend", "(", ")", "plt", ".", "subplot", "(", "223", ")", "plt", ".", "plot", "(", "x_fit", ",", "dydx_fit", ",", "'-'", ",", "label", "=", "'Fitted derivative (order=1)'", ")", "plt", ".", "plot", "(", "x_data", ",", "np", ".", "cos", "(", "x_data", ")", ",", "'-'", ",", "label", "=", "'Analytic cos(x)'", ")", "plt", ".", "legend", "(", ")", "plt", ".", "subplot", "(", "224", ")", "plt", ".", "plot", "(", "x_fit", ",", "dydx_fit", "-", "np", ".", "cos", "(", "x_fit", ")", ",", "label", "=", "'Error in order=1'", ")", "plt", ".", "legend", "(", ")", "plt", ".", "show", "(", ")" ]
Plots a noisy sine curve and the fitting to it. Also presents the error and the error in the approximation of its first derivative (cosine curve) Usage example for benchmarking: $ time python sine.py --nhead 3 --ntail 3 --n-fit 500000 --n-data 50000 Usage example for plotting: $ python sine.py --nhead 1 --ntail 1 --plot
[ "Plots", "a", "noisy", "sine", "curve", "and", "the", "fitting", "to", "it", ".", "Also", "presents", "the", "error", "and", "the", "error", "in", "the", "approximation", "of", "its", "first", "derivative", "(", "cosine", "curve", ")" ]
train
https://github.com/bjodah/finitediff/blob/c1b1c6840512d2206e2f97315d9bf1738c1ca3d3/examples/sine.py#L13-L83
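A minimal direct call to the interpolation routine the demo wraps, mirroring its argument order (x data, y data, query points, max derivative order, nhead, ntail); the import path is an assumption matching what the example script appears to use:

import numpy as np
from finitediff import interpolate_by_finite_diff

x = np.linspace(0.1, 4.9, 30)
xq = np.linspace(0.2, 4.8, 7)
out = interpolate_by_finite_diff(x, np.sin(x), xq, 1, 5, 5)
print(out[:, 0])  # interpolated sin at xq
print(out[:, 1])  # estimated first derivative, ~cos(xq)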
bjodah/finitediff
examples/err.py
demo_err
def demo_err(): """ This demo shows how the error in the estimate varies depending on how many data points are included in the interpolation (m parameter in this function). """ max_order = 7 n = 20 l = 0.25 fmt1 = '{0: <5s}\t{1: <21s}\t{2: >21s}\t{3: >21s}\t{4: >21s}' fmt2 = '{0: <5d}\t{1:20.18f}\t{2: >21.18f}\t{3: >21.18f}\t{4: >21.18f}' x = np.cumsum(np.random.rand(n)*l) x = np.concatenate((x[::-1]*-1, x)) lst = [] derivs = np.zeros(n) for order in range(max_order+1): print('Order', order) for m in range(1+order//2, n+1): sub_x = x[n-m:n+m] derivs[m-1] = derivatives_at_point_by_finite_diff( sub_x, np.exp(sub_x), 0, order)[order] print(fmt1.format('m', 'val', 'diff', 'analytical error', 'diff/analytical')) for m in range(1, n): print(fmt2.format( (m+1)*2, derivs[m], derivs[m]-derivs[m-1], derivs[m]-1, (derivs[m]-derivs[m-1])/(derivs[m]-1))) lst.append((derivs[-1], abs(derivs[-1]-derivs[-2]))) print(np.array(lst))
python
def demo_err(): """ This demo shows how the error in the estimate varies depending on how many data points are included in the interpolation (m parameter in this function). """ max_order = 7 n = 20 l = 0.25 fmt1 = '{0: <5s}\t{1: <21s}\t{2: >21s}\t{3: >21s}\t{4: >21s}' fmt2 = '{0: <5d}\t{1:20.18f}\t{2: >21.18f}\t{3: >21.18f}\t{4: >21.18f}' x = np.cumsum(np.random.rand(n)*l) x = np.concatenate((x[::-1]*-1, x)) lst = [] derivs = np.zeros(n) for order in range(max_order+1): print('Order', order) for m in range(1+order//2, n+1): sub_x = x[n-m:n+m] derivs[m-1] = derivatives_at_point_by_finite_diff( sub_x, np.exp(sub_x), 0, order)[order] print(fmt1.format('m', 'val', 'diff', 'analytical error', 'diff/analytical')) for m in range(1, n): print(fmt2.format( (m+1)*2, derivs[m], derivs[m]-derivs[m-1], derivs[m]-1, (derivs[m]-derivs[m-1])/(derivs[m]-1))) lst.append((derivs[-1], abs(derivs[-1]-derivs[-2]))) print(np.array(lst))
[ "def", "demo_err", "(", ")", ":", "max_order", "=", "7", "n", "=", "20", "l", "=", "0.25", "fmt1", "=", "'{0: <5s}\\t{1: <21s}\\t{2: >21s}\\t{3: >21s}\\t{4: >21s}'", "fmt2", "=", "'{0: <5d}\\t{1:20.18f}\\t{2: >21.18f}\\t{3: >21.18f}\\t{4: >21.18f}'", "x", "=", "np", ".", "cumsum", "(", "np", ".", "random", ".", "rand", "(", "n", ")", "*", "l", ")", "x", "=", "np", ".", "concatenate", "(", "(", "x", "[", ":", ":", "-", "1", "]", "*", "-", "1", ",", "x", ")", ")", "lst", "=", "[", "]", "derivs", "=", "np", ".", "zeros", "(", "n", ")", "for", "order", "in", "range", "(", "max_order", "+", "1", ")", ":", "print", "(", "'Order'", ",", "order", ")", "for", "m", "in", "range", "(", "1", "+", "order", "//", "2", ",", "n", "+", "1", ")", ":", "sub_x", "=", "x", "[", "n", "-", "m", ":", "n", "+", "m", "]", "derivs", "[", "m", "-", "1", "]", "=", "derivatives_at_point_by_finite_diff", "(", "sub_x", ",", "np", ".", "exp", "(", "sub_x", ")", ",", "0", ",", "order", ")", "[", "order", "]", "print", "(", "fmt1", ".", "format", "(", "'m'", ",", "'val'", ",", "'diff'", ",", "'analytical error'", ",", "'diff/analytical'", ")", ")", "for", "m", "in", "range", "(", "1", ",", "n", ")", ":", "print", "(", "fmt2", ".", "format", "(", "(", "m", "+", "1", ")", "*", "2", ",", "derivs", "[", "m", "]", ",", "derivs", "[", "m", "]", "-", "derivs", "[", "m", "-", "1", "]", ",", "derivs", "[", "m", "]", "-", "1", ",", "(", "derivs", "[", "m", "]", "-", "derivs", "[", "m", "-", "1", "]", ")", "/", "(", "derivs", "[", "m", "]", "-", "1", ")", ")", ")", "lst", ".", "append", "(", "(", "derivs", "[", "-", "1", "]", ",", "abs", "(", "derivs", "[", "-", "1", "]", "-", "derivs", "[", "-", "2", "]", ")", ")", ")", "print", "(", "np", ".", "array", "(", "lst", ")", ")" ]
This demo shows how the error in the estimate varies depending on how many data points are included in the interpolation (m parameter in this function).
[ "This", "demo", "shows", "how", "the", "error", "in", "the", "estimate", "varies", "depending", "on", "how", "many", "data", "points", "are", "included", "in", "the", "interpolation", "(", "m", "parameter", "in", "this", "function", ")", "." ]
train
https://github.com/bjodah/finitediff/blob/c1b1c6840512d2206e2f97315d9bf1738c1ca3d3/examples/err.py#L9-L37
LABHR/octohatrack
octohatrack/helpers.py
display_results
def display_results(repo_name, contributors, api_len): """ Fancy display. """ print("\n") print("All Contributors:") # Sort and consolidate on Name seen = [] for user in sorted(contributors, key=_sort_by_name): if user.get("name"): key = user["name"] else: key = user["user_name"] if key not in seen: seen.append(key) if key != user["user_name"]: print("%s (%s)" % (user["name"], user["user_name"])) else: print(user["user_name"]) print("") print("Repo: %s" % repo_name) print("GitHub Contributors: %s" % api_len) print("All Contributors: %s 👏" % len(seen))
python
def display_results(repo_name, contributors, api_len): """ Fancy display. """ print("\n") print("All Contributors:") # Sort and consolidate on Name seen = [] for user in sorted(contributors, key=_sort_by_name): if user.get("name"): key = user["name"] else: key = user["user_name"] if key not in seen: seen.append(key) if key != user["user_name"]: print("%s (%s)" % (user["name"], user["user_name"])) else: print(user["user_name"]) print("") print("Repo: %s" % repo_name) print("GitHub Contributors: %s" % api_len) print("All Contributors: %s 👏" % len(seen))
[ "def", "display_results", "(", "repo_name", ",", "contributors", ",", "api_len", ")", ":", "print", "(", "\"\\n\"", ")", "print", "(", "\"All Contributors:\"", ")", "# Sort and consolidate on Name", "seen", "=", "[", "]", "for", "user", "in", "sorted", "(", "contributors", ",", "key", "=", "_sort_by_name", ")", ":", "if", "user", ".", "get", "(", "\"name\"", ")", ":", "key", "=", "user", "[", "\"name\"", "]", "else", ":", "key", "=", "user", "[", "\"user_name\"", "]", "if", "key", "not", "in", "seen", ":", "seen", ".", "append", "(", "key", ")", "if", "key", "!=", "user", "[", "\"user_name\"", "]", ":", "print", "(", "\"%s (%s)\"", "%", "(", "user", "[", "\"name\"", "]", ",", "user", "[", "\"user_name\"", "]", ")", ")", "else", ":", "print", "(", "user", "[", "\"user_name\"", "]", ")", "print", "(", "\"\"", ")", "print", "(", "\"Repo: %s\"", "%", "repo_name", ")", "print", "(", "\"GitHub Contributors: %s\"", "%", "api_len", ")", "print", "(", "\"All Contributors: %s 👏\" % ", "e", "(se", "e", "n))", "", "" ]
Fancy display.
[ "Fancy", "display", "." ]
train
https://github.com/LABHR/octohatrack/blob/bf855a0190518a3b2c45304cbbac00e22086b6da/octohatrack/helpers.py#L13-L39
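Sketch with hand-rolled contributor dicts; the keys mirror the ones the function reads (user_name and an optional name), and the module-level _sort_by_name helper is assumed to handle both:

contributors = [
    {"user_name": "octocat", "name": "The Octocat"},
    {"user_name": "anon", "name": None},
]
display_results("LABHR/octohatrack", contributors, api_len=1)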
LABHR/octohatrack
octohatrack/api_helpers.py
get_json
def get_json(uri): """ Handle headers and json for us :3 """ response = requests.get(API + uri, headers=HEADERS) limit = int(response.headers.get("x-ratelimit-remaining")) if limit == 0: sys.stdout.write("\n") message = "You have run out of GitHub request tokens. " if int(response.headers.get("x-ratelimit-limit")) == 60: message += "Set a GITHUB_TOKEN to increase your limit to 5000/hour. " wait_seconds = int(response.headers.get("x-ratelimit-reset")) - int(time.time()) wait_minutes = math.ceil(wait_seconds / 60) message += "Try again in ~%d minutes. " % wait_minutes if "--wait-for-reset" in sys.argv: progress_message(message.replace("Try ", "Trying ")) time.sleep(wait_seconds + 1) progress_message("Resuming") return get_json(uri) else: raise ValueError(message) progress() return response.json()
python
def get_json(uri): """ Handle headers and json for us :3 """ response = requests.get(API + uri, headers=HEADERS) limit = int(response.headers.get("x-ratelimit-remaining")) if limit == 0: sys.stdout.write("\n") message = "You have run out of GitHub request tokens. " if int(response.headers.get("x-ratelimit-limit")) == 60: message += "Set a GITHUB_TOKEN to increase your limit to 5000/hour. " wait_seconds = int(response.headers.get("x-ratelimit-reset")) - int(time.time()) wait_minutes = math.ceil(wait_seconds / 60) message += "Try again in ~%d minutes. " % wait_minutes if "--wait-for-reset" in sys.argv: progress_message(message.replace("Try ", "Trying ")) time.sleep(wait_seconds + 1) progress_message("Resuming") return get_json(uri) else: raise ValueError(message) progress() return response.json()
[ "def", "get_json", "(", "uri", ")", ":", "response", "=", "requests", ".", "get", "(", "API", "+", "uri", ",", "headers", "=", "HEADERS", ")", "limit", "=", "int", "(", "response", ".", "headers", ".", "get", "(", "\"x-ratelimit-remaining\"", ")", ")", "if", "limit", "==", "0", ":", "sys", ".", "stdout", ".", "write", "(", "\"\\n\"", ")", "message", "=", "\"You have run out of GitHub request tokens. \"", "if", "int", "(", "response", ".", "headers", ".", "get", "(", "\"x-ratelimit-limit\"", ")", ")", "==", "60", ":", "message", "+=", "\"Set a GITHUB_TOKEN to increase your limit to 5000/hour. \"", "wait_seconds", "=", "int", "(", "response", ".", "headers", ".", "get", "(", "\"x-ratelimit-reset\"", ")", ")", "-", "int", "(", "time", ".", "time", "(", ")", ")", "wait_minutes", "=", "math", ".", "ceil", "(", "wait_seconds", "/", "60", ")", "message", "+=", "\"Try again in ~%d minutes. \"", "%", "wait_minutes", "if", "\"--wait-for-reset\"", "in", "sys", ".", "argv", ":", "progress_message", "(", "message", ".", "replace", "(", "\"Try \"", ",", "\"Trying \"", ")", ")", "time", ".", "sleep", "(", "wait_seconds", "+", "1", ")", "progress_message", "(", "\"Resuming\"", ")", "return", "get_json", "(", "uri", ")", "else", ":", "raise", "ValueError", "(", "message", ")", "progress", "(", ")", "return", "response", ".", "json", "(", ")" ]
Handle headers and json for us :3
[ "Handle", "headers", "and", "json", "for", "us", ":", "3" ]
train
https://github.com/LABHR/octohatrack/blob/bf855a0190518a3b2c45304cbbac00e22086b6da/octohatrack/api_helpers.py#L24-L51
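Usage sketch; the uri is relative to the module-level API base (assumed here to carry its trailing slash), and once the token budget hits zero the function either sleeps until X-RateLimit-Reset (with --wait-for-reset) or raises ValueError:

repo = get_json("repos/LABHR/octohatrack")
print(repo["full_name"])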
LABHR/octohatrack
octohatrack/api_helpers.py
api_walk
def api_walk(uri, per_page=100, key="login"): """ For a GitHub URI, walk all the pages until there's no more content """ page = 1 result = [] while True: response = get_json(uri + "?page=%d&per_page=%d" % (page, per_page)) if len(response) == 0: break else: page += 1 for r in response: if key == USER_LOGIN: result.append(user_login(r)) else: result.append(r[key]) return list(set(result))
python
def api_walk(uri, per_page=100, key="login"): """ For a GitHub URI, walk all the pages until there's no more content """ page = 1 result = [] while True: response = get_json(uri + "?page=%d&per_page=%d" % (page, per_page)) if len(response) == 0: break else: page += 1 for r in response: if key == USER_LOGIN: result.append(user_login(r)) else: result.append(r[key]) return list(set(result))
[ "def", "api_walk", "(", "uri", ",", "per_page", "=", "100", ",", "key", "=", "\"login\"", ")", ":", "page", "=", "1", "result", "=", "[", "]", "while", "True", ":", "response", "=", "get_json", "(", "uri", "+", "\"?page=%d&per_page=%d\"", "%", "(", "page", ",", "per_page", ")", ")", "if", "len", "(", "response", ")", "==", "0", ":", "break", "else", ":", "page", "+=", "1", "for", "r", "in", "response", ":", "if", "key", "==", "USER_LOGIN", ":", "result", ".", "append", "(", "user_login", "(", "r", ")", ")", "else", ":", "result", ".", "append", "(", "r", "[", "key", "]", ")", "return", "list", "(", "set", "(", "result", ")", ")" ]
For a GitHub URI, walk all the pages until there's no more content
[ "For", "a", "GitHub", "URI", "walk", "all", "the", "pages", "until", "there", "s", "no", "more", "content" ]
train
https://github.com/LABHR/octohatrack/blob/bf855a0190518a3b2c45304cbbac00e22086b6da/octohatrack/api_helpers.py#L54-L73
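Page-walking sketch; note the final list(set(...)) deduplicates but also discards page order:

logins = api_walk("repos/LABHR/octohatrack/stargazers")
print(len(logins))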
LABHR/octohatrack
octohatrack/api_helpers.py
api_get
def api_get(uri, key=None): """ Simple API endpoint get, return only the keys we care about """ response = get_json(uri) if response: if type(response) == list: r = response[0] elif type(response) == dict: r = response if type(r) == dict: # Special nested value we care about if key == USER_LOGIN: return user_login(r) if key in r: return r[key]
python
def api_get(uri, key=None): """ Simple API endpoint get, return only the keys we care about """ response = get_json(uri) if response: if type(response) == list: r = response[0] elif type(response) == dict: r = response if type(r) == dict: # Special nested value we care about if key == USER_LOGIN: return user_login(r) if key in r: return r[key]
[ "def", "api_get", "(", "uri", ",", "key", "=", "None", ")", ":", "response", "=", "get_json", "(", "uri", ")", "if", "response", ":", "if", "type", "(", "response", ")", "==", "list", ":", "r", "=", "response", "[", "0", "]", "elif", "type", "(", "response", ")", "==", "dict", ":", "r", "=", "response", "if", "type", "(", "r", ")", "==", "dict", ":", "# Special nested value we care about", "if", "key", "==", "USER_LOGIN", ":", "return", "user_login", "(", "r", ")", "if", "key", "in", "r", ":", "return", "r", "[", "key", "]" ]
Simple API endpoint get, return only the keys we care about
[ "Simple", "API", "endpoint", "get", "return", "only", "the", "keys", "we", "care", "about" ]
train
https://github.com/LABHR/octohatrack/blob/bf855a0190518a3b2c45304cbbac00e22086b6da/octohatrack/api_helpers.py#L86-L103
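Single-shot variant: fetch one endpoint and pull a single key from the first (or only) object returned:

name = api_get("users/octocat", key="name")
print(name)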
LABHR/octohatrack
octohatrack_graphql.py
reducejson
def reducejson(j): """ Not sure if there's a better way to walk the ... interesting result """ authors = [] for key in j["data"]["repository"]["commitComments"]["edges"]: authors.append(key["node"]["author"]) for key in j["data"]["repository"]["issues"]["nodes"]: authors.append(key["author"]) for c in key["comments"]["nodes"]: authors.append(c["author"]) for key in j["data"]["repository"]["pullRequests"]["edges"]: authors.append(key["node"]["author"]) for c in key["node"]["comments"]["nodes"]: authors.append(c["author"]) unique = list({v['login']:v for v in authors if v is not None}.values()) return unique
python
def reducejson(j): """ Not sure if there's a better way to walk the ... interesting result """ authors = [] for key in j["data"]["repository"]["commitComments"]["edges"]: authors.append(key["node"]["author"]) for key in j["data"]["repository"]["issues"]["nodes"]: authors.append(key["author"]) for c in key["comments"]["nodes"]: authors.append(c["author"]) for key in j["data"]["repository"]["pullRequests"]["edges"]: authors.append(key["node"]["author"]) for c in key["node"]["comments"]["nodes"]: authors.append(c["author"]) unique = list({v['login']:v for v in authors if v is not None}.values()) return unique
[ "def", "reducejson", "(", "j", ")", ":", "authors", "=", "[", "]", "for", "key", "in", "j", "[", "\"data\"", "]", "[", "\"repository\"", "]", "[", "\"commitComments\"", "]", "[", "\"edges\"", "]", ":", "authors", ".", "append", "(", "key", "[", "\"node\"", "]", "[", "\"author\"", "]", ")", "for", "key", "in", "j", "[", "\"data\"", "]", "[", "\"repository\"", "]", "[", "\"issues\"", "]", "[", "\"nodes\"", "]", ":", "authors", ".", "append", "(", "key", "[", "\"author\"", "]", ")", "for", "c", "in", "key", "[", "\"comments\"", "]", "[", "\"nodes\"", "]", ":", "authors", ".", "append", "(", "c", "[", "\"author\"", "]", ")", "for", "key", "in", "j", "[", "\"data\"", "]", "[", "\"repository\"", "]", "[", "\"pullRequests\"", "]", "[", "\"edges\"", "]", ":", "authors", ".", "append", "(", "key", "[", "\"node\"", "]", "[", "\"author\"", "]", ")", "for", "c", "in", "key", "[", "\"node\"", "]", "[", "\"comments\"", "]", "[", "\"nodes\"", "]", ":", "authors", ".", "append", "(", "c", "[", "\"author\"", "]", ")", "unique", "=", "list", "(", "{", "v", "[", "'login'", "]", ":", "v", "for", "v", "in", "authors", "if", "v", "is", "not", "None", "}", ".", "values", "(", ")", ")", "return", "unique" ]
Not sure if there's a better way to walk the ... interesting result
[ "Not", "sure", "if", "there", "s", "a", "better", "way", "to", "walk", "the", "...", "interesting", "result" ]
train
https://github.com/LABHR/octohatrack/blob/bf855a0190518a3b2c45304cbbac00e22086b6da/octohatrack_graphql.py#L52-L73
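The traversal expects the GraphQL response shape sketched below; deduplication keys on login, and deleted (None) authors are dropped:

j = {"data": {"repository": {
    "commitComments": {"edges": [{"node": {"author": {"login": "a"}}}]},
    "issues": {"nodes": [{"author": {"login": "b"},
                          "comments": {"nodes": [{"author": None}]}}]},
    "pullRequests": {"edges": [{"node": {"author": {"login": "a"},
                                         "comments": {"nodes": []}}}]},
}}}
print(reducejson(j))  # -> [{'login': 'a'}, {'login': 'b'}]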
oconnor663/duct.py
duct.py
stringify_with_dot_if_path
def stringify_with_dot_if_path(x): '''Pathlib never renders a leading './' in front of a local path. That's an issue because on POSIX subprocess.py (like bash) won't execute scripts in the current directory without it. In the same vein, we also don't want Path('echo') to match '/usr/bin/echo' from the $PATH. To work around both issues, we explicitly join a leading dot to any relative pathlib path.''' if isinstance(x, PurePath): # Note that join does nothing if the path is absolute. return os.path.join('.', str(x)) return x
python
def stringify_with_dot_if_path(x): '''Pathlib never renders a leading './' in front of a local path. That's an issue because on POSIX subprocess.py (like bash) won't execute scripts in the current directory without it. In the same vein, we also don't want Path('echo') to match '/usr/bin/echo' from the $PATH. To work around both issues, we explicitly join a leading dot to any relative pathlib path.''' if isinstance(x, PurePath): # Note that join does nothing if the path is absolute. return os.path.join('.', str(x)) return x
[ "def", "stringify_with_dot_if_path", "(", "x", ")", ":", "if", "isinstance", "(", "x", ",", "PurePath", ")", ":", "# Note that join does nothing if the path is absolute.", "return", "os", ".", "path", ".", "join", "(", "'.'", ",", "str", "(", "x", ")", ")", "return", "x" ]
Pathlib never renders a leading './' in front of a local path. That's an issue because on POSIX subprocess.py (like bash) won't execute scripts in the current directory without it. In the same vein, we also don't want Path('echo') to match '/usr/bin/echo' from the $PATH. To work around both issues, we explicitly join a leading dot to any relative pathlib path.
[ "Pathlib", "never", "renders", "a", "leading", ".", "/", "in", "front", "of", "a", "local", "path", ".", "That", "s", "an", "issue", "because", "on", "POSIX", "subprocess", ".", "py", "(", "like", "bash", ")", "won", "t", "execute", "scripts", "in", "the", "current", "directory", "without", "it", ".", "In", "the", "same", "vein", "we", "also", "don", "t", "want", "Path", "(", "echo", ")", "to", "match", "/", "usr", "/", "bin", "/", "echo", "from", "the", "$PATH", ".", "To", "work", "around", "both", "issues", "we", "explicitly", "join", "a", "leading", "dot", "to", "any", "relative", "pathlib", "path", "." ]
train
https://github.com/oconnor663/duct.py/blob/f10f1e9093a2913281294bb89a6e1744aa700e73/duct.py#L616-L625
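Behavior sketch (POSIX separators assumed); only relative PurePath values get the leading dot, and plain strings pass through untouched:

from pathlib import PurePath

print(stringify_with_dot_if_path(PurePath("echo")))       # ./echo
print(stringify_with_dot_if_path(PurePath("/bin/echo")))  # /bin/echo (join is a no-op)
print(stringify_with_dot_if_path("echo"))                 # echo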
oconnor663/duct.py
duct.py
maybe_canonicalize_exe_path
def maybe_canonicalize_exe_path(exe_name, iocontext):
    '''There's a tricky interaction between exe paths and `dir`. Exe
    paths can be relative, and so we have to ask: Is an exe path
    interpreted relative to the parent's cwd, or the child's? The answer
    is that it's platform dependent! >.< (Windows uses the parent's cwd,
    but because of the fork-chdir-exec pattern, Unix usually uses the
    child's.)

    We want to use the parent's cwd consistently, because that saves the
    caller from having to worry about whether `dir` will have side
    effects, and because it's easy for the caller to use path.join if
    they want to. That means that when `dir` is in use, we need to detect
    exe names that are relative paths, and absolutify them. We want to do
    that as little as possible though, both because canonicalization can
    fail, and because we prefer to let the caller control the child's
    argv[0].

    We never want to absolutify a name like "emacs", because that's
    probably a program in the PATH rather than a local file. So we look
    for slashes in the name to determine what's a filepath and what
    isn't. Note that anything given as a Path will always have a slash by
    the time we get here, because stringify_with_dot_if_path prepends a
    ./ to them when they're relative.

    This leaves the case where Windows users might pass a local file like
    "foo.bat" as a string, which we can't distinguish from a global
    program name. However, because Windows has the preferred "relative to
    parent's cwd" behavior already, this case actually works without our
    help. (The thing Windows users have to watch out for instead is local
    files shadowing global program names, which I don't think we can or
    should prevent.)'''
    has_sep = (os.path.sep in exe_name
               or (os.path.altsep is not None and os.path.altsep in exe_name))

    if has_sep and iocontext.dir is not None and not os.path.isabs(exe_name):
        return os.path.realpath(exe_name)
    else:
        return exe_name
python
def maybe_canonicalize_exe_path(exe_name, iocontext): '''There's a tricky interaction between exe paths and `dir`. Exe paths can be relative, and so we have to ask: Is an exe path interpreted relative to the parent's cwd, or the child's? The answer is that it's platform dependent! >.< (Windows uses the parent's cwd, but because of the fork-chdir-exec pattern, Unix usually uses the child's.) We want to use the parent's cwd consistently, because that saves the caller from having to worry about whether `dir` will have side effects, and because it's easy for the caller to use path.join if they want to. That means that when `dir` is in use, we need to detect exe names that are relative paths, and absolutify them. We want to do that as little as possible though, both because canonicalization can fail, and because we prefer to let the caller control the child's argv[0]. We never want to absolutify a name like "emacs", because that's probably a program in the PATH rather than a local file. So we look for slashes in the name to determine what's a filepath and what isn't. Note that anything given as a Path will always have a slash by the time we get here, because stringify_with_dot_if_path prepends a ./ to them when they're relative. This leaves the case where Windows users might pass a local file like "foo.bat" as a string, which we can't distinguish from a global program name. However, because Windows has the preferred "relative to parent's cwd" behavior already, this case actually works without our help. (The thing Windows users have to watch out for instead is local files shadowing global program names, which I don't think we can or should prevent.)''' has_sep = (os.path.sep in exe_name or (os.path.altsep is not None and os.path.altsep in exe_name)) if has_sep and iocontext.dir is not None and not os.path.isabs(exe_name): return os.path.realpath(exe_name) else: return exe_name
[ "def", "maybe_canonicalize_exe_path", "(", "exe_name", ",", "iocontext", ")", ":", "has_sep", "=", "(", "os", ".", "path", ".", "sep", "in", "exe_name", "or", "(", "os", ".", "path", ".", "altsep", "is", "not", "None", "and", "os", ".", "path", ".", "altsep", "in", "exe_name", ")", ")", "if", "has_sep", "and", "iocontext", ".", "dir", "is", "not", "None", "and", "not", "os", ".", "path", ".", "isabs", "(", "exe_name", ")", ":", "return", "os", ".", "path", ".", "realpath", "(", "exe_name", ")", "else", ":", "return", "exe_name" ]
There's a tricky interaction between exe paths and `dir`. Exe paths can be relative, and so we have to ask: Is an exe path interpreted relative to the parent's cwd, or the child's? The answer is that it's platform dependent! >.< (Windows uses the parent's cwd, but because of the fork-chdir-exec pattern, Unix usually uses the child's.) We want to use the parent's cwd consistently, because that saves the caller from having to worry about whether `dir` will have side effects, and because it's easy for the caller to use path.join if they want to. That means that when `dir` is in use, we need to detect exe names that are relative paths, and absolutify them. We want to do that as little as possible though, both because canonicalization can fail, and because we prefer to let the caller control the child's argv[0]. We never want to absolutify a name like "emacs", because that's probably a program in the PATH rather than a local file. So we look for slashes in the name to determine what's a filepath and what isn't. Note that anything given as a Path will always have a slash by the time we get here, because stringify_with_dot_if_path prepends a ./ to them when they're relative. This leaves the case where Windows users might pass a local file like "foo.bat" as a string, which we can't distinguish from a global program name. However, because Windows has the preferred "relative to parent's cwd" behavior already, this case actually works without our help. (The thing Windows users have to watch out for instead is local files shadowing global program names, which I don't think we can or should prevent.)
[ "There", "s", "a", "tricky", "interaction", "between", "exe", "paths", "and", "dir", ".", "Exe", "paths", "can", "be", "relative", "and", "so", "we", "have", "to", "ask", ":", "Is", "an", "exe", "path", "interpreted", "relative", "to", "the", "parent", "s", "cwd", "or", "the", "child", "s?", "The", "answer", "is", "that", "it", "s", "platform", "dependent!", ">", ".", "<", "(", "Windows", "uses", "the", "parent", "s", "cwd", "but", "because", "of", "the", "fork", "-", "chdir", "-", "exec", "pattern", "Unix", "usually", "uses", "the", "child", "s", ".", ")" ]
train
https://github.com/oconnor663/duct.py/blob/f10f1e9093a2913281294bb89a6e1744aa700e73/duct.py#L661-L694
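A minimal sketch of the three cases the slash test distinguishes. FakeContext is a hypothetical stand-in for the real iocontext, of which only the dir attribute is read here:

    import os
    from collections import namedtuple

    # hypothetical stand-in: maybe_canonicalize_exe_path only touches .dir
    FakeContext = namedtuple('FakeContext', ['dir'])

    # bare program name, no separator: left alone for PATH lookup
    print(maybe_canonicalize_exe_path('emacs', FakeContext(dir='/tmp')))
    # relative path while `dir` is set: absolutified against the parent's cwd
    print(maybe_canonicalize_exe_path(os.path.join('.', 'run.sh'), FakeContext(dir='/tmp')))
    # relative path without `dir`: returned unchanged
    print(maybe_canonicalize_exe_path(os.path.join('.', 'run.sh'), FakeContext(dir=None)))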
oconnor663/duct.py
duct.py
safe_popen
def safe_popen(*args, **kwargs): '''This wrapper works around two major deadlock issues to do with pipes. The first is that, before Python 3.2 on POSIX systems, os.pipe() creates inheritable file descriptors, which leak to all child processes and prevent reads from reaching EOF. The workaround for this is to set close_fds=True on POSIX, which was not the default in those versions. See PEP 0446 for many details. The second issue arises on Windows, where we're not allowed to set close_fds=True while also setting stdin/stdout/stderr. Descriptors from os.pipe() on Windows have never been inheritable, so it would seem that we're safe. However, the Windows implementation of subprocess.Popen() creates temporary inheritable copies of its descriptors, and these can leak. The workaround for this is to protect Popen() with a global lock. See https://bugs.python.org/issue25565.''' close_fds = (os.name != 'nt') with popen_lock: return subprocess.Popen(*args, close_fds=close_fds, **kwargs)
python
def safe_popen(*args, **kwargs): '''This wrapper works around two major deadlock issues to do with pipes. The first is that, before Python 3.2 on POSIX systems, os.pipe() creates inheritable file descriptors, which leak to all child processes and prevent reads from reaching EOF. The workaround for this is to set close_fds=True on POSIX, which was not the default in those versions. See PEP 0446 for many details. The second issue arises on Windows, where we're not allowed to set close_fds=True while also setting stdin/stdout/stderr. Descriptors from os.pipe() on Windows have never been inheritable, so it would seem that we're safe. However, the Windows implementation of subprocess.Popen() creates temporary inheritable copies of its descriptors, and these can leak. The workaround for this is to protect Popen() with a global lock. See https://bugs.python.org/issue25565.''' close_fds = (os.name != 'nt') with popen_lock: return subprocess.Popen(*args, close_fds=close_fds, **kwargs)
[ "def", "safe_popen", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "close_fds", "=", "(", "os", ".", "name", "!=", "'nt'", ")", "with", "popen_lock", ":", "return", "subprocess", ".", "Popen", "(", "*", "args", ",", "close_fds", "=", "close_fds", ",", "*", "*", "kwargs", ")" ]
This wrapper works around two major deadlock issues to do with pipes. The first is that, before Python 3.2 on POSIX systems, os.pipe() creates inheritable file descriptors, which leak to all child processes and prevent reads from reaching EOF. The workaround for this is to set close_fds=True on POSIX, which was not the default in those versions. See PEP 0446 for many details. The second issue arises on Windows, where we're not allowed to set close_fds=True while also setting stdin/stdout/stderr. Descriptors from os.pipe() on Windows have never been inheritable, so it would seem that we're safe. However, the Windows implementation of subprocess.Popen() creates temporary inheritable copies of its descriptors, and these can leak. The workaround for this is to protect Popen() with a global lock. See https://bugs.python.org/issue25565.
[ "This", "wrapper", "works", "around", "two", "major", "deadlock", "issues", "to", "do", "with", "pipes", ".", "The", "first", "is", "that", "before", "Python", "3", ".", "2", "on", "POSIX", "systems", "os", ".", "pipe", "()", "creates", "inheritable", "file", "descriptors", "which", "leak", "to", "all", "child", "processes", "and", "prevent", "reads", "from", "reaching", "EOF", ".", "The", "workaround", "for", "this", "is", "to", "set", "close_fds", "=", "True", "on", "POSIX", "which", "was", "not", "the", "default", "in", "those", "versions", ".", "See", "PEP", "0446", "for", "many", "details", "." ]
train
https://github.com/oconnor663/duct.py/blob/f10f1e9093a2913281294bb89a6e1744aa700e73/duct.py#L700-L718
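safe_popen presupposes a module-level popen_lock; a minimal sketch of that setup and a call, assuming only the standard library:

    import os
    import subprocess
    import threading

    # the global lock that safe_popen acquires around Popen()
    popen_lock = threading.Lock()

    # used exactly like subprocess.Popen, with both workarounds applied
    p = safe_popen(['echo', 'hello'], stdout=subprocess.PIPE)
    out, _ = p.communicate()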
oconnor663/duct.py
duct.py
Expression.run
def run(self): '''Execute the expression and return a Result, which includes the exit status and any captured output. Raise an exception if the status is non-zero.''' with spawn_output_reader() as (stdout_capture, stdout_thread): with spawn_output_reader() as (stderr_capture, stderr_thread): context = starter_iocontext(stdout_capture, stderr_capture) status = self._exec(context) stdout_bytes = stdout_thread.join() stderr_bytes = stderr_thread.join() result = Result(status.code, stdout_bytes, stderr_bytes) if is_checked_error(status): raise StatusError(result, self) return result
python
def run(self): '''Execute the expression and return a Result, which includes the exit status and any captured output. Raise an exception if the status is non-zero.''' with spawn_output_reader() as (stdout_capture, stdout_thread): with spawn_output_reader() as (stderr_capture, stderr_thread): context = starter_iocontext(stdout_capture, stderr_capture) status = self._exec(context) stdout_bytes = stdout_thread.join() stderr_bytes = stderr_thread.join() result = Result(status.code, stdout_bytes, stderr_bytes) if is_checked_error(status): raise StatusError(result, self) return result
[ "def", "run", "(", "self", ")", ":", "with", "spawn_output_reader", "(", ")", "as", "(", "stdout_capture", ",", "stdout_thread", ")", ":", "with", "spawn_output_reader", "(", ")", "as", "(", "stderr_capture", ",", "stderr_thread", ")", ":", "context", "=", "starter_iocontext", "(", "stdout_capture", ",", "stderr_capture", ")", "status", "=", "self", ".", "_exec", "(", "context", ")", "stdout_bytes", "=", "stdout_thread", ".", "join", "(", ")", "stderr_bytes", "=", "stderr_thread", ".", "join", "(", ")", "result", "=", "Result", "(", "status", ".", "code", ",", "stdout_bytes", ",", "stderr_bytes", ")", "if", "is_checked_error", "(", "status", ")", ":", "raise", "StatusError", "(", "result", ",", "self", ")", "return", "result" ]
Execute the expression and return a Result, which includes the exit status and any captured output. Raise an exception if the status is non-zero.
[ "Execute", "the", "expression", "and", "return", "a", "Result", "which", "includes", "the", "exit", "status", "and", "any", "captured", "output", ".", "Raise", "an", "exception", "if", "the", "status", "is", "non", "-", "zero", "." ]
train
https://github.com/oconnor663/duct.py/blob/f10f1e9093a2913281294bb89a6e1744aa700e73/duct.py#L26-L39
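A usage sketch, assuming duct's cmd() expression constructor; the Result construction above suggests status/stdout/stderr fields:

    from duct import cmd

    # run() blocks, raises StatusError on a checked non-zero exit,
    # and returns a Result carrying any captured output
    result = cmd('echo', 'hello').stdout_capture().run()
    print(result.stdout)  # b'hello\n' (bytes; read() below handles decoding)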
oconnor663/duct.py
duct.py
Expression.read
def read(self): '''Execute the expression and capture its output, similar to backticks or $() in the shell. This is a wrapper around run() which captures stdout, decodes it, trims it, and returns it directly.''' result = self.stdout_capture().run() stdout_str = decode_with_universal_newlines(result.stdout) return stdout_str.rstrip('\n')
python
def read(self): '''Execute the expression and capture its output, similar to backticks or $() in the shell. This is a wrapper around run() which captures stdout, decodes it, trims it, and returns it directly.''' result = self.stdout_capture().run() stdout_str = decode_with_universal_newlines(result.stdout) return stdout_str.rstrip('\n')
[ "def", "read", "(", "self", ")", ":", "result", "=", "self", ".", "stdout_capture", "(", ")", ".", "run", "(", ")", "stdout_str", "=", "decode_with_universal_newlines", "(", "result", ".", "stdout", ")", "return", "stdout_str", ".", "rstrip", "(", "'\\n'", ")" ]
Execute the expression and capture its output, similar to backticks or $() in the shell. This is a wrapper around run() which captures stdout, decodes it, trims it, and returns it directly.
[ "Execute", "the", "expression", "and", "capture", "its", "output", "similar", "to", "backticks", "or", "$", "()", "in", "the", "shell", ".", "This", "is", "a", "wrapper", "around", "run", "()", "which", "captures", "stdout", "decodes", "it", "trims", "it", "and", "returns", "it", "directly", "." ]
train
https://github.com/oconnor663/duct.py/blob/f10f1e9093a2913281294bb89a6e1744aa700e73/duct.py#L41-L47
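The backticks/$() analogue in one line, under the same cmd() assumption as above:

    from duct import cmd

    # stdout is captured, decoded with universal newlines, and right-trimmed
    assert cmd('echo', 'hello').read() == 'hello'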
oconnor663/duct.py
duct.py
Expression.start
def start(self): '''Equivalent to `run`, but instead of blocking the current thread, return a WaitHandle that doesn't block until `wait` is called. This is currently implemented with a simple background thread, though in theory it could avoid using threads in most cases.''' thread = ThreadWithReturn(self.run) thread.start() return WaitHandle(thread)
python
def start(self): '''Equivalent to `run`, but instead of blocking the current thread, return a WaitHandle that doesn't block until `wait` is called. This is currently implemented with a simple background thread, though in theory it could avoid using threads in most cases.''' thread = ThreadWithReturn(self.run) thread.start() return WaitHandle(thread)
[ "def", "start", "(", "self", ")", ":", "thread", "=", "ThreadWithReturn", "(", "self", ".", "run", ")", "thread", ".", "start", "(", ")", "return", "WaitHandle", "(", "thread", ")" ]
Equivalent to `run`, but instead of blocking the current thread, return a WaitHandle that doesn't block until `wait` is called. This is currently implemented with a simple background thread, though in theory it could avoid using threads in most cases.
[ "Equivalent", "to", "run", "but", "instead", "of", "blocking", "the", "current", "thread", "return", "a", "WaitHandle", "that", "doesn", "t", "block", "until", "wait", "is", "called", ".", "This", "is", "currently", "implemented", "with", "a", "simple", "background", "thread", "though", "in", "theory", "it", "could", "avoid", "using", "threads", "in", "most", "cases", "." ]
train
https://github.com/oconnor663/duct.py/blob/f10f1e9093a2913281294bb89a6e1744aa700e73/duct.py#L49-L56
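A non-blocking sketch; calling wait() on the returned WaitHandle is implied by the docstring:

    from duct import cmd

    handle = cmd('sleep', '1').start()  # returns immediately
    # ... do other work concurrently ...
    result = handle.wait()              # joins the background thread; same Result as run()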
keans/lmnotify
lmnotify/lmnotify.py
LaMetricManager._exec
def _exec(self, cmd, url, json_data=None): """ execute a command at the device using the RESTful API :param str cmd: one of the REST commands, e.g. GET or POST :param str url: URL of the REST API the command should be applied to :param dict json_data: json data that should be attached to the command """ assert(cmd in ("GET", "POST", "PUT", "DELETE")) assert(self.dev is not None) if json_data is None: json_data = {} # add device address to the URL url = url.format(self.dev["ipv4_internal"]) # set basic authentication auth = HTTPBasicAuth("dev", self.dev["api_key"]) # execute HTTP request res = None if cmd == "GET": res = self._local_session.session.get( url, auth=auth, verify=False ) elif cmd == "POST": res = self._local_session.session.post( url, auth=auth, json=json_data, verify=False ) elif cmd == "PUT": res = self._local_session.session.put( url, auth=auth, json=json_data, verify=False ) elif cmd == "DELETE": res = self._local_session.session.delete( url, auth=auth, verify=False ) if res is not None: # raise an exception on error res.raise_for_status() return res.json()
python
def _exec(self, cmd, url, json_data=None): """ execute a command at the device using the RESTful API :param str cmd: one of the REST commands, e.g. GET or POST :param str url: URL of the REST API the command should be applied to :param dict json_data: json data that should be attached to the command """ assert(cmd in ("GET", "POST", "PUT", "DELETE")) assert(self.dev is not None) if json_data is None: json_data = {} # add device address to the URL url = url.format(self.dev["ipv4_internal"]) # set basic authentication auth = HTTPBasicAuth("dev", self.dev["api_key"]) # execute HTTP request res = None if cmd == "GET": res = self._local_session.session.get( url, auth=auth, verify=False ) elif cmd == "POST": res = self._local_session.session.post( url, auth=auth, json=json_data, verify=False ) elif cmd == "PUT": res = self._local_session.session.put( url, auth=auth, json=json_data, verify=False ) elif cmd == "DELETE": res = self._local_session.session.delete( url, auth=auth, verify=False ) if res is not None: # raise an exception on error res.raise_for_status() return res.json()
[ "def", "_exec", "(", "self", ",", "cmd", ",", "url", ",", "json_data", "=", "None", ")", ":", "assert", "(", "cmd", "in", "(", "\"GET\"", ",", "\"POST\"", ",", "\"PUT\"", ",", "\"DELETE\"", ")", ")", "assert", "(", "self", ".", "dev", "is", "not", "None", ")", "if", "json_data", "is", "None", ":", "json_data", "=", "{", "}", "# add device address to the URL", "url", "=", "url", ".", "format", "(", "self", ".", "dev", "[", "\"ipv4_internal\"", "]", ")", "# set basic authentication", "auth", "=", "HTTPBasicAuth", "(", "\"dev\"", ",", "self", ".", "dev", "[", "\"api_key\"", "]", ")", "# execute HTTP request", "res", "=", "None", "if", "cmd", "==", "\"GET\"", ":", "res", "=", "self", ".", "_local_session", ".", "session", ".", "get", "(", "url", ",", "auth", "=", "auth", ",", "verify", "=", "False", ")", "elif", "cmd", "==", "\"POST\"", ":", "res", "=", "self", ".", "_local_session", ".", "session", ".", "post", "(", "url", ",", "auth", "=", "auth", ",", "json", "=", "json_data", ",", "verify", "=", "False", ")", "elif", "cmd", "==", "\"PUT\"", ":", "res", "=", "self", ".", "_local_session", ".", "session", ".", "put", "(", "url", ",", "auth", "=", "auth", ",", "json", "=", "json_data", ",", "verify", "=", "False", ")", "elif", "cmd", "==", "\"DELETE\"", ":", "res", "=", "self", ".", "_local_session", ".", "session", ".", "delete", "(", "url", ",", "auth", "=", "auth", ",", "verify", "=", "False", ")", "if", "res", "is", "not", "None", ":", "# raise an exception on error", "res", ".", "raise_for_status", "(", ")", "return", "res", ".", "json", "(", ")" ]
execute a command at the device using the RESTful API :param str cmd: one of the REST commands, e.g. GET or POST :param str url: URL of the REST API the command should be applied to :param dict json_data: json data that should be attached to the command
[ "execute", "a", "command", "at", "the", "device", "using", "the", "RESTful", "API" ]
train
https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L90-L136
keans/lmnotify
lmnotify/lmnotify.py
LaMetricManager.set_device
def set_device(self, dev): """ set the current device (that will be used for following API calls) :param dict dev: device that should be used for the API calls (can be obtained via get_devices function) """ log.debug("setting device to '{}'".format(dev)) self.dev = dev self.set_apps_list()
python
def set_device(self, dev): """ set the current device (that will be used for following API calls) :param dict dev: device that should be used for the API calls (can be obtained via get_devices function) """ log.debug("setting device to '{}'".format(dev)) self.dev = dev self.set_apps_list()
[ "def", "set_device", "(", "self", ",", "dev", ")", ":", "log", ".", "debug", "(", "\"setting device to '{}'\"", ".", "format", "(", "dev", ")", ")", "self", ".", "dev", "=", "dev", "self", ".", "set_apps_list", "(", ")" ]
set the current device (that will be used for following API calls) :param dict dev: device that should be used for the API calls (can be obtained via get_devices function)
[ "set", "the", "current", "device", "(", "that", "will", "be", "used", "for", "following", "API", "calls", ")" ]
train
https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L146-L155
keans/lmnotify
lmnotify/lmnotify.py
LaMetricManager._get_widget_id
def _get_widget_id(self, package_name): """ returns widget_id for given package_name; does not care about multiple widget ids at the moment, just picks the first :param str package_name: package to check for :return: id of first widget which belongs to the given package_name :rtype: str """ widget_id = "" for app in self.get_apps_list(): if app.package == package_name: widget_id = list(app.widgets.keys())[0] return widget_id
python
def _get_widget_id(self, package_name): """ returns widget_id for given package_name; does not care about multiple widget ids at the moment, just picks the first :param str package_name: package to check for :return: id of first widget which belongs to the given package_name :rtype: str """ widget_id = "" for app in self.get_apps_list(): if app.package == package_name: widget_id = list(app.widgets.keys())[0] return widget_id
[ "def", "_get_widget_id", "(", "self", ",", "package_name", ")", ":", "widget_id", "=", "\"\"", "for", "app", "in", "self", ".", "get_apps_list", "(", ")", ":", "if", "app", ".", "package", "==", "package_name", ":", "widget_id", "=", "list", "(", "app", ".", "widgets", ".", "keys", "(", ")", ")", "[", "0", "]", "return", "widget_id" ]
returns widget_id for given package_name; does not care about multiple widget ids at the moment, just picks the first :param str package_name: package to check for :return: id of first widget which belongs to the given package_name :rtype: str
[ "returns", "widget_id", "for", "given", "package_name", "does", "not", "care", "about", "multiple", "widget", "ids", "at", "the", "moment", "just", "picks", "the", "first" ]
train
https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L157-L171
keans/lmnotify
lmnotify/lmnotify.py
LaMetricManager.get_user
def get_user(self): """ get the user details via the cloud """ log.debug("getting user information from LaMetric cloud...") _, url = CLOUD_URLS["get_user"] res = self._cloud_session.session.get(url) if res is not None: # raise an exception on error res.raise_for_status() return res.json()
python
def get_user(self): """ get the user details via the cloud """ log.debug("getting user information from LaMetric cloud...") _, url = CLOUD_URLS["get_user"] res = self._cloud_session.session.get(url) if res is not None: # raise an exception on error res.raise_for_status() return res.json()
[ "def", "get_user", "(", "self", ")", ":", "log", ".", "debug", "(", "\"getting user information from LaMetric cloud...\"", ")", "_", ",", "url", "=", "CLOUD_URLS", "[", "\"get_user\"", "]", "res", "=", "self", ".", "_cloud_session", ".", "session", ".", "get", "(", "url", ")", "if", "res", "is", "not", "None", ":", "# raise an exception on error", "res", ".", "raise_for_status", "(", ")", "return", "res", ".", "json", "(", ")" ]
get the user details via the cloud
[ "get", "the", "user", "details", "via", "the", "cloud" ]
train
https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L174-L185
keans/lmnotify
lmnotify/lmnotify.py
LaMetricManager.get_devices
def get_devices(self, force_reload=False, save_devices=True): """ get all devices that are linked to the user; if the local device file does not exist, the devices will be obtained from the LaMetric cloud, otherwise the local device file will be read. :param bool force_reload: When True, devices are read again from cloud :param bool save_devices: When True, devices obtained from the LaMetric cloud are stored locally """ if ( (not os.path.exists(self._devices_filename)) or (force_reload is True) ): # -- load devices from LaMetric cloud -- log.debug("getting devices from LaMetric cloud...") _, url = CLOUD_URLS["get_devices"] res = self._cloud_session.session.get(url) if res is not None: # raise an exception on error res.raise_for_status() # store obtained devices internally self._devices = res.json() if save_devices is True: # save obtained devices to the local file self.save_devices() return self._devices else: # -- load devices from local file -- log.debug( "getting devices from '{}'...".format(self._devices_filename) ) return self.load_devices()
python
def get_devices(self, force_reload=False, save_devices=True): """ get all devices that are linked to the user; if the local device file does not exist, the devices will be obtained from the LaMetric cloud, otherwise the local device file will be read. :param bool force_reload: When True, devices are read again from cloud :param bool save_devices: When True, devices obtained from the LaMetric cloud are stored locally """ if ( (not os.path.exists(self._devices_filename)) or (force_reload is True) ): # -- load devices from LaMetric cloud -- log.debug("getting devices from LaMetric cloud...") _, url = CLOUD_URLS["get_devices"] res = self._cloud_session.session.get(url) if res is not None: # raise an exception on error res.raise_for_status() # store obtained devices internally self._devices = res.json() if save_devices is True: # save obtained devices to the local file self.save_devices() return self._devices else: # -- load devices from local file -- log.debug( "getting devices from '{}'...".format(self._devices_filename) ) return self.load_devices()
[ "def", "get_devices", "(", "self", ",", "force_reload", "=", "False", ",", "save_devices", "=", "True", ")", ":", "if", "(", "(", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "_devices_filename", ")", ")", "or", "(", "force_reload", "is", "True", ")", ")", ":", "# -- load devices from LaMetric cloud --", "log", ".", "debug", "(", "\"getting devices from LaMetric cloud...\"", ")", "_", ",", "url", "=", "CLOUD_URLS", "[", "\"get_devices\"", "]", "res", "=", "self", ".", "_cloud_session", ".", "session", ".", "get", "(", "url", ")", "if", "res", "is", "not", "None", ":", "# raise an exception on error", "res", ".", "raise_for_status", "(", ")", "# store obtained devices internally", "self", ".", "_devices", "=", "res", ".", "json", "(", ")", "if", "save_devices", "is", "True", ":", "# save obtained devices to the local file", "self", ".", "save_devices", "(", ")", "return", "self", ".", "_devices", "else", ":", "# -- load devices from local file --", "log", ".", "debug", "(", "\"getting devices from '{}'...\"", ".", "format", "(", "self", ".", "_devices_filename", ")", ")", "return", "self", ".", "load_devices", "(", ")" ]
get all devices that are linked to the user; if the local device file does not exist, the devices will be obtained from the LaMetric cloud, otherwise the local device file will be read. :param bool force_reload: When True, devices are read again from cloud :param bool save_devices: When True, devices obtained from the LaMetric cloud are stored locally
[ "get", "all", "devices", "that", "are", "linked", "to", "the", "user", "if", "the", "local", "device", "file", "is", "not", "existing", "the", "devices", "will", "be", "obtained", "from", "the", "LaMetric", "cloud", "otherwise", "the", "local", "device", "file", "will", "be", "read", "." ]
train
https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L187-L222
keans/lmnotify
lmnotify/lmnotify.py
LaMetricManager.save_devices
def save_devices(self): """ save devices that have been obtained from LaMetric cloud to a local file """ log.debug("saving devices to '{}'...".format(self._devices_filename)) if self._devices != []: with codecs.open(self._devices_filename, "wb", "utf-8") as f: json.dump(self._devices, f)
python
def save_devices(self): """ save devices that have been obtained from LaMetric cloud to a local file """ log.debug("saving devices to '{}'...".format(self._devices_filename)) if self._devices != []: with codecs.open(self._devices_filename, "wb", "utf-8") as f: json.dump(self._devices, f)
[ "def", "save_devices", "(", "self", ")", ":", "log", ".", "debug", "(", "\"saving devices to ''...\"", ".", "format", "(", "self", ".", "_devices_filename", ")", ")", "if", "self", ".", "_devices", "!=", "[", "]", ":", "with", "codecs", ".", "open", "(", "self", ".", "_devices_filename", ",", "\"wb\"", ",", "\"utf-8\"", ")", "as", "f", ":", "json", ".", "dump", "(", "self", ".", "_devices", ",", "f", ")" ]
save devices that have been obtained from LaMetric cloud to a local file
[ "save", "devices", "that", "have", "been", "obtained", "from", "LaMetric", "cloud", "to", "a", "local", "file" ]
train
https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L224-L232
keans/lmnotify
lmnotify/lmnotify.py
LaMetricManager.get_endpoint_map
def get_endpoint_map(self): """ returns API version and endpoint map """ log.debug("getting end points...") cmd, url = DEVICE_URLS["get_endpoint_map"] return self._exec(cmd, url)
python
def get_endpoint_map(self): """ returns API version and endpoint map """ log.debug("getting end points...") cmd, url = DEVICE_URLS["get_endpoint_map"] return self._exec(cmd, url)
[ "def", "get_endpoint_map", "(", "self", ")", ":", "log", ".", "debug", "(", "\"getting end points...\"", ")", "cmd", ",", "url", "=", "DEVICE_URLS", "[", "\"get_endpoint_map\"", "]", "return", "self", ".", "_exec", "(", "cmd", ",", "url", ")" ]
returns API version and endpoint map
[ "returns", "API", "version", "and", "endpoint", "map" ]
train
https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L235-L241
keans/lmnotify
lmnotify/lmnotify.py
LaMetricManager.load_devices
def load_devices(self): """ load stored devices from the local file """ self._devices = [] if os.path.exists(self._devices_filename): log.debug( "loading devices from '{}'...".format(self._devices_filename) ) with codecs.open(self._devices_filename, "rb", "utf-8") as f: self._devices = json.load(f) return self._devices
python
def load_devices(self): """ load stored devices from the local file """ self._devices = [] if os.path.exists(self._devices_filename): log.debug( "loading devices from '{}'...".format(self._devices_filename) ) with codecs.open(self._devices_filename, "rb", "utf-8") as f: self._devices = json.load(f) return self._devices
[ "def", "load_devices", "(", "self", ")", ":", "self", ".", "_devices", "=", "[", "]", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "_devices_filename", ")", ":", "log", ".", "debug", "(", "\"loading devices from '{}'...\"", ".", "format", "(", "self", ".", "_devices_filename", ")", ")", "with", "codecs", ".", "open", "(", "self", ".", "_devices_filename", ",", "\"rb\"", ",", "\"utf-8\"", ")", "as", "f", ":", "self", ".", "_devices", "=", "json", ".", "load", "(", "f", ")", "return", "self", ".", "_devices" ]
load stored devices from the local file
[ "load", "stored", "devices", "from", "the", "local", "file" ]
train
https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L252-L264
keans/lmnotify
lmnotify/lmnotify.py
LaMetricManager.get_device_state
def get_device_state(self): """ returns the full device state """ log.debug("getting device state...") cmd, url = DEVICE_URLS["get_device_state"] return self._exec(cmd, url)
python
def get_device_state(self): """ returns the full device state """ log.debug("getting device state...") cmd, url = DEVICE_URLS["get_device_state"] return self._exec(cmd, url)
[ "def", "get_device_state", "(", "self", ")", ":", "log", ".", "debug", "(", "\"getting device state...\"", ")", "cmd", ",", "url", "=", "DEVICE_URLS", "[", "\"get_device_state\"", "]", "return", "self", ".", "_exec", "(", "cmd", ",", "url", ")" ]
returns the full device state
[ "returns", "the", "full", "device", "state" ]
train
https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L266-L272
keans/lmnotify
lmnotify/lmnotify.py
LaMetricManager.send_notification
def send_notification( self, model, priority="warning", icon_type=None, lifetime=None ): """ sends new notification to the device :param Model model: an instance of the Model class that should be used :param str priority: the priority of the notification [info, warning or critical] (default: warning) :param str icon_type: the icon type of the notification [none, info or alert] (default: None) :param int lifetime: the lifetime of the notification in ms (default: 2 min) """ assert(priority in ("info", "warning", "critical")) assert(icon_type in (None, "none", "info", "alert")) assert((lifetime is None) or (lifetime > 0)) log.debug("sending notification...") cmd, url = DEVICE_URLS["send_notification"] json_data = {"model": model.json(), "priority": priority} if icon_type is not None: json_data["icon_type"] = icon_type if lifetime is not None: json_data["lifetime"] = lifetime return self._exec(cmd, url, json_data=json_data)
python
def send_notification( self, model, priority="warning", icon_type=None, lifetime=None ): """ sends new notification to the device :param Model model: an instance of the Model class that should be used :param str priority: the priority of the notification [info, warning or critical] (default: warning) :param str icon_type: the icon type of the notification [none, info or alert] (default: None) :param int lifetime: the lifetime of the notification in ms (default: 2 min) """ assert(priority in ("info", "warning", "critical")) assert(icon_type in (None, "none", "info", "alert")) assert((lifetime is None) or (lifetime > 0)) log.debug("sending notification...") cmd, url = DEVICE_URLS["send_notification"] json_data = {"model": model.json(), "priority": priority} if icon_type is not None: json_data["icon_type"] = icon_type if lifetime is not None: json_data["lifetime"] = lifetime return self._exec(cmd, url, json_data=json_data)
[ "def", "send_notification", "(", "self", ",", "model", ",", "priority", "=", "\"warning\"", ",", "icon_type", "=", "None", ",", "lifetime", "=", "None", ")", ":", "assert", "(", "priority", "in", "(", "\"info\"", ",", "\"warning\"", ",", "\"critical\"", ")", ")", "assert", "(", "icon_type", "in", "(", "None", ",", "\"none\"", ",", "\"info\"", ",", "\"alert\"", ")", ")", "assert", "(", "(", "lifetime", "is", "None", ")", "or", "(", "lifetime", ">", "0", ")", ")", "log", ".", "debug", "(", "\"sending notification...\"", ")", "cmd", ",", "url", "=", "DEVICE_URLS", "[", "\"send_notification\"", "]", "json_data", "=", "{", "\"model\"", ":", "model", ".", "json", "(", ")", ",", "\"priority\"", ":", "priority", "}", "if", "icon_type", "is", "not", "None", ":", "json_data", "[", "\"icon_type\"", "]", "=", "icon_type", "if", "lifetime", "is", "not", "None", ":", "json_data", "[", "\"lifetime\"", "]", "=", "lifetime", "return", "self", ".", "_exec", "(", "cmd", ",", "url", ",", "json_data", "=", "json_data", ")" ]
sends new notification to the device :param Model model: an instance of the Model class that should be used :param str priority: the priority of the notification [info, warning or critical] (default: warning) :param str icon_type: the icon type of the notification [none, info or alert] (default: None) :param int lifetime: the lifetime of the notification in ms (default: 2 min)
[ "sends", "new", "notification", "to", "the", "device" ]
train
https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L274-L303
keans/lmnotify
lmnotify/lmnotify.py
LaMetricManager.get_notifications
def get_notifications(self): """ returns the list of all notifications in queue """ log.debug("getting notifications in queue...") cmd, url = DEVICE_URLS["get_notifications_queue"] return self._exec(cmd, url)
python
def get_notifications(self): """ returns the list of all notifications in queue """ log.debug("getting notifications in queue...") cmd, url = DEVICE_URLS["get_notifications_queue"] return self._exec(cmd, url)
[ "def", "get_notifications", "(", "self", ")", ":", "log", ".", "debug", "(", "\"getting notifications in queue...\"", ")", "cmd", ",", "url", "=", "DEVICE_URLS", "[", "\"get_notifications_queue\"", "]", "return", "self", ".", "_exec", "(", "cmd", ",", "url", ")" ]
returns the list of all notifications in queue
[ "returns", "the", "list", "of", "all", "notifications", "in", "queue" ]
train
https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L305-L311
keans/lmnotify
lmnotify/lmnotify.py
LaMetricManager.get_current_notification
def get_current_notification(self): """ returns the current notification (i.e. the one that is visible) """ log.debug("getting visible notification...") cmd, url = DEVICE_URLS["get_current_notification"] return self._exec(cmd, url)
python
def get_current_notification(self): """ returns the current notification (i.e. the one that is visible) """ log.debug("getting visible notification...") cmd, url = DEVICE_URLS["get_current_notification"] return self._exec(cmd, url)
[ "def", "get_current_notification", "(", "self", ")", ":", "log", ".", "debug", "(", "\"getting visible notification...\"", ")", "cmd", ",", "url", "=", "DEVICE_URLS", "[", "\"get_current_notification\"", "]", "return", "self", ".", "_exec", "(", "cmd", ",", "url", ")" ]
returns the current notification (i.e. the one that is visible)
[ "returns", "the", "current", "notification", "(", "i", ".", "e", ".", "the", "one", "that", "is", "visible", ")" ]
train
https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L313-L319
keans/lmnotify
lmnotify/lmnotify.py
LaMetricManager.get_notification
def get_notification(self, notification_id): """ returns a specific notification by given id :param str notification_id: the ID of the notification """ log.debug("getting notification '{}'...".format(notification_id)) cmd, url = DEVICE_URLS["get_notification"] return self._exec(cmd, url.replace(":id", notification_id))
python
def get_notification(self, notification_id): """ returns a specific notification by given id :param str notification_id: the ID of the notification """ log.debug("getting notification '{}'...".format(notification_id)) cmd, url = DEVICE_URLS["get_notification"] return self._exec(cmd, url.replace(":id", notification_id))
[ "def", "get_notification", "(", "self", ",", "notification_id", ")", ":", "log", ".", "debug", "(", "\"getting notification '{}'...\"", ".", "format", "(", "notification_id", ")", ")", "cmd", ",", "url", "=", "DEVICE_URLS", "[", "\"get_notification\"", "]", "return", "self", ".", "_exec", "(", "cmd", ",", "url", ".", "replace", "(", "\":id\"", ",", "notification_id", ")", ")" ]
returns a specific notification by given id :param str notification_id: the ID of the notification
[ "returns", "a", "specific", "notification", "by", "given", "id" ]
train
https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L321-L329
keans/lmnotify
lmnotify/lmnotify.py
LaMetricManager.get_display
def get_display(self): """ returns information about the display, including brightness, screensaver etc. """ log.debug("getting display information...") cmd, url = DEVICE_URLS["get_display"] return self._exec(cmd, url)
python
def get_display(self): """ returns information about the display, including brightness, screensaver etc. """ log.debug("getting display information...") cmd, url = DEVICE_URLS["get_display"] return self._exec(cmd, url)
[ "def", "get_display", "(", "self", ")", ":", "log", ".", "debug", "(", "\"getting display information...\"", ")", "cmd", ",", "url", "=", "DEVICE_URLS", "[", "\"get_display\"", "]", "return", "self", ".", "_exec", "(", "cmd", ",", "url", ")" ]
returns information about the display, including brightness, screensaver etc.
[ "returns", "information", "about", "the", "display", "including", "brightness", "screensaver", "etc", "." ]
train
https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L341-L348