Dataset columns and value statistics:

    column                       type            min       max
    repository_name              stringlengths   7         55
    func_path_in_repository      stringlengths   4         223
    func_name                    stringlengths   1         134
    whole_func_string            stringlengths   75        104k
    language                     stringclasses   1 value
    func_code_string             stringlengths   75        104k
    func_code_tokens             listlengths     19        28.4k
    func_documentation_string    stringlengths   1         46.9k
    func_documentation_tokens    listlengths     1         1.97k
    split_name                   stringclasses   1 value
    func_code_url                stringlengths   87        315
sfischer13/python-prompt
prompt/__init__.py
secret
def secret(prompt=None, empty=False):
    """Prompt a string without echoing.

    Parameters
    ----------
    prompt : str, optional
        Use an alternative prompt.
    empty : bool, optional
        Allow an empty response.

    Returns
    -------
    str or None
        A str if the user entered a non-empty string.
        None if the user pressed only Enter and ``empty`` was True.

    Raises
    ------
    getpass.GetPassWarning
        If echo free input is unavailable.

    See Also
    --------
    getpass.getpass
    """
    if prompt is None:
        prompt = PROMPT
    s = getpass.getpass(prompt=prompt)
    if empty and not s:
        return None
    else:
        if s:
            return s
        else:
            return secret(prompt=prompt, empty=empty)
python
[ "def", "secret", "(", "prompt", "=", "None", ",", "empty", "=", "False", ")", ":", "if", "prompt", "is", "None", ":", "prompt", "=", "PROMPT", "s", "=", "getpass", ".", "getpass", "(", "prompt", "=", "prompt", ")", "if", "empty", "and", "not", "s", ":", "return", "None", "else", ":", "if", "s", ":", "return", "s", "else", ":", "return", "secret", "(", "prompt", "=", "prompt", ",", "empty", "=", "empty", ")" ]
Prompt a string without echoing.

Parameters
----------
prompt : str, optional
    Use an alternative prompt.
empty : bool, optional
    Allow an empty response.

Returns
-------
str or None
    A str if the user entered a non-empty string.
    None if the user pressed only Enter and ``empty`` was True.

Raises
------
getpass.GetPassWarning
    If echo free input is unavailable.

See Also
--------
getpass.getpass
[ "Prompt", "a", "string", "without", "echoing", "." ]
train
https://github.com/sfischer13/python-prompt/blob/d2acf5db64a9e45247c7abf1d67c2eb7db87bb48/prompt/__init__.py#L198-L233
sfischer13/python-prompt
prompt/__init__.py
string
def string(prompt=None, empty=False):
    """Prompt a string.

    Parameters
    ----------
    prompt : str, optional
        Use an alternative prompt.
    empty : bool, optional
        Allow an empty response.

    Returns
    -------
    str or None
        A str if the user entered a non-empty string.
        None if the user pressed only Enter and ``empty`` was True.
    """
    s = _prompt_input(prompt)
    if empty and not s:
        return None
    else:
        if s:
            return s
        else:
            return string(prompt=prompt, empty=empty)
python
[ "def", "string", "(", "prompt", "=", "None", ",", "empty", "=", "False", ")", ":", "s", "=", "_prompt_input", "(", "prompt", ")", "if", "empty", "and", "not", "s", ":", "return", "None", "else", ":", "if", "s", ":", "return", "s", "else", ":", "return", "string", "(", "prompt", "=", "prompt", ",", "empty", "=", "empty", ")" ]
Prompt a string.

Parameters
----------
prompt : str, optional
    Use an alternative prompt.
empty : bool, optional
    Allow an empty response.

Returns
-------
str or None
    A str if the user entered a non-empty string.
    None if the user pressed only Enter and ``empty`` was True.
[ "Prompt", "a", "string", "." ]
train
https://github.com/sfischer13/python-prompt/blob/d2acf5db64a9e45247c7abf1d67c2eb7db87bb48/prompt/__init__.py#L236-L260
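A minimal usage sketch for the two prompt helpers above, assuming the package is importable as prompt (as the file path prompt/__init__.py suggests); the retry-until-non-empty behavior comes from the recursive calls:

# Usage sketch; assumes the sfischer13/python-prompt package is installed
# and importable as `prompt`.
import prompt

name = prompt.string(prompt='Username: ')            # echoed, retries until non-empty
token = prompt.secret(prompt='API token: ')          # not echoed, retries until non-empty
nickname = prompt.string(prompt='Nickname: ', empty=True)  # Enter alone returns None
print(name, token is not None, nickname)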
iurisilvio/Flask-SQLAlchemy-Cache
flask_sqlalchemy_cache/core.py
CachingQuery._get_cache_plus_key
def _get_cache_plus_key(self):
    """Return a cache region plus key."""
    key = getattr(self, '_cache_key', self.key_from_query())
    return self._cache.cache, key
python
[ "def", "_get_cache_plus_key", "(", "self", ")", ":", "key", "=", "getattr", "(", "self", ",", "'_cache_key'", ",", "self", ".", "key_from_query", "(", ")", ")", "return", "self", ".", "_cache", ".", "cache", ",", "key" ]
Return a cache region plus key.
[ "Return", "a", "cache", "region", "plus", "key", "." ]
train
https://github.com/iurisilvio/Flask-SQLAlchemy-Cache/blob/d29023c8fc09fd5a6a0ae24d18eee2de88215ab0/flask_sqlalchemy_cache/core.py#L52-L55
iurisilvio/Flask-SQLAlchemy-Cache
flask_sqlalchemy_cache/core.py
CachingQuery.get_value
def get_value(self, merge=True, createfunc=None,
              expiration_time=None, ignore_expiration=False):
    """
    Return the value from the cache for this query.
    """
    cache, cache_key = self._get_cache_plus_key()

    # ignore_expiration means, if the value is in the cache
    # but is expired, return it anyway. This doesn't make sense
    # with createfunc, which says, if the value is expired, generate
    # a new value.
    assert not ignore_expiration or not createfunc, \
        "Can't ignore expiration and also provide createfunc"

    if ignore_expiration or not createfunc:
        cached_value = cache.get(cache_key,
                                 expiration_time=expiration_time,
                                 ignore_expiration=ignore_expiration)
    else:
        cached_value = cache.get(cache_key)
        if not cached_value:
            cached_value = createfunc()
            cache.set(cache_key, cached_value, timeout=expiration_time)

    if cached_value and merge:
        cached_value = self.merge_result(cached_value, load=False)

    return cached_value
python
[ "def", "get_value", "(", "self", ",", "merge", "=", "True", ",", "createfunc", "=", "None", ",", "expiration_time", "=", "None", ",", "ignore_expiration", "=", "False", ")", ":", "cache", ",", "cache_key", "=", "self", ".", "_get_cache_plus_key", "(", ")", "# ignore_expiration means, if the value is in the cache", "# but is expired, return it anyway. This doesn't make sense", "# with createfunc, which says, if the value is expired, generate", "# a new value.", "assert", "not", "ignore_expiration", "or", "not", "createfunc", ",", "\"Can't ignore expiration and also provide createfunc\"", "if", "ignore_expiration", "or", "not", "createfunc", ":", "cached_value", "=", "cache", ".", "get", "(", "cache_key", ",", "expiration_time", "=", "expiration_time", ",", "ignore_expiration", "=", "ignore_expiration", ")", "else", ":", "cached_value", "=", "cache", ".", "get", "(", "cache_key", ")", "if", "not", "cached_value", ":", "cached_value", "=", "createfunc", "(", ")", "cache", ".", "set", "(", "cache_key", ",", "cached_value", ",", "timeout", "=", "expiration_time", ")", "if", "cached_value", "and", "merge", ":", "cached_value", "=", "self", ".", "merge_result", "(", "cached_value", ",", "load", "=", "False", ")", "return", "cached_value" ]
Return the value from the cache for this query.
[ "Return", "the", "value", "from", "the", "cache", "for", "this", "query", "." ]
train
https://github.com/iurisilvio/Flask-SQLAlchemy-Cache/blob/d29023c8fc09fd5a6a0ae24d18eee2de88215ab0/flask_sqlalchemy_cache/core.py#L62-L89
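The createfunc branch above is a standard cache-aside pattern: on a miss, generate the value and store it before returning. A dict-backed sketch of just that branch, independent of the library (SimpleCache and get_or_create are illustrative names, not part of Flask-SQLAlchemy-Cache):

# Illustrative cache-aside sketch mirroring get_value()'s createfunc branch;
# `SimpleCache` is a stand-in, not the library's cache backend.
class SimpleCache:
    def __init__(self):
        self._store = {}

    def get(self, key):
        return self._store.get(key)

    def set(self, key, value, timeout=None):  # timeout ignored in this sketch
        self._store[key] = value

def get_or_create(cache, key, createfunc):
    value = cache.get(key)
    if not value:
        value = createfunc()      # generate on miss ...
        cache.set(key, value)     # ... and populate for the next caller
    return value

cache = SimpleCache()
print(get_or_create(cache, 'answer', lambda: 42))  # miss -> 42
print(get_or_create(cache, 'answer', lambda: 0))   # hit  -> 42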
iurisilvio/Flask-SQLAlchemy-Cache
flask_sqlalchemy_cache/core.py
CachingQuery.set_value
def set_value(self, value):
    """Set the value in the cache for this query."""
    cache, cache_key = self._get_cache_plus_key()
    cache.set(cache_key, value)
python
[ "def", "set_value", "(", "self", ",", "value", ")", ":", "cache", ",", "cache_key", "=", "self", ".", "_get_cache_plus_key", "(", ")", "cache", ".", "set", "(", "cache_key", ",", "value", ")" ]
Set the value in the cache for this query.
[ "Set", "the", "value", "in", "the", "cache", "for", "this", "query", "." ]
train
https://github.com/iurisilvio/Flask-SQLAlchemy-Cache/blob/d29023c8fc09fd5a6a0ae24d18eee2de88215ab0/flask_sqlalchemy_cache/core.py#L91-L94
iurisilvio/Flask-SQLAlchemy-Cache
flask_sqlalchemy_cache/core.py
CachingQuery.key_from_query
def key_from_query(self, qualifier=None):
    """
    Given a Query, create a cache key.

    There are many approaches to this; here we use the simplest,
    which is to create an md5 hash of the text of the SQL statement,
    combined with stringified versions of all the bound parameters
    within it.

    There's a bit of a performance hit with compiling out
    "query.statement" here; other approaches include setting up an
    explicit cache key with a particular Query, then combining that
    with the bound parameter values.
    """
    stmt = self.with_labels().statement
    compiled = stmt.compile()
    params = compiled.params

    values = [str(compiled)]
    for k in sorted(params):
        values.append(repr(params[k]))
    key = u" ".join(values)
    return md5(key.encode('utf8')).hexdigest()
python
[ "def", "key_from_query", "(", "self", ",", "qualifier", "=", "None", ")", ":", "stmt", "=", "self", ".", "with_labels", "(", ")", ".", "statement", "compiled", "=", "stmt", ".", "compile", "(", ")", "params", "=", "compiled", ".", "params", "values", "=", "[", "str", "(", "compiled", ")", "]", "for", "k", "in", "sorted", "(", "params", ")", ":", "values", ".", "append", "(", "repr", "(", "params", "[", "k", "]", ")", ")", "key", "=", "u\" \"", ".", "join", "(", "values", ")", "return", "md5", "(", "key", ".", "encode", "(", "'utf8'", ")", ")", ".", "hexdigest", "(", ")" ]
Given a Query, create a cache key. There are many approaches to this; here we use the simplest, which is to create an md5 hash of the text of the SQL statement, combined with stringified versions of all the bound parameters within it. There's a bit of a performance hit with compiling out "query.statement" here; other approaches include setting up an explicit cache key with a particular Query, then combining that with the bound parameter values.
[ "Given", "a", "Query", "create", "a", "cache", "key", "." ]
train
https://github.com/iurisilvio/Flask-SQLAlchemy-Cache/blob/d29023c8fc09fd5a6a0ae24d18eee2de88215ab0/flask_sqlalchemy_cache/core.py#L96-L116
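The key recipe above (md5 over the compiled SQL text plus the sorted, repr'd bound parameters) can be reproduced standalone. A sketch assuming SQLAlchemy 1.4+; the users table and filter value are invented for illustration:

# Standalone sketch of the key recipe: md5(compiled SQL + sorted bound params).
# The table and query are invented; assumes SQLAlchemy 1.4+.
from hashlib import md5
import sqlalchemy as sa

metadata = sa.MetaData()
users = sa.Table('users', metadata,
                 sa.Column('id', sa.Integer),
                 sa.Column('name', sa.String))

stmt = sa.select(users).where(users.c.name == 'alice')
compiled = stmt.compile()
params = compiled.params              # e.g. {'name_1': 'alice'}

values = [str(compiled)]
for k in sorted(params):
    values.append(repr(params[k]))
print(md5(" ".join(values).encode('utf8')).hexdigest())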
iurisilvio/Flask-SQLAlchemy-Cache
flask_sqlalchemy_cache/core.py
RelationshipCache.process_query_conditionally
def process_query_conditionally(self, query):
    """
    Process a Query that is used within a lazy loader.

    (the process_query_conditionally() method is a SQLAlchemy
    hook invoked only within lazyload.)
    """
    if query._current_path:
        mapper, prop = query._current_path[-2:]

        for cls in mapper.class_.__mro__:
            k = (cls, prop.key)
            relationship_option = self._relationship_options.get(k)
            if relationship_option:
                query._cache = relationship_option
                break
python
[ "def", "process_query_conditionally", "(", "self", ",", "query", ")", ":", "if", "query", ".", "_current_path", ":", "mapper", ",", "prop", "=", "query", ".", "_current_path", "[", "-", "2", ":", "]", "for", "cls", "in", "mapper", ".", "class_", ".", "__mro__", ":", "k", "=", "(", "cls", ",", "prop", ".", "key", ")", "relationship_option", "=", "self", ".", "_relationship_options", ".", "get", "(", "k", ")", "if", "relationship_option", ":", "query", ".", "_cache", "=", "relationship_option", "break" ]
Process a Query that is used within a lazy loader. (the process_query_conditionally() method is a SQLAlchemy hook invoked only within lazyload.)
[ "Process", "a", "Query", "that", "is", "used", "within", "a", "lazy", "loader", "." ]
train
https://github.com/iurisilvio/Flask-SQLAlchemy-Cache/blob/d29023c8fc09fd5a6a0ae24d18eee2de88215ab0/flask_sqlalchemy_cache/core.py#L179-L193
jakevdp/supersmoother
supersmoother/smoother.py
Smoother.fit
def fit(self, t, y, dy=1, presorted=False):
    """Fit the smoother

    Parameters
    ----------
    t : array_like
        time locations of the points to smooth
    y : array_like
        y locations of the points to smooth
    dy : array_like or float (default = 1)
        Errors in the y values
    presorted : bool (default = False)
        If True, then t is assumed to be sorted.

    Returns
    -------
    self : Smoother instance
    """
    self.t, self.y, self.dy = self._validate_inputs(t, y, dy, presorted)
    self._fit(self.t, self.y, self.dy)
    return self
python
[ "def", "fit", "(", "self", ",", "t", ",", "y", ",", "dy", "=", "1", ",", "presorted", "=", "False", ")", ":", "self", ".", "t", ",", "self", ".", "y", ",", "self", ".", "dy", "=", "self", ".", "_validate_inputs", "(", "t", ",", "y", ",", "dy", ",", "presorted", ")", "self", ".", "_fit", "(", "self", ".", "t", ",", "self", ".", "y", ",", "self", ".", "dy", ")", "return", "self" ]
Fit the smoother

Parameters
----------
t : array_like
    time locations of the points to smooth
y : array_like
    y locations of the points to smooth
dy : array_like or float (default = 1)
    Errors in the y values
presorted : bool (default = False)
    If True, then t is assumed to be sorted.

Returns
-------
self : Smoother instance
[ "Fit", "the", "smoother" ]
train
https://github.com/jakevdp/supersmoother/blob/0c96cf13dcd6f9006d3c0421f9cd6e18abe27a2f/supersmoother/smoother.py#L13-L33
jakevdp/supersmoother
supersmoother/smoother.py
Smoother.predict
def predict(self, t):
    """Predict the smoothed function value at time t

    Parameters
    ----------
    t : array_like
        Times at which to predict the result

    Returns
    -------
    y : ndarray
        Smoothed values at time t
    """
    t = np.asarray(t)
    return self._predict(np.ravel(t)).reshape(t.shape)
python
[ "def", "predict", "(", "self", ",", "t", ")", ":", "t", "=", "np", ".", "asarray", "(", "t", ")", "return", "self", ".", "_predict", "(", "np", ".", "ravel", "(", "t", ")", ")", ".", "reshape", "(", "t", ".", "shape", ")" ]
Predict the smoothed function value at time t

Parameters
----------
t : array_like
    Times at which to predict the result

Returns
-------
y : ndarray
    Smoothed values at time t
[ "Predict", "the", "smoothed", "function", "value", "at", "time", "t" ]
train
https://github.com/jakevdp/supersmoother/blob/0c96cf13dcd6f9006d3c0421f9cd6e18abe27a2f/supersmoother/smoother.py#L35-L49
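A usage sketch for the fit/predict API above, assuming the package's SuperSmoother class is the concrete Smoother subclass (as in the repository's README); the data is synthetic:

# Usage sketch; assumes `from supersmoother import SuperSmoother` works
# as in the repo's README. Data below is synthetic.
import numpy as np
from supersmoother import SuperSmoother

rng = np.random.RandomState(0)
t = np.sort(10 * rng.rand(200))
y = np.sin(t) + 0.2 * rng.randn(200)

model = SuperSmoother().fit(t, y, dy=0.2)   # fit returns self, so calls chain
tfit = np.linspace(0, 10, 500)
yfit = model.predict(tfit)                  # smoothed values, same shape as tfit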
jakevdp/supersmoother
supersmoother/smoother.py
Smoother.cv_residuals
def cv_residuals(self, cv=True):
    """Return the residuals of the cross-validation for the fit data"""
    vals = self.cv_values(cv)
    return (self.y - vals) / self.dy
python
[ "def", "cv_residuals", "(", "self", ",", "cv", "=", "True", ")", ":", "vals", "=", "self", ".", "cv_values", "(", "cv", ")", "return", "(", "self", ".", "y", "-", "vals", ")", "/", "self", ".", "dy" ]
Return the residuals of the cross-validation for the fit data
[ "Return", "the", "residuals", "of", "the", "cross", "-", "validation", "for", "the", "fit", "data" ]
train
https://github.com/jakevdp/supersmoother/blob/0c96cf13dcd6f9006d3c0421f9cd6e18abe27a2f/supersmoother/smoother.py#L55-L58
jakevdp/supersmoother
supersmoother/smoother.py
Smoother.cv_error
def cv_error(self, cv=True, skip_endpoints=True):
    """Return the mean absolute cross-validation residual for the input data"""
    resids = self.cv_residuals(cv)
    if skip_endpoints:
        resids = resids[1:-1]
    return np.mean(abs(resids))
python
[ "def", "cv_error", "(", "self", ",", "cv", "=", "True", ",", "skip_endpoints", "=", "True", ")", ":", "resids", "=", "self", ".", "cv_residuals", "(", "cv", ")", "if", "skip_endpoints", ":", "resids", "=", "resids", "[", "1", ":", "-", "1", "]", "return", "np", ".", "mean", "(", "abs", "(", "resids", ")", ")" ]
Return the mean absolute cross-validation residual for the input data
[ "Return", "the", "sum", "of", "cross", "-", "validation", "residuals", "for", "the", "input", "data" ]
train
https://github.com/jakevdp/supersmoother/blob/0c96cf13dcd6f9006d3c0421f9cd6e18abe27a2f/supersmoother/smoother.py#L60-L65
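Spelled out with plain numpy, cv_error is the mean (not the sum, despite the docstring's original wording) of the absolute scaled residuals, with the endpoints dropped by default:

# What cv_error computes, with illustrative numbers.
import numpy as np

y = np.array([1.0, 2.0, 3.0, 4.0])      # observed values (illustrative)
vals = np.array([1.1, 1.9, 3.2, 3.8])   # cross-validated smoother output
dy = 0.1

resids = (y - vals) / dy                 # cv_residuals: (-1, 1, -2, 2)
error = np.mean(np.abs(resids[1:-1]))    # cv_error with skip_endpoints=True
print(error)                             # 1.5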
casebeer/audiogen
audiogen/noise.py
arcfour
def arcfour(key, csbN=1):
    '''Return a generator for the ARCFOUR/RC4 pseudorandom keystream for the
    key provided. Keys should be byte strings or sequences of ints.'''
    if isinstance(key, str):
        key = [ord(c) for c in key]

    s = range(256)
    j = 0
    for n in range(csbN):
        for i in range(256):
            j = (j + s[i] + key[i % len(key)]) % 256
            t = s[i]
            s[i] = s[j]
            s[j] = t

    i = 0
    j = 0
    while True:
        i = (i + 1) % 256
        j = (j + s[i]) % 256
        t = s[i]
        s[i] = s[j]
        s[j] = t
        yield s[(s[i] + s[j]) % 256]
python
[ "def", "arcfour", "(", "key", ",", "csbN", "=", "1", ")", ":", "if", "isinstance", "(", "key", ",", "str", ")", ":", "key", "=", "[", "ord", "(", "c", ")", "for", "c", "in", "key", "]", "s", "=", "range", "(", "256", ")", "j", "=", "0", "for", "n", "in", "range", "(", "csbN", ")", ":", "for", "i", "in", "range", "(", "256", ")", ":", "j", "=", "(", "j", "+", "s", "[", "i", "]", "+", "key", "[", "i", "%", "len", "(", "key", ")", "]", ")", "%", "256", "t", "=", "s", "[", "i", "]", "s", "[", "i", "]", "=", "s", "[", "j", "]", "s", "[", "j", "]", "=", "t", "i", "=", "0", "j", "=", "0", "while", "True", ":", "i", "=", "(", "i", "+", "1", ")", "%", "256", "j", "=", "(", "j", "+", "s", "[", "i", "]", ")", "%", "256", "t", "=", "s", "[", "i", "]", "s", "[", "i", "]", "=", "s", "[", "j", "]", "s", "[", "j", "]", "=", "t", "yield", "s", "[", "(", "s", "[", "i", "]", "+", "s", "[", "j", "]", ")", "%", "256", "]" ]
Return a generator for the ARCFOUR/RC4 pseudorandom keystream for the key provided. Keys should be byte strings or sequences of ints.
[ "Return", "a", "generator", "for", "the", "ARCFOUR", "/", "RC4", "pseudorandom", "keystream", "for", "the", "key", "provided", ".", "Keys", "should", "be", "byte", "strings", "or", "sequences", "of", "ints", "." ]
train
https://github.com/casebeer/audiogen/blob/184dee2ca32c2bb4315a0f18e62288728fcd7881/audiogen/noise.py#L5-L26
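As written this is Python 2 code: s = range(256) must be a mutable list for the in-place swaps, and consumers call .next(). A Python 3 port of the same generator, checked against the standard RC4 test vector for the key 'Key' (keystream begins EB 9F 77 81):

# Python 3 port of the generator above; s must be a mutable list here.
def arcfour_py3(key, csbN=1):
    if isinstance(key, str):
        key = [ord(c) for c in key]
    s = list(range(256))          # bare range() is not mutable in Python 3
    j = 0
    for _ in range(csbN):         # csbN=1 is the standard RC4 key schedule
        for i in range(256):
            j = (j + s[i] + key[i % len(key)]) % 256
            s[i], s[j] = s[j], s[i]
    i = j = 0
    while True:
        i = (i + 1) % 256
        j = (j + s[i]) % 256
        s[i], s[j] = s[j], s[i]
        yield s[(s[i] + s[j]) % 256]

ks = arcfour_py3('Key')
assert [next(ks) for _ in range(4)] == [0xEB, 0x9F, 0x77, 0x81]  # known vector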
casebeer/audiogen
audiogen/noise.py
arcfour_drop
def arcfour_drop(key, n=3072):
    '''Return a generator for the RC4-drop pseudorandom keystream given by
    the key and number of bytes to drop passed as arguments.

    Dropped bytes default to the more conservative 3072, NOT the SCAN
    default of 768.'''
    af = arcfour(key)
    [af.next() for c in range(n)]
    return af
python
[ "def", "arcfour_drop", "(", "key", ",", "n", "=", "3072", ")", ":", "af", "=", "arcfour", "(", "key", ")", "[", "af", ".", "next", "(", ")", "for", "c", "in", "range", "(", "n", ")", "]", "return", "af" ]
Return a generator for the RC4-drop pseudorandom keystream given by the key and number of bytes to drop passed as arguments. Dropped bytes default to the more conservative 3072, NOT the SCAN default of 768.
[ "Return", "a", "generator", "for", "the", "RC4", "-", "drop", "pseudorandom", "keystream", "given", "by", "the", "key", "and", "number", "of", "bytes", "to", "drop", "passed", "as", "arguments", ".", "Dropped", "bytes", "default", "to", "the", "more", "conservative", "3072", "NOT", "the", "SCAN", "default", "of", "768", "." ]
train
https://github.com/casebeer/audiogen/blob/184dee2ca32c2bb4315a0f18e62288728fcd7881/audiogen/noise.py#L28-L34
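Under Python 3 the af.next() calls become next(af); the drop loop can also use the itertools consume recipe, building on the arcfour_py3 port sketched above:

# Python 3 equivalent of the drop loop; relies on the arcfour_py3 sketch.
from itertools import islice

def arcfour_drop_py3(key, n=3072):
    af = arcfour_py3(key)
    next(islice(af, n, n), None)   # advance n steps, discarding the output
    return af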
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
resolve_ssl_protocol_version
def resolve_ssl_protocol_version(version=None):
    """
    Look up an SSL protocol version by name. If *version* is not specified,
    then the strongest protocol available will be returned.

    :param str version: The name of the version to look up.
    :return: A protocol constant from the :py:mod:`ssl` module.
    :rtype: int
    """
    if version is None:
        protocol_preference = ('TLSv1_2', 'TLSv1_1', 'TLSv1', 'SSLv3', 'SSLv23', 'SSLv2')
        for protocol in protocol_preference:
            if hasattr(ssl, 'PROTOCOL_' + protocol):
                return getattr(ssl, 'PROTOCOL_' + protocol)
        raise RuntimeError('could not find a suitable ssl PROTOCOL_ version constant')
    elif isinstance(version, str):
        if not hasattr(ssl, 'PROTOCOL_' + version):
            raise ValueError('invalid ssl protocol version: ' + version)
        return getattr(ssl, 'PROTOCOL_' + version)
    raise TypeError("ssl_version() argument 1 must be str, not {0}".format(type(version).__name__))
python
[ "def", "resolve_ssl_protocol_version", "(", "version", "=", "None", ")", ":", "if", "version", "is", "None", ":", "protocol_preference", "=", "(", "'TLSv1_2'", ",", "'TLSv1_1'", ",", "'TLSv1'", ",", "'SSLv3'", ",", "'SSLv23'", ",", "'SSLv2'", ")", "for", "protocol", "in", "protocol_preference", ":", "if", "hasattr", "(", "ssl", ",", "'PROTOCOL_'", "+", "protocol", ")", ":", "return", "getattr", "(", "ssl", ",", "'PROTOCOL_'", "+", "protocol", ")", "raise", "RuntimeError", "(", "'could not find a suitable ssl PROTOCOL_ version constant'", ")", "elif", "isinstance", "(", "version", ",", "str", ")", ":", "if", "not", "hasattr", "(", "ssl", ",", "'PROTOCOL_'", "+", "version", ")", ":", "raise", "ValueError", "(", "'invalid ssl protocol version: '", "+", "version", ")", "return", "getattr", "(", "ssl", ",", "'PROTOCOL_'", "+", "version", ")", "raise", "TypeError", "(", "\"ssl_version() argument 1 must be str, not {0}\"", ".", "format", "(", "type", "(", "version", ")", ".", "__name__", ")", ")" ]
Look up an SSL protocol version by name. If *version* is not specified,
then the strongest protocol available will be returned.

:param str version: The name of the version to look up.
:return: A protocol constant from the :py:mod:`ssl` module.
:rtype: int
[ "Look", "up", "an", "SSL", "protocol", "version", "by", "name", ".", "If", "*", "version", "*", "is", "not", "specified", "then", "the", "strongest", "protocol", "available", "will", "be", "returned", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L215-L234
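A quick demonstration of both lookup paths; which PROTOCOL_* constants exist depends on the local Python/OpenSSL build, so the hasattr guard below is not optional:

# Demonstration sketch; assumes resolve_ssl_protocol_version() from above
# is in scope. Available constants vary per build.
import ssl

print([p[9:] for p in dir(ssl) if p.startswith('PROTOCOL_')])  # accepted names
if hasattr(ssl, 'PROTOCOL_TLSv1_2'):
    assert resolve_ssl_protocol_version('TLSv1_2') == ssl.PROTOCOL_TLSv1_2
strongest = resolve_ssl_protocol_version()  # first hit in the preference tuple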
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
build_server_from_argparser
def build_server_from_argparser(description=None, server_klass=None, handler_klass=None):
    """
    Build a server from command line arguments. If a ServerClass or
    HandlerClass is specified, then the object must inherit from the
    corresponding AdvancedHTTPServer base class.

    :param str description: Description string to be passed to the argument parser.
    :param server_klass: Alternative server class to use.
    :type server_klass: :py:class:`.AdvancedHTTPServer`
    :param handler_klass: Alternative handler class to use.
    :type handler_klass: :py:class:`.RequestHandler`
    :return: A configured server instance.
    :rtype: :py:class:`.AdvancedHTTPServer`
    """
    import argparse

    def _argp_dir_type(arg):
        if not os.path.isdir(arg):
            raise argparse.ArgumentTypeError("{0} is not a valid directory".format(repr(arg)))
        return arg

    def _argp_port_type(arg):
        if not arg.isdigit():
            raise argparse.ArgumentTypeError("{0} is not a valid port".format(repr(arg)))
        arg = int(arg)
        if arg < 0 or arg > 65535:
            raise argparse.ArgumentTypeError("{0} is not a valid port".format(repr(arg)))
        return arg

    description = (description or 'HTTP Server')
    server_klass = (server_klass or AdvancedHTTPServer)
    handler_klass = (handler_klass or RequestHandler)

    parser = argparse.ArgumentParser(conflict_handler='resolve', description=description, fromfile_prefix_chars='@')
    parser.epilog = 'When a config file is specified with --config only the --log, --log-file and --password options will be used.'
    parser.add_argument('-c', '--conf', dest='config', type=argparse.FileType('r'), help='read settings from a config file')
    parser.add_argument('-i', '--ip', dest='ip', default='0.0.0.0', help='the ip address to serve on')
    parser.add_argument('-L', '--log', dest='loglvl', choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'), default='INFO', help='set the logging level')
    parser.add_argument('-p', '--port', dest='port', default=8080, type=_argp_port_type, help='port to serve on')
    parser.add_argument('-v', '--version', action='version', version=parser.prog + ' Version: ' + __version__)
    parser.add_argument('-w', '--web-root', dest='web_root', default='.', type=_argp_dir_type, help='path to the web root directory')
    parser.add_argument('--log-file', dest='log_file', help='log information to a file')
    parser.add_argument('--no-threads', dest='use_threads', action='store_false', default=True, help='disable threading')
    parser.add_argument('--password', dest='password', help='password to use for basic authentication')
    ssl_group = parser.add_argument_group('ssl options')
    ssl_group.add_argument('--ssl-cert', dest='ssl_cert', help='the ssl cert to use')
    ssl_group.add_argument('--ssl-key', dest='ssl_key', help='the ssl key to use')
    ssl_group.add_argument('--ssl-version', dest='ssl_version', choices=[p[9:] for p in dir(ssl) if p.startswith('PROTOCOL_')], help='the version of ssl to use')
    arguments = parser.parse_args()

    logging.getLogger('').setLevel(logging.DEBUG)
    console_log_handler = logging.StreamHandler()
    console_log_handler.setLevel(getattr(logging, arguments.loglvl))
    console_log_handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)-8s %(message)s"))
    logging.getLogger('').addHandler(console_log_handler)

    if arguments.log_file:
        main_file_handler = logging.handlers.RotatingFileHandler(arguments.log_file, maxBytes=262144, backupCount=5)
        main_file_handler.setLevel(logging.DEBUG)
        main_file_handler.setFormatter(logging.Formatter("%(asctime)s %(name)-30s %(levelname)-10s %(message)s"))
        logging.getLogger('').setLevel(logging.DEBUG)
        logging.getLogger('').addHandler(main_file_handler)

    if arguments.config:
        config = ConfigParser()
        config.readfp(arguments.config)
        server = build_server_from_config(
            config,
            'server',
            server_klass=server_klass,
            handler_klass=handler_klass
        )
    else:
        server = server_klass(
            handler_klass,
            address=(arguments.ip, arguments.port),
            use_threads=arguments.use_threads,
            ssl_certfile=arguments.ssl_cert,
            ssl_keyfile=arguments.ssl_key,
            ssl_version=arguments.ssl_version
        )
        server.serve_files_root = arguments.web_root

    if arguments.password:
        server.auth_add_creds('', arguments.password)
    return server
python
[ "def", "build_server_from_argparser", "(", "description", "=", "None", ",", "server_klass", "=", "None", ",", "handler_klass", "=", "None", ")", ":", "import", "argparse", "def", "_argp_dir_type", "(", "arg", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "arg", ")", ":", "raise", "argparse", ".", "ArgumentTypeError", "(", "\"{0} is not a valid directory\"", ".", "format", "(", "repr", "(", "arg", ")", ")", ")", "return", "arg", "def", "_argp_port_type", "(", "arg", ")", ":", "if", "not", "arg", ".", "isdigit", "(", ")", ":", "raise", "argparse", ".", "ArgumentTypeError", "(", "\"{0} is not a valid port\"", ".", "format", "(", "repr", "(", "arg", ")", ")", ")", "arg", "=", "int", "(", "arg", ")", "if", "arg", "<", "0", "or", "arg", ">", "65535", ":", "raise", "argparse", ".", "ArgumentTypeError", "(", "\"{0} is not a valid port\"", ".", "format", "(", "repr", "(", "arg", ")", ")", ")", "return", "arg", "description", "=", "(", "description", "or", "'HTTP Server'", ")", "server_klass", "=", "(", "server_klass", "or", "AdvancedHTTPServer", ")", "handler_klass", "=", "(", "handler_klass", "or", "RequestHandler", ")", "parser", "=", "argparse", ".", "ArgumentParser", "(", "conflict_handler", "=", "'resolve'", ",", "description", "=", "description", ",", "fromfile_prefix_chars", "=", "'@'", ")", "parser", ".", "epilog", "=", "'When a config file is specified with --config only the --log, --log-file and --password options will be used.'", "parser", ".", "add_argument", "(", "'-c'", ",", "'--conf'", ",", "dest", "=", "'config'", ",", "type", "=", "argparse", ".", "FileType", "(", "'r'", ")", ",", "help", "=", "'read settings from a config file'", ")", "parser", ".", "add_argument", "(", "'-i'", ",", "'--ip'", ",", "dest", "=", "'ip'", ",", "default", "=", "'0.0.0.0'", ",", "help", "=", "'the ip address to serve on'", ")", "parser", ".", "add_argument", "(", "'-L'", ",", "'--log'", ",", "dest", "=", "'loglvl'", ",", "choices", "=", "(", "'DEBUG'", ",", "'INFO'", ",", "'WARNING'", ",", "'ERROR'", ",", "'CRITICAL'", ")", ",", "default", "=", "'INFO'", ",", "help", "=", "'set the logging level'", ")", "parser", ".", "add_argument", "(", "'-p'", ",", "'--port'", ",", "dest", "=", "'port'", ",", "default", "=", "8080", ",", "type", "=", "_argp_port_type", ",", "help", "=", "'port to serve on'", ")", "parser", ".", "add_argument", "(", "'-v'", ",", "'--version'", ",", "action", "=", "'version'", ",", "version", "=", "parser", ".", "prog", "+", "' Version: '", "+", "__version__", ")", "parser", ".", "add_argument", "(", "'-w'", ",", "'--web-root'", ",", "dest", "=", "'web_root'", ",", "default", "=", "'.'", ",", "type", "=", "_argp_dir_type", ",", "help", "=", "'path to the web root directory'", ")", "parser", ".", "add_argument", "(", "'--log-file'", ",", "dest", "=", "'log_file'", ",", "help", "=", "'log information to a file'", ")", "parser", ".", "add_argument", "(", "'--no-threads'", ",", "dest", "=", "'use_threads'", ",", "action", "=", "'store_false'", ",", "default", "=", "True", ",", "help", "=", "'disable threading'", ")", "parser", ".", "add_argument", "(", "'--password'", ",", "dest", "=", "'password'", ",", "help", "=", "'password to use for basic authentication'", ")", "ssl_group", "=", "parser", ".", "add_argument_group", "(", "'ssl options'", ")", "ssl_group", ".", "add_argument", "(", "'--ssl-cert'", ",", "dest", "=", "'ssl_cert'", ",", "help", "=", "'the ssl cert to use'", ")", "ssl_group", ".", "add_argument", "(", "'--ssl-key'", ",", "dest", "=", "'ssl_key'", ",", 
"help", "=", "'the ssl key to use'", ")", "ssl_group", ".", "add_argument", "(", "'--ssl-version'", ",", "dest", "=", "'ssl_version'", ",", "choices", "=", "[", "p", "[", "9", ":", "]", "for", "p", "in", "dir", "(", "ssl", ")", "if", "p", ".", "startswith", "(", "'PROTOCOL_'", ")", "]", ",", "help", "=", "'the version of ssl to use'", ")", "arguments", "=", "parser", ".", "parse_args", "(", ")", "logging", ".", "getLogger", "(", "''", ")", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "console_log_handler", "=", "logging", ".", "StreamHandler", "(", ")", "console_log_handler", ".", "setLevel", "(", "getattr", "(", "logging", ",", "arguments", ".", "loglvl", ")", ")", "console_log_handler", ".", "setFormatter", "(", "logging", ".", "Formatter", "(", "\"%(asctime)s %(levelname)-8s %(message)s\"", ")", ")", "logging", ".", "getLogger", "(", "''", ")", ".", "addHandler", "(", "console_log_handler", ")", "if", "arguments", ".", "log_file", ":", "main_file_handler", "=", "logging", ".", "handlers", ".", "RotatingFileHandler", "(", "arguments", ".", "log_file", ",", "maxBytes", "=", "262144", ",", "backupCount", "=", "5", ")", "main_file_handler", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "main_file_handler", ".", "setFormatter", "(", "logging", ".", "Formatter", "(", "\"%(asctime)s %(name)-30s %(levelname)-10s %(message)s\"", ")", ")", "logging", ".", "getLogger", "(", "''", ")", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "logging", ".", "getLogger", "(", "''", ")", ".", "addHandler", "(", "main_file_handler", ")", "if", "arguments", ".", "config", ":", "config", "=", "ConfigParser", "(", ")", "config", ".", "readfp", "(", "arguments", ".", "config", ")", "server", "=", "build_server_from_config", "(", "config", ",", "'server'", ",", "server_klass", "=", "server_klass", ",", "handler_klass", "=", "handler_klass", ")", "else", ":", "server", "=", "server_klass", "(", "handler_klass", ",", "address", "=", "(", "arguments", ".", "ip", ",", "arguments", ".", "port", ")", ",", "use_threads", "=", "arguments", ".", "use_threads", ",", "ssl_certfile", "=", "arguments", ".", "ssl_cert", ",", "ssl_keyfile", "=", "arguments", ".", "ssl_key", ",", "ssl_version", "=", "arguments", ".", "ssl_version", ")", "server", ".", "serve_files_root", "=", "arguments", ".", "web_root", "if", "arguments", ".", "password", ":", "server", ".", "auth_add_creds", "(", "''", ",", "arguments", ".", "password", ")", "return", "server" ]
Build a server from command line arguments. If a ServerClass or
HandlerClass is specified, then the object must inherit from the
corresponding AdvancedHTTPServer base class.

:param str description: Description string to be passed to the argument parser.
:param server_klass: Alternative server class to use.
:type server_klass: :py:class:`.AdvancedHTTPServer`
:param handler_klass: Alternative handler class to use.
:type handler_klass: :py:class:`.RequestHandler`
:return: A configured server instance.
:rtype: :py:class:`.AdvancedHTTPServer`
[ "Build", "a", "server", "from", "command", "line", "arguments", ".", "If", "a", "ServerClass", "or", "HandlerClass", "is", "specified", "then", "the", "object", "must", "inherit", "from", "the", "corresponding", "AdvancedHTTPServer", "base", "class", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L236-L321
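A hypothetical __main__ wiring for the builder; serve_forever() and shutdown() are assumed to follow the usual socketserver-style interface rather than being confirmed from this excerpt:

# Hypothetical entry point for the builder above; assumes AdvancedHTTPServer
# exposes serve_forever()/shutdown() like the stdlib servers it wraps.
if __name__ == '__main__':
    server = build_server_from_argparser(description='Demo HTTP Server')
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        server.shutdown()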
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
build_server_from_config
def build_server_from_config(config, section_name, server_klass=None, handler_klass=None):
    """
    Build a server from a provided :py:class:`configparser.ConfigParser`
    instance. If a ServerClass or HandlerClass is specified, then the
    object must inherit from the corresponding AdvancedHTTPServer base
    class.

    :param config: Configuration to retrieve settings from.
    :type config: :py:class:`configparser.ConfigParser`
    :param str section_name: The section name of the configuration to use.
    :param server_klass: Alternative server class to use.
    :type server_klass: :py:class:`.AdvancedHTTPServer`
    :param handler_klass: Alternative handler class to use.
    :type handler_klass: :py:class:`.RequestHandler`
    :return: A configured server instance.
    :rtype: :py:class:`.AdvancedHTTPServer`
    """
    server_klass = (server_klass or AdvancedHTTPServer)
    handler_klass = (handler_klass or RequestHandler)
    port = config.getint(section_name, 'port')
    web_root = None
    if config.has_option(section_name, 'web_root'):
        web_root = config.get(section_name, 'web_root')

    if config.has_option(section_name, 'ip'):
        ip = config.get(section_name, 'ip')
    else:
        ip = '0.0.0.0'
    ssl_certfile = None
    if config.has_option(section_name, 'ssl_cert'):
        ssl_certfile = config.get(section_name, 'ssl_cert')
    ssl_keyfile = None
    if config.has_option(section_name, 'ssl_key'):
        ssl_keyfile = config.get(section_name, 'ssl_key')
    ssl_version = None
    if config.has_option(section_name, 'ssl_version'):
        ssl_version = config.get(section_name, 'ssl_version')
    server = server_klass(
        handler_klass,
        address=(ip, port),
        ssl_certfile=ssl_certfile,
        ssl_keyfile=ssl_keyfile,
        ssl_version=ssl_version
    )

    if config.has_option(section_name, 'password_type'):
        password_type = config.get(section_name, 'password_type')
    else:
        password_type = 'md5'
    if config.has_option(section_name, 'password'):
        password = config.get(section_name, 'password')
        if config.has_option(section_name, 'username'):
            username = config.get(section_name, 'username')
        else:
            username = ''
        server.auth_add_creds(username, password, pwtype=password_type)
    cred_idx = 0
    while config.has_option(section_name, 'password' + str(cred_idx)):
        password = config.get(section_name, 'password' + str(cred_idx))
        if not config.has_option(section_name, 'username' + str(cred_idx)):
            break
        username = config.get(section_name, 'username' + str(cred_idx))
        server.auth_add_creds(username, password, pwtype=password_type)
        cred_idx += 1

    if web_root is None:
        server.serve_files = False
    else:
        server.serve_files = True
        server.serve_files_root = web_root
        if config.has_option(section_name, 'list_directories'):
            server.serve_files_list_directories = config.getboolean(section_name, 'list_directories')
    return server
python
[ "def", "build_server_from_config", "(", "config", ",", "section_name", ",", "server_klass", "=", "None", ",", "handler_klass", "=", "None", ")", ":", "server_klass", "=", "(", "server_klass", "or", "AdvancedHTTPServer", ")", "handler_klass", "=", "(", "handler_klass", "or", "RequestHandler", ")", "port", "=", "config", ".", "getint", "(", "section_name", ",", "'port'", ")", "web_root", "=", "None", "if", "config", ".", "has_option", "(", "section_name", ",", "'web_root'", ")", ":", "web_root", "=", "config", ".", "get", "(", "section_name", ",", "'web_root'", ")", "if", "config", ".", "has_option", "(", "section_name", ",", "'ip'", ")", ":", "ip", "=", "config", ".", "get", "(", "section_name", ",", "'ip'", ")", "else", ":", "ip", "=", "'0.0.0.0'", "ssl_certfile", "=", "None", "if", "config", ".", "has_option", "(", "section_name", ",", "'ssl_cert'", ")", ":", "ssl_certfile", "=", "config", ".", "get", "(", "section_name", ",", "'ssl_cert'", ")", "ssl_keyfile", "=", "None", "if", "config", ".", "has_option", "(", "section_name", ",", "'ssl_key'", ")", ":", "ssl_keyfile", "=", "config", ".", "get", "(", "section_name", ",", "'ssl_key'", ")", "ssl_version", "=", "None", "if", "config", ".", "has_option", "(", "section_name", ",", "'ssl_version'", ")", ":", "ssl_version", "=", "config", ".", "get", "(", "section_name", ",", "'ssl_version'", ")", "server", "=", "server_klass", "(", "handler_klass", ",", "address", "=", "(", "ip", ",", "port", ")", ",", "ssl_certfile", "=", "ssl_certfile", ",", "ssl_keyfile", "=", "ssl_keyfile", ",", "ssl_version", "=", "ssl_version", ")", "if", "config", ".", "has_option", "(", "section_name", ",", "'password_type'", ")", ":", "password_type", "=", "config", ".", "get", "(", "section_name", ",", "'password_type'", ")", "else", ":", "password_type", "=", "'md5'", "if", "config", ".", "has_option", "(", "section_name", ",", "'password'", ")", ":", "password", "=", "config", ".", "get", "(", "section_name", ",", "'password'", ")", "if", "config", ".", "has_option", "(", "section_name", ",", "'username'", ")", ":", "username", "=", "config", ".", "get", "(", "section_name", ",", "'username'", ")", "else", ":", "username", "=", "''", "server", ".", "auth_add_creds", "(", "username", ",", "password", ",", "pwtype", "=", "password_type", ")", "cred_idx", "=", "0", "while", "config", ".", "has_option", "(", "section_name", ",", "'password'", "+", "str", "(", "cred_idx", ")", ")", ":", "password", "=", "config", ".", "get", "(", "section_name", ",", "'password'", "+", "str", "(", "cred_idx", ")", ")", "if", "not", "config", ".", "has_option", "(", "section_name", ",", "'username'", "+", "str", "(", "cred_idx", ")", ")", ":", "break", "username", "=", "config", ".", "get", "(", "section_name", ",", "'username'", "+", "str", "(", "cred_idx", ")", ")", "server", ".", "auth_add_creds", "(", "username", ",", "password", ",", "pwtype", "=", "password_type", ")", "cred_idx", "+=", "1", "if", "web_root", "is", "None", ":", "server", ".", "serve_files", "=", "False", "else", ":", "server", ".", "serve_files", "=", "True", "server", ".", "serve_files_root", "=", "web_root", "if", "config", ".", "has_option", "(", "section_name", ",", "'list_directories'", ")", ":", "server", ".", "serve_files_list_directories", "=", "config", ".", "getboolean", "(", "section_name", ",", "'list_directories'", ")", "return", "server" ]
Build a server from a provided :py:class:`configparser.ConfigParser`
instance. If a ServerClass or HandlerClass is specified, then the
object must inherit from the corresponding AdvancedHTTPServer base
class.

:param config: Configuration to retrieve settings from.
:type config: :py:class:`configparser.ConfigParser`
:param str section_name: The section name of the configuration to use.
:param server_klass: Alternative server class to use.
:type server_klass: :py:class:`.AdvancedHTTPServer`
:param handler_klass: Alternative handler class to use.
:type handler_klass: :py:class:`.RequestHandler`
:return: A configured server instance.
:rtype: :py:class:`.AdvancedHTTPServer`
[ "Build", "a", "server", "from", "a", "provided", ":", "py", ":", "class", ":", "configparser", ".", "ConfigParser", "instance", ".", "If", "a", "ServerClass", "or", "HandlerClass", "is", "specified", "then", "the", "object", "must", "inherit", "from", "the", "corresponding", "AdvancedHTTPServer", "base", "class", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L323-L395
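The has_option/get calls above imply an INI layout like the following; every key shown is read by the function, while the concrete paths and credentials are invented:

# A config that exercises the options read above; keys mirror the
# config.get()/has_option() calls, values are illustrative.
from configparser import ConfigParser

ini_text = """
[server]
ip = 127.0.0.1
port = 8443
web_root = /var/www
list_directories = yes
ssl_cert = /etc/ssl/server.pem
username = admin
password = s3cret
password_type = md5
"""

config = ConfigParser()
config.read_string(ini_text)
# server = build_server_from_config(config, 'server')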
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
RPCClient.set_serializer
def set_serializer(self, serializer_name, compression=None):
    """
    Configure the serializer to use for communication with the server.
    The serializer specified must be valid and in the
    :py:data:`.g_serializer_drivers` map.

    :param str serializer_name: The name of the serializer to use.
    :param str compression: The name of a compression library to use.
    """
    self.serializer = Serializer(serializer_name, charset='UTF-8', compression=compression)
    self.logger.debug('using serializer: ' + serializer_name)
python
[ "def", "set_serializer", "(", "self", ",", "serializer_name", ",", "compression", "=", "None", ")", ":", "self", ".", "serializer", "=", "Serializer", "(", "serializer_name", ",", "charset", "=", "'UTF-8'", ",", "compression", "=", "compression", ")", "self", ".", "logger", ".", "debug", "(", "'using serializer: '", "+", "serializer_name", ")" ]
Configure the serializer to use for communication with the server.
The serializer specified must be valid and in the
:py:data:`.g_serializer_drivers` map.

:param str serializer_name: The name of the serializer to use.
:param str compression: The name of a compression library to use.
[ "Configure", "the", "serializer", "to", "use", "for", "communication", "with", "the", "server", ".", "The", "serializer", "specified", "must", "be", "valid", "and", "in", "the", ":", "py", ":", "data", ":", ".", "g_serializer_drivers", "map", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L548-L558
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
RPCClient.reconnect
def reconnect(self):
    """Reconnect to the remote server."""
    self.lock.acquire()
    if self.use_ssl:
        self.client = http.client.HTTPSConnection(self.host, self.port, context=self.ssl_context)
    else:
        self.client = http.client.HTTPConnection(self.host, self.port)
    self.lock.release()
python
[ "def", "reconnect", "(", "self", ")", ":", "self", ".", "lock", ".", "acquire", "(", ")", "if", "self", ".", "use_ssl", ":", "self", ".", "client", "=", "http", ".", "client", ".", "HTTPSConnection", "(", "self", ".", "host", ",", "self", ".", "port", ",", "context", "=", "self", ".", "ssl_context", ")", "else", ":", "self", ".", "client", "=", "http", ".", "client", ".", "HTTPConnection", "(", "self", ".", "host", ",", "self", ".", "port", ")", "self", ".", "lock", ".", "release", "(", ")" ]
Reconnect to the remote server.
[ "Reconnect", "to", "the", "remote", "server", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L571-L578
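The manual acquire/release pair keeps the lock held if a connection constructor raises; a with block (the style call() below already uses) is equivalent and exception-safe. A sketch of the same method body:

# Equivalent locking with a context manager, so the lock is released
# even if the HTTP(S)Connection constructor raises.
import http.client

def reconnect(self):
    """Reconnect to the remote server."""
    with self.lock:
        if self.use_ssl:
            self.client = http.client.HTTPSConnection(self.host, self.port, context=self.ssl_context)
        else:
            self.client = http.client.HTTPConnection(self.host, self.port)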
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
RPCClient.call
def call(self, method, *args, **kwargs):
    """
    Issue a call to the remote end point to execute the specified
    procedure.

    :param str method: The name of the remote procedure to execute.
    :return: The return value from the remote function.
    """
    if kwargs:
        options = self.encode(dict(args=args, kwargs=kwargs))
    else:
        options = self.encode(args)

    headers = {}
    if self.headers:
        headers.update(self.headers)
    headers['Content-Type'] = self.serializer.content_type
    headers['Content-Length'] = str(len(options))
    headers['Connection'] = 'close'

    if self.username is not None and self.password is not None:
        headers['Authorization'] = 'Basic ' + base64.b64encode((self.username + ':' + self.password).encode('UTF-8')).decode('UTF-8')

    method = os.path.join(self.uri_base, method)
    self.logger.debug('calling RPC method: ' + method[1:])
    try:
        with self.lock:
            self.client.request('RPC', method, options, headers)
            resp = self.client.getresponse()
    except http.client.ImproperConnectionState:
        raise RPCConnectionError('improper connection state')

    if resp.status != 200:
        raise RPCError(resp.reason, resp.status)

    resp_data = resp.read()
    resp_data = self.decode(resp_data)
    if not ('exception_occurred' in resp_data and 'result' in resp_data):
        raise RPCError('missing response information', resp.status)
    if resp_data['exception_occurred']:
        raise RPCError('remote method incurred an exception', resp.status, remote_exception=resp_data['exception'])
    return resp_data['result']
python
def call(self, method, *args, **kwargs): """ Issue a call to the remote end point to execute the specified procedure. :param str method: The name of the remote procedure to execute. :return: The return value from the remote function. """ if kwargs: options = self.encode(dict(args=args, kwargs=kwargs)) else: options = self.encode(args) headers = {} if self.headers: headers.update(self.headers) headers['Content-Type'] = self.serializer.content_type headers['Content-Length'] = str(len(options)) headers['Connection'] = 'close' if self.username is not None and self.password is not None: headers['Authorization'] = 'Basic ' + base64.b64encode((self.username + ':' + self.password).encode('UTF-8')).decode('UTF-8') method = os.path.join(self.uri_base, method) self.logger.debug('calling RPC method: ' + method[1:]) try: with self.lock: self.client.request('RPC', method, options, headers) resp = self.client.getresponse() except http.client.ImproperConnectionState: raise RPCConnectionError('improper connection state') if resp.status != 200: raise RPCError(resp.reason, resp.status) resp_data = resp.read() resp_data = self.decode(resp_data) if not ('exception_occurred' in resp_data and 'result' in resp_data): raise RPCError('missing response information', resp.status) if resp_data['exception_occurred']: raise RPCError('remote method incurred an exception', resp.status, remote_exception=resp_data['exception']) return resp_data['result']
[ "def", "call", "(", "self", ",", "method", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", ":", "options", "=", "self", ".", "encode", "(", "dict", "(", "args", "=", "args", ",", "kwargs", "=", "kwargs", ")", ")", "else", ":", "options", "=", "self", ".", "encode", "(", "args", ")", "headers", "=", "{", "}", "if", "self", ".", "headers", ":", "headers", ".", "update", "(", "self", ".", "headers", ")", "headers", "[", "'Content-Type'", "]", "=", "self", ".", "serializer", ".", "content_type", "headers", "[", "'Content-Length'", "]", "=", "str", "(", "len", "(", "options", ")", ")", "headers", "[", "'Connection'", "]", "=", "'close'", "if", "self", ".", "username", "is", "not", "None", "and", "self", ".", "password", "is", "not", "None", ":", "headers", "[", "'Authorization'", "]", "=", "'Basic '", "+", "base64", ".", "b64encode", "(", "(", "self", ".", "username", "+", "':'", "+", "self", ".", "password", ")", ".", "encode", "(", "'UTF-8'", ")", ")", ".", "decode", "(", "'UTF-8'", ")", "method", "=", "os", ".", "path", ".", "join", "(", "self", ".", "uri_base", ",", "method", ")", "self", ".", "logger", ".", "debug", "(", "'calling RPC method: '", "+", "method", "[", "1", ":", "]", ")", "try", ":", "with", "self", ".", "lock", ":", "self", ".", "client", ".", "request", "(", "'RPC'", ",", "method", ",", "options", ",", "headers", ")", "resp", "=", "self", ".", "client", ".", "getresponse", "(", ")", "except", "http", ".", "client", ".", "ImproperConnectionState", ":", "raise", "RPCConnectionError", "(", "'improper connection state'", ")", "if", "resp", ".", "status", "!=", "200", ":", "raise", "RPCError", "(", "resp", ".", "reason", ",", "resp", ".", "status", ")", "resp_data", "=", "resp", ".", "read", "(", ")", "resp_data", "=", "self", ".", "decode", "(", "resp_data", ")", "if", "not", "(", "'exception_occurred'", "in", "resp_data", "and", "'result'", "in", "resp_data", ")", ":", "raise", "RPCError", "(", "'missing response information'", ",", "resp", ".", "status", ")", "if", "resp_data", "[", "'exception_occurred'", "]", ":", "raise", "RPCError", "(", "'remote method incurred an exception'", ",", "resp", ".", "status", ",", "remote_exception", "=", "resp_data", "[", "'exception'", "]", ")", "return", "resp_data", "[", "'result'", "]" ]
Issue a call to the remote end point to execute the specified procedure. :param str method: The name of the remote procedure to execute. :return: The return value from the remote function.
[ "Issue", "a", "call", "to", "the", "remote", "end", "point", "to", "execute", "the", "specified", "procedure", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L580-L620
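A hedged sketch of RPCClient.call in use; the remote method name 'add' and the constructor keywords are illustrative assumptions, while RPCError and RPCConnectionError are the exception types raised in the code above.

from advancedhttpserver import RPCClient, RPCConnectionError, RPCError

client = RPCClient(('localhost', 8080), username='alice', password='secret')
try:
    total = client.call('add', 1, 2)  # sent as an 'RPC' request to /add under uri_base
except RPCConnectionError:
    client.reconnect()  # documented above: rebuilds the underlying HTTP connection
except RPCError as error:
    print('remote call failed:', error)  # covers non-200 responses and remote exceptions
else:
    print('result:', total)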
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
RPCClientCached.cache_call
def cache_call(self, method, *options): """ Call a remote method and store the result locally. Subsequent calls to the same method with the same arguments will return the cached result without invoking the remote procedure. Cached results are kept indefinitely and must be manually refreshed with a call to :py:meth:`.cache_call_refresh`. :param str method: The name of the remote procedure to execute. :return: The return value from the remote function. """ options_hash = self.encode(options) if len(options_hash) > 20: options_hash = hashlib.new('sha1', options_hash).digest() options_hash = sqlite3.Binary(options_hash) with self.cache_lock: cursor = self.cache_db.cursor() cursor.execute('SELECT return_value FROM cache WHERE method = ? AND options_hash = ?', (method, options_hash)) return_value = cursor.fetchone() if return_value: return_value = bytes(return_value[0]) return self.decode(return_value) return_value = self.call(method, *options) store_return_value = sqlite3.Binary(self.encode(return_value)) with self.cache_lock: cursor = self.cache_db.cursor() cursor.execute('INSERT INTO cache (method, options_hash, return_value) VALUES (?, ?, ?)', (method, options_hash, store_return_value)) self.cache_db.commit() return return_value
python
def cache_call(self, method, *options): """ Call a remote method and store the result locally. Subsequent calls to the same method with the same arguments will return the cached result without invoking the remote procedure. Cached results are kept indefinitely and must be manually refreshed with a call to :py:meth:`.cache_call_refresh`. :param str method: The name of the remote procedure to execute. :return: The return value from the remote function. """ options_hash = self.encode(options) if len(options_hash) > 20: options_hash = hashlib.new('sha1', options_hash).digest() options_hash = sqlite3.Binary(options_hash) with self.cache_lock: cursor = self.cache_db.cursor() cursor.execute('SELECT return_value FROM cache WHERE method = ? AND options_hash = ?', (method, options_hash)) return_value = cursor.fetchone() if return_value: return_value = bytes(return_value[0]) return self.decode(return_value) return_value = self.call(method, *options) store_return_value = sqlite3.Binary(self.encode(return_value)) with self.cache_lock: cursor = self.cache_db.cursor() cursor.execute('INSERT INTO cache (method, options_hash, return_value) VALUES (?, ?, ?)', (method, options_hash, store_return_value)) self.cache_db.commit() return return_value
[ "def", "cache_call", "(", "self", ",", "method", ",", "*", "options", ")", ":", "options_hash", "=", "self", ".", "encode", "(", "options", ")", "if", "len", "(", "options_hash", ")", ">", "20", ":", "options_hash", "=", "hashlib", ".", "new", "(", "'sha1'", ",", "options_hash", ")", ".", "digest", "(", ")", "options_hash", "=", "sqlite3", ".", "Binary", "(", "options_hash", ")", "with", "self", ".", "cache_lock", ":", "cursor", "=", "self", ".", "cache_db", ".", "cursor", "(", ")", "cursor", ".", "execute", "(", "'SELECT return_value FROM cache WHERE method = ? AND options_hash = ?'", ",", "(", "method", ",", "options_hash", ")", ")", "return_value", "=", "cursor", ".", "fetchone", "(", ")", "if", "return_value", ":", "return_value", "=", "bytes", "(", "return_value", "[", "0", "]", ")", "return", "self", ".", "decode", "(", "return_value", ")", "return_value", "=", "self", ".", "call", "(", "method", ",", "*", "options", ")", "store_return_value", "=", "sqlite3", ".", "Binary", "(", "self", ".", "encode", "(", "return_value", ")", ")", "with", "self", ".", "cache_lock", ":", "cursor", "=", "self", ".", "cache_db", ".", "cursor", "(", ")", "cursor", ".", "execute", "(", "'INSERT INTO cache (method, options_hash, return_value) VALUES (?, ?, ?)'", ",", "(", "method", ",", "options_hash", ",", "store_return_value", ")", ")", "self", ".", "cache_db", ".", "commit", "(", ")", "return", "return_value" ]
Call a remote method and store the result locally. Subsequent calls to the same method with the same arguments will return the cached result without invoking the remote procedure. Cached results are kept indefinitely and must be manually refreshed with a call to :py:meth:`.cache_call_refresh`. :param str method: The name of the remote procedure to execute. :return: The return value from the remote function.
[ "Call", "a", "remote", "method", "and", "store", "the", "result", "locally", ".", "Subsequent", "calls", "to", "the", "same", "method", "with", "the", "same", "arguments", "will", "return", "the", "cached", "result", "without", "invoking", "the", "remote", "procedure", ".", "Cached", "results", "are", "kept", "indefinitely", "and", "must", "be", "manually", "refreshed", "with", "a", "call", "to", ":", "py", ":", "meth", ":", ".", "cache_call_refresh", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L636-L665
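A minimal sketch of the caching behavior described above; the method name and arguments are illustrative.

from advancedhttpserver import RPCClientCached

client = RPCClientCached(('localhost', 8080))    # assumed constructor form
prices = client.cache_call('get_prices', 'EUR')  # first call round-trips to the server
prices = client.cache_call('get_prices', 'EUR')  # repeat call is answered from the sqlite cache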
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
RPCClientCached.cache_call_refresh
def cache_call_refresh(self, method, *options): """ Call a remote method and update the local cache with the result if it already existed. :param str method: The name of the remote procedure to execute. :return: The return value from the remote function. """ options_hash = self.encode(options) if len(options_hash) > 20: options_hash = hashlib.new('sha1', options_hash).digest() options_hash = sqlite3.Binary(options_hash) with self.cache_lock: cursor = self.cache_db.cursor() cursor.execute('DELETE FROM cache WHERE method = ? AND options_hash = ?', (method, options_hash)) return_value = self.call(method, *options) store_return_value = sqlite3.Binary(self.encode(return_value)) with self.cache_lock: cursor = self.cache_db.cursor() cursor.execute('INSERT INTO cache (method, options_hash, return_value) VALUES (?, ?, ?)', (method, options_hash, store_return_value)) self.cache_db.commit() return return_value
python
def cache_call_refresh(self, method, *options): """ Call a remote method and update the local cache with the result if it already existed. :param str method: The name of the remote procedure to execute. :return: The return value from the remote function. """ options_hash = self.encode(options) if len(options_hash) > 20: options_hash = hashlib.new('sha1', options_hash).digest() options_hash = sqlite3.Binary(options_hash) with self.cache_lock: cursor = self.cache_db.cursor() cursor.execute('DELETE FROM cache WHERE method = ? AND options_hash = ?', (method, options_hash)) return_value = self.call(method, *options) store_return_value = sqlite3.Binary(self.encode(return_value)) with self.cache_lock: cursor = self.cache_db.cursor() cursor.execute('INSERT INTO cache (method, options_hash, return_value) VALUES (?, ?, ?)', (method, options_hash, store_return_value)) self.cache_db.commit() return return_value
[ "def", "cache_call_refresh", "(", "self", ",", "method", ",", "*", "options", ")", ":", "options_hash", "=", "self", ".", "encode", "(", "options", ")", "if", "len", "(", "options_hash", ")", ">", "20", ":", "options_hash", "=", "hashlib", ".", "new", "(", "'sha1'", ",", "options", ")", ".", "digest", "(", ")", "options_hash", "=", "sqlite3", ".", "Binary", "(", "options_hash", ")", "with", "self", ".", "cache_lock", ":", "cursor", "=", "self", ".", "cache_db", ".", "cursor", "(", ")", "cursor", ".", "execute", "(", "'DELETE FROM cache WHERE method = ? AND options_hash = ?'", ",", "(", "method", ",", "options_hash", ")", ")", "return_value", "=", "self", ".", "call", "(", "method", ",", "*", "options", ")", "store_return_value", "=", "sqlite3", ".", "Binary", "(", "self", ".", "encode", "(", "return_value", ")", ")", "with", "self", ".", "cache_lock", ":", "cursor", "=", "self", ".", "cache_db", ".", "cursor", "(", ")", "cursor", ".", "execute", "(", "'INSERT INTO cache (method, options_hash, return_value) VALUES (?, ?, ?)'", ",", "(", "method", ",", "options_hash", ",", "store_return_value", ")", ")", "self", ".", "cache_db", ".", "commit", "(", ")", "return", "return_value" ]
Call a remote method and update the local cache with the result if it already existed. :param str method: The name of the remote procedure to execute. :return: The return value from the remote function.
[ "Call", "a", "remote", "method", "and", "update", "the", "local", "cache", "with", "the", "result", "if", "it", "already", "existed", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L667-L689
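Continuing the same illustrative sketch, cache_call_refresh forces a round trip even when a cached row exists, then stores the fresh result.

from advancedhttpserver import RPCClientCached

client = RPCClientCached(('localhost', 8080))
client.cache_call('get_prices', 'EUR')                   # populates the cache
fresh = client.cache_call_refresh('get_prices', 'EUR')   # deletes the row, calls, re-inserts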
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
RPCClientCached.cache_clear
def cache_clear(self): """Purge the local store of all cached function information.""" with self.cache_lock: cursor = self.cache_db.cursor() cursor.execute('DELETE FROM cache') self.cache_db.commit() self.logger.info('the RPC cache has been purged') return
python
def cache_clear(self): """Purge the local store of all cached function information.""" with self.cache_lock: cursor = self.cache_db.cursor() cursor.execute('DELETE FROM cache') self.cache_db.commit() self.logger.info('the RPC cache has been purged') return
[ "def", "cache_clear", "(", "self", ")", ":", "with", "self", ".", "cache_lock", ":", "cursor", "=", "self", ".", "cache_db", ".", "cursor", "(", ")", "cursor", ".", "execute", "(", "'DELETE FROM cache'", ")", "self", ".", "cache_db", ".", "commit", "(", ")", "self", ".", "logger", ".", "info", "(", "'the RPC cache has been purged'", ")", "return" ]
Purge the local store of all cached function information.
[ "Purge", "the", "local", "store", "of", "all", "cached", "function", "information", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L691-L698
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
RequestHandler.respond_file
def respond_file(self, file_path, attachment=False, query=None): """ Respond to the client by serving a file, either directly or as an attachment. :param str file_path: The path to the file to serve, this does not need to be in the web root. :param bool attachment: Whether to serve the file as a download by setting the Content-Disposition header. """ del query file_path = os.path.abspath(file_path) try: file_obj = open(file_path, 'rb') except IOError: self.respond_not_found() return self.send_response(200) self.send_header('Content-Type', self.guess_mime_type(file_path)) fs = os.fstat(file_obj.fileno()) self.send_header('Content-Length', str(fs[6])) if attachment: file_name = os.path.basename(file_path) self.send_header('Content-Disposition', 'attachment; filename=' + file_name) self.send_header('Last-Modified', self.date_time_string(fs.st_mtime)) self.end_headers() shutil.copyfileobj(file_obj, self.wfile) file_obj.close() return
python
def respond_file(self, file_path, attachment=False, query=None): """ Respond to the client by serving a file, either directly or as an attachment. :param str file_path: The path to the file to serve, this does not need to be in the web root. :param bool attachment: Whether to serve the file as a download by setting the Content-Disposition header. """ del query file_path = os.path.abspath(file_path) try: file_obj = open(file_path, 'rb') except IOError: self.respond_not_found() return self.send_response(200) self.send_header('Content-Type', self.guess_mime_type(file_path)) fs = os.fstat(file_obj.fileno()) self.send_header('Content-Length', str(fs[6])) if attachment: file_name = os.path.basename(file_path) self.send_header('Content-Disposition', 'attachment; filename=' + file_name) self.send_header('Last-Modified', self.date_time_string(fs.st_mtime)) self.end_headers() shutil.copyfileobj(file_obj, self.wfile) file_obj.close() return
[ "def", "respond_file", "(", "self", ",", "file_path", ",", "attachment", "=", "False", ",", "query", "=", "None", ")", ":", "del", "query", "file_path", "=", "os", ".", "path", ".", "abspath", "(", "file_path", ")", "try", ":", "file_obj", "=", "open", "(", "file_path", ",", "'rb'", ")", "except", "IOError", ":", "self", ".", "respond_not_found", "(", ")", "return", "self", ".", "send_response", "(", "200", ")", "self", ".", "send_header", "(", "'Content-Type'", ",", "self", ".", "guess_mime_type", "(", "file_path", ")", ")", "fs", "=", "os", ".", "fstat", "(", "file_obj", ".", "fileno", "(", ")", ")", "self", ".", "send_header", "(", "'Content-Length'", ",", "str", "(", "fs", "[", "6", "]", ")", ")", "if", "attachment", ":", "file_name", "=", "os", ".", "path", ".", "basename", "(", "file_path", ")", "self", ".", "send_header", "(", "'Content-Disposition'", ",", "'attachment; filename='", "+", "file_name", ")", "self", ".", "send_header", "(", "'Last-Modified'", ",", "self", ".", "date_time_string", "(", "fs", ".", "st_mtime", ")", ")", "self", ".", "end_headers", "(", ")", "shutil", ".", "copyfileobj", "(", "file_obj", ",", "self", ".", "wfile", ")", "file_obj", ".", "close", "(", ")", "return" ]
Respond to the client by serving a file, either directly or as an attachment. :param str file_path: The path to the file to serve, this does not need to be in the web root. :param bool attachment: Whether to serve the file as a download by setting the Content-Disposition header.
[ "Respond", "to", "the", "client", "by", "serving", "a", "file", "either", "directly", "or", "as", "an", "attachment", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L892-L918
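A hedged handler sketch using respond_file; the on_init hook and handler_map registration follow the dispatch code shown later in these records, but the route and file path are illustrative.

from advancedhttpserver import RequestHandler

class ReportHandler(RequestHandler):
    def on_init(self):
        self.handler_map['^report$'] = self.handle_report  # assumed registration hook

    def handle_report(self, query):
        # serve a file outside the web root as a browser download
        self.respond_file('/tmp/report.pdf', attachment=True, query=query)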
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
RequestHandler.respond_list_directory
def respond_list_directory(self, dir_path, query=None): """ Respond to the client with an HTML page listing the contents of the specified directory. :param str dir_path: The path of the directory to list the contents of. """ del query try: dir_contents = os.listdir(dir_path) except os.error: self.respond_not_found() return if os.path.normpath(dir_path) != self.__config['serve_files_root']: dir_contents.append('..') dir_contents.sort(key=lambda a: a.lower()) displaypath = html.escape(urllib.parse.unquote(self.path), quote=True) f = io.BytesIO() encoding = sys.getfilesystemencoding() f.write(b'<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n') f.write(b'<html>\n<title>Directory listing for ' + displaypath.encode(encoding) + b'</title>\n') f.write(b'<body>\n<h2>Directory listing for ' + displaypath.encode(encoding) + b'</h2>\n') f.write(b'<hr>\n<ul>\n') for name in dir_contents: fullname = os.path.join(dir_path, name) displayname = linkname = name # Append / for directories or @ for symbolic links if os.path.isdir(fullname): displayname = name + "/" linkname = name + "/" if os.path.islink(fullname): displayname = name + "@" # Note: a link to a directory displays with @ and links with / f.write(('<li><a href="' + urllib.parse.quote(linkname) + '">' + html.escape(displayname, quote=True) + '</a>\n').encode(encoding)) f.write(b'</ul>\n<hr>\n</body>\n</html>\n') length = f.tell() f.seek(0) self.send_response(200) self.send_header('Content-Type', 'text/html; charset=' + encoding) self.send_header('Content-Length', length) self.end_headers() shutil.copyfileobj(f, self.wfile) f.close() return
python
def respond_list_directory(self, dir_path, query=None): """ Respond to the client with an HTML page listing the contents of the specified directory. :param str dir_path: The path of the directory to list the contents of. """ del query try: dir_contents = os.listdir(dir_path) except os.error: self.respond_not_found() return if os.path.normpath(dir_path) != self.__config['serve_files_root']: dir_contents.append('..') dir_contents.sort(key=lambda a: a.lower()) displaypath = html.escape(urllib.parse.unquote(self.path), quote=True) f = io.BytesIO() encoding = sys.getfilesystemencoding() f.write(b'<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n') f.write(b'<html>\n<title>Directory listing for ' + displaypath.encode(encoding) + b'</title>\n') f.write(b'<body>\n<h2>Directory listing for ' + displaypath.encode(encoding) + b'</h2>\n') f.write(b'<hr>\n<ul>\n') for name in dir_contents: fullname = os.path.join(dir_path, name) displayname = linkname = name # Append / for directories or @ for symbolic links if os.path.isdir(fullname): displayname = name + "/" linkname = name + "/" if os.path.islink(fullname): displayname = name + "@" # Note: a link to a directory displays with @ and links with / f.write(('<li><a href="' + urllib.parse.quote(linkname) + '">' + html.escape(displayname, quote=True) + '</a>\n').encode(encoding)) f.write(b'</ul>\n<hr>\n</body>\n</html>\n') length = f.tell() f.seek(0) self.send_response(200) self.send_header('Content-Type', 'text/html; charset=' + encoding) self.send_header('Content-Length', length) self.end_headers() shutil.copyfileobj(f, self.wfile) f.close() return
[ "def", "respond_list_directory", "(", "self", ",", "dir_path", ",", "query", "=", "None", ")", ":", "del", "query", "try", ":", "dir_contents", "=", "os", ".", "listdir", "(", "dir_path", ")", "except", "os", ".", "error", ":", "self", ".", "respond_not_found", "(", ")", "return", "if", "os", ".", "path", ".", "normpath", "(", "dir_path", ")", "!=", "self", ".", "__config", "[", "'serve_files_root'", "]", ":", "dir_contents", ".", "append", "(", "'..'", ")", "dir_contents", ".", "sort", "(", "key", "=", "lambda", "a", ":", "a", ".", "lower", "(", ")", ")", "displaypath", "=", "html", ".", "escape", "(", "urllib", ".", "parse", ".", "unquote", "(", "self", ".", "path", ")", ",", "quote", "=", "True", ")", "f", "=", "io", ".", "BytesIO", "(", ")", "encoding", "=", "sys", ".", "getfilesystemencoding", "(", ")", "f", ".", "write", "(", "b'<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 3.2 Final//EN\">\\n'", ")", "f", ".", "write", "(", "b'<html>\\n<title>Directory listing for '", "+", "displaypath", ".", "encode", "(", "encoding", ")", "+", "b'</title>\\n'", ")", "f", ".", "write", "(", "b'<body>\\n<h2>Directory listing for '", "+", "displaypath", ".", "encode", "(", "encoding", ")", "+", "b'</h2>\\n'", ")", "f", ".", "write", "(", "b'<hr>\\n<ul>\\n'", ")", "for", "name", "in", "dir_contents", ":", "fullname", "=", "os", ".", "path", ".", "join", "(", "dir_path", ",", "name", ")", "displayname", "=", "linkname", "=", "name", "# Append / for directories or @ for symbolic links", "if", "os", ".", "path", ".", "isdir", "(", "fullname", ")", ":", "displayname", "=", "name", "+", "\"/\"", "linkname", "=", "name", "+", "\"/\"", "if", "os", ".", "path", ".", "islink", "(", "fullname", ")", ":", "displayname", "=", "name", "+", "\"@\"", "# Note: a link to a directory displays with @ and links with /", "f", ".", "write", "(", "(", "'<li><a href=\"'", "+", "urllib", ".", "parse", ".", "quote", "(", "linkname", ")", "+", "'\">'", "+", "html", ".", "escape", "(", "displayname", ",", "quote", "=", "True", ")", "+", "'</a>\\n'", ")", ".", "encode", "(", "encoding", ")", ")", "f", ".", "write", "(", "b'</ul>\\n<hr>\\n</body>\\n</html>\\n'", ")", "length", "=", "f", ".", "tell", "(", ")", "f", ".", "seek", "(", "0", ")", "self", ".", "send_response", "(", "200", ")", "self", ".", "send_header", "(", "'Content-Type'", ",", "'text/html; charset='", "+", "encoding", ")", "self", ".", "send_header", "(", "'Content-Length'", ",", "length", ")", "self", ".", "end_headers", "(", ")", "shutil", ".", "copyfileobj", "(", "f", ",", "self", ".", "wfile", ")", "f", ".", "close", "(", ")", "return" ]
Respond to the client with an HTML page listing the contents of the specified directory. :param str dir_path: The path of the directory to list the contents of.
[ "Respond", "to", "the", "client", "with", "an", "HTML", "page", "listing", "the", "contents", "of", "the", "specified", "directory", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L920-L965
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
RequestHandler.respond_redirect
def respond_redirect(self, location='/'): """ Respond to the client with a 301 message and redirect them with a Location header. :param str location: The new location to redirect the client to. """ self.send_response(301) self.send_header('Content-Length', 0) self.send_header('Location', location) self.end_headers() return
python
def respond_redirect(self, location='/'): """ Respond to the client with a 301 message and redirect them with a Location header. :param str location: The new location to redirect the client to. """ self.send_response(301) self.send_header('Content-Length', 0) self.send_header('Location', location) self.end_headers() return
[ "def", "respond_redirect", "(", "self", ",", "location", "=", "'/'", ")", ":", "self", ".", "send_response", "(", "301", ")", "self", ".", "send_header", "(", "'Content-Length'", ",", "0", ")", "self", ".", "send_header", "(", "'Location'", ",", "location", ")", "self", ".", "end_headers", "(", ")", "return" ]
Respond to the client with a 301 message and redirect them with a Location header. :param str location: The new location to redirect the client to.
[ "Respond", "to", "the", "client", "with", "a", "301", "message", "and", "redirect", "them", "with", "a", "Location", "header", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L972-L983
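A short illustrative sketch of respond_redirect inside a handler; the route names are assumptions.

from advancedhttpserver import RequestHandler

class RedirectHandler(RequestHandler):
    def on_init(self):
        self.handler_map['^old-page$'] = self.handle_old_page

    def handle_old_page(self, query):
        del query
        self.respond_redirect('/new-page')  # sends a 301 with a Location header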
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
RequestHandler.respond_server_error
def respond_server_error(self, status=None, status_line=None, message=None): """ Handle an internal server error, logging a traceback if executed within an exception handler. :param int status: The status code to respond to the client with. :param str status_line: The status message to respond to the client with. :param str message: The body of the response that is sent to the client. """ (ex_type, ex_value, ex_traceback) = sys.exc_info() if ex_type: (ex_file_name, ex_line, _, _) = traceback.extract_tb(ex_traceback)[-1] line_info = "{0}:{1}".format(ex_file_name, ex_line) log_msg = "encountered {0} in {1}".format(repr(ex_value), line_info) self.server.logger.error(log_msg, exc_info=True) status = (status or 500) status_line = (status_line or http.client.responses.get(status, 'Internal Server Error')).strip() self.send_response(status, status_line) message = (message or status_line) if isinstance(message, (str, bytes)): self.send_header('Content-Length', len(message)) self.end_headers() if isinstance(message, str): self.wfile.write(message.encode(sys.getdefaultencoding())) else: self.wfile.write(message) elif hasattr(message, 'fileno'): fs = os.fstat(message.fileno()) self.send_header('Content-Length', fs[6]) self.end_headers() shutil.copyfileobj(message, self.wfile) else: self.end_headers() return
python
def respond_server_error(self, status=None, status_line=None, message=None): """ Handle an internal server error, logging a traceback if executed within an exception handler. :param int status: The status code to respond to the client with. :param str status_line: The status message to respond to the client with. :param str message: The body of the response that is sent to the client. """ (ex_type, ex_value, ex_traceback) = sys.exc_info() if ex_type: (ex_file_name, ex_line, _, _) = traceback.extract_tb(ex_traceback)[-1] line_info = "{0}:{1}".format(ex_file_name, ex_line) log_msg = "encountered {0} in {1}".format(repr(ex_value), line_info) self.server.logger.error(log_msg, exc_info=True) status = (status or 500) status_line = (status_line or http.client.responses.get(status, 'Internal Server Error')).strip() self.send_response(status, status_line) message = (message or status_line) if isinstance(message, (str, bytes)): self.send_header('Content-Length', len(message)) self.end_headers() if isinstance(message, str): self.wfile.write(message.encode(sys.getdefaultencoding())) else: self.wfile.write(message) elif hasattr(message, 'fileno'): fs = os.fstat(message.fileno()) self.send_header('Content-Length', fs[6]) self.end_headers() shutil.copyfileobj(message, self.wfile) else: self.end_headers() return
[ "def", "respond_server_error", "(", "self", ",", "status", "=", "None", ",", "status_line", "=", "None", ",", "message", "=", "None", ")", ":", "(", "ex_type", ",", "ex_value", ",", "ex_traceback", ")", "=", "sys", ".", "exc_info", "(", ")", "if", "ex_type", ":", "(", "ex_file_name", ",", "ex_line", ",", "_", ",", "_", ")", "=", "traceback", ".", "extract_tb", "(", "ex_traceback", ")", "[", "-", "1", "]", "line_info", "=", "\"{0}:{1}\"", ".", "format", "(", "ex_file_name", ",", "ex_line", ")", "log_msg", "=", "\"encountered {0} in {1}\"", ".", "format", "(", "repr", "(", "ex_value", ")", ",", "line_info", ")", "self", ".", "server", ".", "logger", ".", "error", "(", "log_msg", ",", "exc_info", "=", "True", ")", "status", "=", "(", "status", "or", "500", ")", "status_line", "=", "(", "status_line", "or", "http", ".", "client", ".", "responses", ".", "get", "(", "status", ",", "'Internal Server Error'", ")", ")", ".", "strip", "(", ")", "self", ".", "send_response", "(", "status", ",", "status_line", ")", "message", "=", "(", "message", "or", "status_line", ")", "if", "isinstance", "(", "message", ",", "(", "str", ",", "bytes", ")", ")", ":", "self", ".", "send_header", "(", "'Content-Length'", ",", "len", "(", "message", ")", ")", "self", ".", "end_headers", "(", ")", "if", "isinstance", "(", "message", ",", "str", ")", ":", "self", ".", "wfile", ".", "write", "(", "message", ".", "encode", "(", "sys", ".", "getdefaultencoding", "(", ")", ")", ")", "else", ":", "self", ".", "wfile", ".", "write", "(", "message", ")", "elif", "hasattr", "(", "message", ",", "'fileno'", ")", ":", "fs", "=", "os", ".", "fstat", "(", "message", ".", "fileno", "(", ")", ")", "self", ".", "send_header", "(", "'Content-Length'", ",", "fs", "[", "6", "]", ")", "self", ".", "end_headers", "(", ")", "shutil", ".", "copyfileobj", "(", "message", ",", "self", ".", "wfile", ")", "else", ":", "self", ".", "end_headers", "(", ")", "return" ]
Handle an internal server error, logging a traceback if executed within an exception handler. :param int status: The status code to respond to the client with. :param str status_line: The status message to respond to the client with. :param str message: The body of the response that is sent to the client.
[ "Handle", "an", "internal", "server", "error", "logging", "a", "traceback", "if", "executed", "within", "an", "exception", "handler", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L985-L1018
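An illustrative sketch of calling respond_server_error from inside an exception handler, which (per the code above) also logs the traceback; the query layout with list values is an assumption based on urllib.parse.parse_qs.

from advancedhttpserver import RequestHandler

class DivideHandler(RequestHandler):
    def on_init(self):
        self.handler_map['^divide$'] = self.handle_divide

    def handle_divide(self, query):
        try:
            result = 100 / int(query['x'][0])
        except (KeyError, ValueError, ZeroDivisionError):
            # sys.exc_info() is populated here, so the error is logged with a traceback
            self.respond_server_error(status=400, status_line='Bad Request')
            return
        self.send_response_full(str(result).encode('UTF-8'))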
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
RequestHandler.respond_unauthorized
def respond_unauthorized(self, request_authentication=False): """ Respond to the client that the request is unauthorized. :param bool request_authentication: Whether to request basic authentication information by sending a WWW-Authenticate header. """ headers = {} if request_authentication: headers['WWW-Authenticate'] = 'Basic realm="' + self.__config['server_version'] + '"' self.send_response_full(b'Unauthorized', status=401, headers=headers) return
python
def respond_unauthorized(self, request_authentication=False): """ Respond to the client that the request is unauthorized. :param bool request_authentication: Whether to request basic authentication information by sending a WWW-Authenticate header. """ headers = {} if request_authentication: headers['WWW-Authenticate'] = 'Basic realm="' + self.__config['server_version'] + '"' self.send_response_full(b'Unauthorized', status=401, headers=headers) return
[ "def", "respond_unauthorized", "(", "self", ",", "request_authentication", "=", "False", ")", ":", "headers", "=", "{", "}", "if", "request_authentication", ":", "headers", "[", "'WWW-Authenticate'", "]", "=", "'Basic realm=\"'", "+", "self", ".", "__config", "[", "'server_version'", "]", "+", "'\"'", "self", ".", "send_response_full", "(", "b'Unauthorized'", ",", "status", "=", "401", ",", "headers", "=", "headers", ")", "return" ]
Respond to the client that the request is unauthorized. :param bool request_authentication: Whether to request basic authentication information by sending a WWW-Authenticate header.
[ "Respond", "to", "the", "client", "that", "the", "request", "is", "unauthorized", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L1020-L1030
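An illustrative sketch of respond_unauthorized; whether basic_auth_user defaults to None is an assumption, hence the defensive getattr.

from advancedhttpserver import RequestHandler

class AdminHandler(RequestHandler):
    def on_init(self):
        self.handler_map['^admin$'] = self.handle_admin

    def handle_admin(self, query):
        del query
        user = getattr(self, 'basic_auth_user', None)
        if user is None:
            # ask the browser for credentials via the WWW-Authenticate header
            self.respond_unauthorized(request_authentication=True)
            return
        self.send_response_full(b'welcome, ' + user.encode('UTF-8'))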
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
RequestHandler.dispatch_handler
def dispatch_handler(self, query=None): """ Dispatch functions based on the established handler_map. It is generally not necessary to override this function and doing so will prevent any handlers from being executed. This function is executed automatically when requests of either GET, HEAD, or POST are received. :param dict query: Parsed query parameters from the corresponding request. """ query = (query or {}) # normalize the path # abandon query parameters self.path = self.path.split('?', 1)[0] self.path = self.path.split('#', 1)[0] original_path = urllib.parse.unquote(self.path) self.path = posixpath.normpath(original_path) words = self.path.split('/') words = filter(None, words) tmp_path = '' for word in words: _, word = os.path.splitdrive(word) _, word = os.path.split(word) if word in (os.curdir, os.pardir): continue tmp_path = os.path.join(tmp_path, word) self.path = tmp_path if self.path == 'robots.txt' and self.__config['serve_robots_txt']: self.send_response_full(self.__config['robots_txt']) return self.cookies = http.cookies.SimpleCookie(self.headers.get('cookie', '')) handler, is_method = self.__get_handler(is_rpc=False) if handler is not None: try: handler(*((query,) if is_method else (self, query))) except Exception: self.respond_server_error() return if not self.__config['serve_files']: self.respond_not_found() return file_path = self.__config['serve_files_root'] file_path = os.path.join(file_path, tmp_path) if os.path.isfile(file_path) and os.access(file_path, os.R_OK): self.respond_file(file_path, query=query) return elif os.path.isdir(file_path) and os.access(file_path, os.R_OK): if not original_path.endswith('/'): # redirect browser, doing what apache does destination = self.path + '/' if self.command == 'GET' and self.query_data: destination += '?' + urllib.parse.urlencode(self.query_data, True) self.respond_redirect(destination) return for index in ['index.html', 'index.htm']: index = os.path.join(file_path, index) if os.path.isfile(index) and os.access(index, os.R_OK): self.respond_file(index, query=query) return if self.__config['serve_files_list_directories']: self.respond_list_directory(file_path, query=query) return self.respond_not_found() return
python
def dispatch_handler(self, query=None): """ Dispatch functions based on the established handler_map. It is generally not necessary to override this function and doing so will prevent any handlers from being executed. This function is executed automatically when requests of either GET, HEAD, or POST are received. :param dict query: Parsed query parameters from the corresponding request. """ query = (query or {}) # normalize the path # abandon query parameters self.path = self.path.split('?', 1)[0] self.path = self.path.split('#', 1)[0] original_path = urllib.parse.unquote(self.path) self.path = posixpath.normpath(original_path) words = self.path.split('/') words = filter(None, words) tmp_path = '' for word in words: _, word = os.path.splitdrive(word) _, word = os.path.split(word) if word in (os.curdir, os.pardir): continue tmp_path = os.path.join(tmp_path, word) self.path = tmp_path if self.path == 'robots.txt' and self.__config['serve_robots_txt']: self.send_response_full(self.__config['robots_txt']) return self.cookies = http.cookies.SimpleCookie(self.headers.get('cookie', '')) handler, is_method = self.__get_handler(is_rpc=False) if handler is not None: try: handler(*((query,) if is_method else (self, query))) except Exception: self.respond_server_error() return if not self.__config['serve_files']: self.respond_not_found() return file_path = self.__config['serve_files_root'] file_path = os.path.join(file_path, tmp_path) if os.path.isfile(file_path) and os.access(file_path, os.R_OK): self.respond_file(file_path, query=query) return elif os.path.isdir(file_path) and os.access(file_path, os.R_OK): if not original_path.endswith('/'): # redirect browser, doing what apache does destination = self.path + '/' if self.command == 'GET' and self.query_data: destination += '?' + urllib.parse.urlencode(self.query_data, True) self.respond_redirect(destination) return for index in ['index.html', 'index.htm']: index = os.path.join(file_path, index) if os.path.isfile(index) and os.access(index, os.R_OK): self.respond_file(index, query=query) return if self.__config['serve_files_list_directories']: self.respond_list_directory(file_path, query=query) return self.respond_not_found() return
[ "def", "dispatch_handler", "(", "self", ",", "query", "=", "None", ")", ":", "query", "=", "(", "query", "or", "{", "}", ")", "# normalize the path", "# abandon query parameters", "self", ".", "path", "=", "self", ".", "path", ".", "split", "(", "'?'", ",", "1", ")", "[", "0", "]", "self", ".", "path", "=", "self", ".", "path", ".", "split", "(", "'#'", ",", "1", ")", "[", "0", "]", "original_path", "=", "urllib", ".", "parse", ".", "unquote", "(", "self", ".", "path", ")", "self", ".", "path", "=", "posixpath", ".", "normpath", "(", "original_path", ")", "words", "=", "self", ".", "path", ".", "split", "(", "'/'", ")", "words", "=", "filter", "(", "None", ",", "words", ")", "tmp_path", "=", "''", "for", "word", "in", "words", ":", "_", ",", "word", "=", "os", ".", "path", ".", "splitdrive", "(", "word", ")", "_", ",", "word", "=", "os", ".", "path", ".", "split", "(", "word", ")", "if", "word", "in", "(", "os", ".", "curdir", ",", "os", ".", "pardir", ")", ":", "continue", "tmp_path", "=", "os", ".", "path", ".", "join", "(", "tmp_path", ",", "word", ")", "self", ".", "path", "=", "tmp_path", "if", "self", ".", "path", "==", "'robots.txt'", "and", "self", ".", "__config", "[", "'serve_robots_txt'", "]", ":", "self", ".", "send_response_full", "(", "self", ".", "__config", "[", "'robots_txt'", "]", ")", "return", "self", ".", "cookies", "=", "http", ".", "cookies", ".", "SimpleCookie", "(", "self", ".", "headers", ".", "get", "(", "'cookie'", ",", "''", ")", ")", "handler", ",", "is_method", "=", "self", ".", "__get_handler", "(", "is_rpc", "=", "False", ")", "if", "handler", "is", "not", "None", ":", "try", ":", "handler", "(", "*", "(", "(", "query", ",", ")", "if", "is_method", "else", "(", "self", ",", "query", ")", ")", ")", "except", "Exception", ":", "self", ".", "respond_server_error", "(", ")", "return", "if", "not", "self", ".", "__config", "[", "'serve_files'", "]", ":", "self", ".", "respond_not_found", "(", ")", "return", "file_path", "=", "self", ".", "__config", "[", "'serve_files_root'", "]", "file_path", "=", "os", ".", "path", ".", "join", "(", "file_path", ",", "tmp_path", ")", "if", "os", ".", "path", ".", "isfile", "(", "file_path", ")", "and", "os", ".", "access", "(", "file_path", ",", "os", ".", "R_OK", ")", ":", "self", ".", "respond_file", "(", "file_path", ",", "query", "=", "query", ")", "return", "elif", "os", ".", "path", ".", "isdir", "(", "file_path", ")", "and", "os", ".", "access", "(", "file_path", ",", "os", ".", "R_OK", ")", ":", "if", "not", "original_path", ".", "endswith", "(", "'/'", ")", ":", "# redirect browser, doing what apache does", "destination", "=", "self", ".", "path", "+", "'/'", "if", "self", ".", "command", "==", "'GET'", "and", "self", ".", "query_data", ":", "destination", "+=", "'?'", "+", "urllib", ".", "parse", ".", "urlencode", "(", "self", ".", "query_data", ",", "True", ")", "self", ".", "respond_redirect", "(", "destination", ")", "return", "for", "index", "in", "[", "'index.html'", ",", "'index.htm'", "]", ":", "index", "=", "os", ".", "path", ".", "join", "(", "file_path", ",", "index", ")", "if", "os", ".", "path", ".", "isfile", "(", "index", ")", "and", "os", ".", "access", "(", "index", ",", "os", ".", "R_OK", ")", ":", "self", ".", "respond_file", "(", "index", ",", "query", "=", "query", ")", "return", "if", "self", ".", "__config", "[", "'serve_files_list_directories'", "]", ":", "self", ".", "respond_list_directory", "(", "file_path", ",", "query", "=", "query", ")", "return", "self", ".", "respond_not_found", "(", 
")", "return" ]
Dispatch functions based on the established handler_map. It is generally not necessary to override this function and doing so will prevent any handlers from being executed. This function is executed automatically when requests of either GET, HEAD, or POST are received. :param dict query: Parsed query parameters from the corresponding request.
[ "Dispatch", "functions", "based", "on", "the", "established", "handler_map", ".", "It", "is", "generally", "not", "necessary", "to", "override", "this", "function", "and", "doing", "so", "will", "prevent", "any", "handlers", "from", "being", "executed", ".", "This", "function", "is", "executed", "automatically", "when", "requests", "of", "either", "GET", "HEAD", "or", "POST", "are", "received", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L1032-L1099
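A hedged end-to-end sketch of how dispatch_handler resolves requests: handler_map entries are consulted first, then file serving. The AdvancedHTTPServer constructor form and the serve_files / serve_files_root property names are assumptions inferred from the config keys in the code above.

from advancedhttpserver import AdvancedHTTPServer, RequestHandler

class HelloHandler(RequestHandler):
    def on_init(self):
        # keys are matched against the normalized self.path
        self.handler_map['^hello$'] = self.handle_hello

    def handle_hello(self, query):
        del query
        self.send_response_full(b'Hello, world!')

server = AdvancedHTTPServer(HelloHandler, address=('127.0.0.1', 8080))
server.serve_files = True        # unmatched paths fall through to file serving
server.serve_files_root = '.'
server.serve_forever()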
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
RequestHandler.guess_mime_type
def guess_mime_type(self, path): """ Guess an appropriate MIME type based on the extension of the provided path. :param str path: The path of the file to analyze. :return: The guessed MIME type, or the default if none is found. :rtype: str """ _, ext = posixpath.splitext(path) if ext in self.extensions_map: return self.extensions_map[ext] ext = ext.lower() return self.extensions_map[ext if ext in self.extensions_map else '']
python
def guess_mime_type(self, path): """ Guess an appropriate MIME type based on the extension of the provided path. :param str path: The path of the file to analyze. :return: The guessed MIME type, or the default if none is found. :rtype: str """ _, ext = posixpath.splitext(path) if ext in self.extensions_map: return self.extensions_map[ext] ext = ext.lower() return self.extensions_map[ext if ext in self.extensions_map else '']
[ "def", "guess_mime_type", "(", "self", ",", "path", ")", ":", "_", ",", "ext", "=", "posixpath", ".", "splitext", "(", "path", ")", "if", "ext", "in", "self", ".", "extensions_map", ":", "return", "self", ".", "extensions_map", "[", "ext", "]", "ext", "=", "ext", ".", "lower", "(", ")", "return", "self", ".", "extensions_map", "[", "ext", "if", "ext", "in", "self", ".", "extensions_map", "else", "''", "]" ]
Guess an appropriate MIME type based on the extension of the provided path. :param str path: The path of the file to analyze. :return: The guessed MIME type, or the default if none is found. :rtype: str
[ "Guess", "an", "appropriate", "MIME", "type", "based", "on", "the", "extension", "of", "the", "provided", "path", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L1135-L1148
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
RequestHandler.check_authorization
def check_authorization(self): """ Check for the presence of a basic auth Authorization header and if the credentials contained within it are valid. :return: Whether or not the credentials are valid. :rtype: bool """ try: store = self.__config.get('basic_auth') if store is None: return True auth_info = self.headers.get('Authorization') if not auth_info: return False auth_info = auth_info.split() if len(auth_info) != 2 or auth_info[0] != 'Basic': return False auth_info = base64.b64decode(auth_info[1]).decode(sys.getdefaultencoding()) username = auth_info.split(':')[0] password = ':'.join(auth_info.split(':')[1:]) password_bytes = password.encode(sys.getdefaultencoding()) if hasattr(self, 'custom_authentication'): if self.custom_authentication(username, password): self.basic_auth_user = username return True return False if not username in store: self.server.logger.warning('received invalid username: ' + username) return False password_data = store[username] if password_data['type'] == 'plain': if password == password_data['value']: self.basic_auth_user = username return True elif hashlib.new(password_data['type'], password_bytes).digest() == password_data['value']: self.basic_auth_user = username return True self.server.logger.warning('received invalid password from user: ' + username) except Exception: pass return False
python
def check_authorization(self): """ Check for the presence of a basic auth Authorization header and if the credentials contained within it are valid. :return: Whether or not the credentials are valid. :rtype: bool """ try: store = self.__config.get('basic_auth') if store is None: return True auth_info = self.headers.get('Authorization') if not auth_info: return False auth_info = auth_info.split() if len(auth_info) != 2 or auth_info[0] != 'Basic': return False auth_info = base64.b64decode(auth_info[1]).decode(sys.getdefaultencoding()) username = auth_info.split(':')[0] password = ':'.join(auth_info.split(':')[1:]) password_bytes = password.encode(sys.getdefaultencoding()) if hasattr(self, 'custom_authentication'): if self.custom_authentication(username, password): self.basic_auth_user = username return True return False if not username in store: self.server.logger.warning('received invalid username: ' + username) return False password_data = store[username] if password_data['type'] == 'plain': if password == password_data['value']: self.basic_auth_user = username return True elif hashlib.new(password_data['type'], password_bytes).digest() == password_data['value']: self.basic_auth_user = username return True self.server.logger.warning('received invalid password from user: ' + username) except Exception: pass return False
[ "def", "check_authorization", "(", "self", ")", ":", "try", ":", "store", "=", "self", ".", "__config", ".", "get", "(", "'basic_auth'", ")", "if", "store", "is", "None", ":", "return", "True", "auth_info", "=", "self", ".", "headers", ".", "get", "(", "'Authorization'", ")", "if", "not", "auth_info", ":", "return", "False", "auth_info", "=", "auth_info", ".", "split", "(", ")", "if", "len", "(", "auth_info", ")", "!=", "2", "or", "auth_info", "[", "0", "]", "!=", "'Basic'", ":", "return", "False", "auth_info", "=", "base64", ".", "b64decode", "(", "auth_info", "[", "1", "]", ")", ".", "decode", "(", "sys", ".", "getdefaultencoding", "(", ")", ")", "username", "=", "auth_info", ".", "split", "(", "':'", ")", "[", "0", "]", "password", "=", "':'", ".", "join", "(", "auth_info", ".", "split", "(", "':'", ")", "[", "1", ":", "]", ")", "password_bytes", "=", "password", ".", "encode", "(", "sys", ".", "getdefaultencoding", "(", ")", ")", "if", "hasattr", "(", "self", ",", "'custom_authentication'", ")", ":", "if", "self", ".", "custom_authentication", "(", "username", ",", "password", ")", ":", "self", ".", "basic_auth_user", "=", "username", "return", "True", "return", "False", "if", "not", "username", "in", "store", ":", "self", ".", "server", ".", "logger", ".", "warning", "(", "'received invalid username: '", "+", "username", ")", "return", "False", "password_data", "=", "store", "[", "username", "]", "if", "password_data", "[", "'type'", "]", "==", "'plain'", ":", "if", "password", "==", "password_data", "[", "'value'", "]", ":", "self", ".", "basic_auth_user", "=", "username", "return", "True", "elif", "hashlib", ".", "new", "(", "password_data", "[", "'type'", "]", ",", "password_bytes", ")", ".", "digest", "(", ")", "==", "password_data", "[", "'value'", "]", ":", "self", ".", "basic_auth_user", "=", "username", "return", "True", "self", ".", "server", ".", "logger", ".", "warning", "(", "'received invalid password from user: '", "+", "username", ")", "except", "Exception", ":", "pass", "return", "False" ]
Check for the presence of a basic auth Authorization header and if the credentials contained within it are valid. :return: Whether or not the credentials are valid. :rtype: bool
[ "Check", "for", "the", "presence", "of", "a", "basic", "auth", "Authorization", "header", "and", "if", "the", "credentials", "contained", "within", "in", "are", "valid", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L1162-L1204
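The code above consults an optional custom_authentication hook before checking the credential store; a minimal sketch of overriding it (credentials illustrative). Note that per the code, the hook only runs when a basic_auth store is configured, since an unset store short-circuits to True.

from advancedhttpserver import RequestHandler

class AuthHandler(RequestHandler):
    def custom_authentication(self, username, password):
        # returning True accepts the request and records self.basic_auth_user
        return username == 'alice' and password == 'secret'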
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
RequestHandler.cookie_get
def cookie_get(self, name): """ Check for a cookie value by name. :param str name: Name of the cookie value to retrieve. :return: Returns the cookie value if it's set or None if it's not found. """ if not hasattr(self, 'cookies'): return None if self.cookies.get(name): return self.cookies.get(name).value return None
python
def cookie_get(self, name): """ Check for a cookie value by name. :param str name: Name of the cookie value to retrieve. :return: Returns the cookie value if it's set or None if it's not found. """ if not hasattr(self, 'cookies'): return None if self.cookies.get(name): return self.cookies.get(name).value return None
[ "def", "cookie_get", "(", "self", ",", "name", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'cookies'", ")", ":", "return", "None", "if", "self", ".", "cookies", ".", "get", "(", "name", ")", ":", "return", "self", ".", "cookies", ".", "get", "(", "name", ")", ".", "value", "return", "None" ]
Check for a cookie value by name. :param str name: Name of the cookie value to retrieve. :return: Returns the cookie value if it's set or None if it's not found.
[ "Check", "for", "a", "cookie", "value", "by", "name", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L1206-L1217
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
RequestHandler.cookie_set
def cookie_set(self, name, value): """ Set the value of a client cookie. This can only be called while headers can be sent. :param str name: The name of the cookie value to set. :param str value: The value of the cookie to set. """ if not self.headers_active: raise RuntimeError('headers have already been ended') cookie = "{0}={1}; Path=/; HttpOnly".format(name, value) self.send_header('Set-Cookie', cookie)
python
def cookie_set(self, name, value): """ Set the value of a client cookie. This can only be called while headers can be sent. :param str name: The name of the cookie value to set. :param str value: The value of the cookie to set. """ if not self.headers_active: raise RuntimeError('headers have already been ended') cookie = "{0}={1}; Path=/; HttpOnly".format(name, value) self.send_header('Set-Cookie', cookie)
[ "def", "cookie_set", "(", "self", ",", "name", ",", "value", ")", ":", "if", "not", "self", ".", "headers_active", ":", "raise", "RuntimeError", "(", "'headers have already been ended'", ")", "cookie", "=", "\"{0}={1}; Path=/; HttpOnly\"", ".", "format", "(", "name", ",", "value", ")", "self", ".", "send_header", "(", "'Set-Cookie'", ",", "cookie", ")" ]
Set the value of a client cookie. This can only be called while headers can be sent. :param str name: The name of the cookie value to set. :param str value: The value of the cookie to set.
[ "Set", "the", "value", "of", "a", "client", "cookie", ".", "This", "can", "only", "be", "called", "while", "headers", "can", "be", "sent", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L1219-L1230
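An illustrative sketch pairing cookie_get and cookie_set; note cookie_set must run after send_response and before end_headers, while cookie_get relies on self.cookies being parsed during dispatch (see dispatch_handler above).

from advancedhttpserver import RequestHandler

class VisitHandler(RequestHandler):
    def on_init(self):
        self.handler_map['^visit$'] = self.handle_visit

    def handle_visit(self, query):
        del query
        count = int(self.cookie_get('visits') or 0) + 1
        self.send_response(200)
        self.cookie_set('visits', str(count))  # headers are still active here
        self.send_header('Content-Length', 2)
        self.end_headers()
        self.wfile.write(b'ok')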
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
RequestHandler.get_content_type_charset
def get_content_type_charset(self, default='UTF-8'): """ Inspect the Content-Type header to retrieve the charset that the client has specified. :param str default: The default charset to return if none exists. :return: The charset of the request. :rtype: str """ encoding = default header = self.headers.get('Content-Type', '') idx = header.find('charset=') if idx > 0: encoding = (header[idx + 8:].split(' ', 1)[0] or encoding) return encoding
python
def get_content_type_charset(self, default='UTF-8'): """ Inspect the Content-Type header to retrieve the charset that the client has specified. :param str default: The default charset to return if none exists. :return: The charset of the request. :rtype: str """ encoding = default header = self.headers.get('Content-Type', '') idx = header.find('charset=') if idx > 0: encoding = (header[idx + 8:].split(' ', 1)[0] or encoding) return encoding
[ "def", "get_content_type_charset", "(", "self", ",", "default", "=", "'UTF-8'", ")", ":", "encoding", "=", "default", "header", "=", "self", ".", "headers", ".", "get", "(", "'Content-Type'", ",", "''", ")", "idx", "=", "header", ".", "find", "(", "'charset='", ")", "if", "idx", ">", "0", ":", "encoding", "=", "(", "header", "[", "idx", "+", "8", ":", "]", ".", "split", "(", "' '", ",", "1", ")", "[", "0", "]", "or", "encoding", ")", "return", "encoding" ]
Inspect the Content-Type header to retrieve the charset that the client has specified. :param str default: The default charset to return if none exists. :return: The charset of the request. :rtype: str
[ "Inspect", "the", "Content", "-", "Type", "header", "to", "retrieve", "the", "charset", "that", "the", "client", "has", "specified", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L1373-L1387
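An illustrative sketch of get_content_type_charset for decoding a request body with the client's declared charset; the echo route is an assumption.

from advancedhttpserver import RequestHandler

class EchoHandler(RequestHandler):
    def on_init(self):
        self.handler_map['^echo$'] = self.handle_echo

    def handle_echo(self, query):
        del query
        length = int(self.headers.get('Content-Length', 0))
        body = self.rfile.read(length)
        text = body.decode(self.get_content_type_charset())  # falls back to UTF-8
        self.send_response_full(text.upper().encode('UTF-8'))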
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
WebSocketHandler.close
def close(self): """ Close the web socket connection and stop processing results. If the connection is still open, a WebSocket close message will be sent to the peer. """ if not self.connected: return self.connected = False if self.handler.wfile.closed: return if select.select([], [self.handler.wfile], [], 0)[1]: with self.lock: self.handler.wfile.write(b'\x88\x00') self.handler.wfile.flush() self.on_closed()
python
def close(self): """ Close the web socket connection and stop processing results. If the connection is still open, a WebSocket close message will be sent to the peer. """ if not self.connected: return self.connected = False if self.handler.wfile.closed: return if select.select([], [self.handler.wfile], [], 0)[1]: with self.lock: self.handler.wfile.write(b'\x88\x00') self.handler.wfile.flush() self.on_closed()
[ "def", "close", "(", "self", ")", ":", "if", "not", "self", ".", "connected", ":", "return", "self", ".", "connected", "=", "False", "if", "self", ".", "handler", ".", "wfile", ".", "closed", ":", "return", "if", "select", ".", "select", "(", "[", "]", ",", "[", "self", ".", "handler", ".", "wfile", "]", ",", "[", "]", ",", "0", ")", "[", "1", "]", ":", "with", "self", ".", "lock", ":", "self", ".", "handler", ".", "wfile", ".", "write", "(", "b'\\x88\\x00'", ")", "self", ".", "handler", ".", "wfile", ".", "flush", "(", ")", "self", ".", "on_closed", "(", ")" ]
Close the web socket connection and stop processing results. If the connection is still open, a WebSocket close message will be sent to the peer.
[ "Close", "the", "web", "socket", "connection", "and", "stop", "processing", "results", ".", "If", "the", "connection", "is", "still", "open", "a", "WebSocket", "close", "message", "will", "be", "sent", "to", "the", "peer", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L1521-L1536
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
WebSocketHandler.send_message
def send_message(self, opcode, message): """ Send a message to the peer over the socket. :param int opcode: The opcode for the message to send. :param bytes message: The message data to send. """ if not isinstance(message, bytes): message = message.encode('utf-8') length = len(message) if not select.select([], [self.handler.wfile], [], 0)[1]: self.logger.error('the socket is not ready for writing') self.close() return buffer = b'' buffer += struct.pack('B', 0x80 + opcode) if length <= 125: buffer += struct.pack('B', length) elif 126 <= length <= 65535: buffer += struct.pack('>BH', 126, length) else: buffer += struct.pack('>BQ', 127, length) buffer += message self._last_sent_opcode = opcode self.lock.acquire() try: self.handler.wfile.write(buffer) self.handler.wfile.flush() except Exception: self.logger.error('an error occurred while sending a message', exc_info=True) self.close() finally: self.lock.release()
python
def send_message(self, opcode, message): """ Send a message to the peer over the socket. :param int opcode: The opcode for the message to send. :param bytes message: The message data to send. """ if not isinstance(message, bytes): message = message.encode('utf-8') length = len(message) if not select.select([], [self.handler.wfile], [], 0)[1]: self.logger.error('the socket is not ready for writing') self.close() return buffer = b'' buffer += struct.pack('B', 0x80 + opcode) if length <= 125: buffer += struct.pack('B', length) elif 126 <= length <= 65535: buffer += struct.pack('>BH', 126, length) else: buffer += struct.pack('>BQ', 127, length) buffer += message self._last_sent_opcode = opcode self.lock.acquire() try: self.handler.wfile.write(buffer) self.handler.wfile.flush() except Exception: self.logger.error('an error occurred while sending a message', exc_info=True) self.close() finally: self.lock.release()
[ "def", "send_message", "(", "self", ",", "opcode", ",", "message", ")", ":", "if", "not", "isinstance", "(", "message", ",", "bytes", ")", ":", "message", "=", "message", ".", "encode", "(", "'utf-8'", ")", "length", "=", "len", "(", "message", ")", "if", "not", "select", ".", "select", "(", "[", "]", ",", "[", "self", ".", "handler", ".", "wfile", "]", ",", "[", "]", ",", "0", ")", "[", "1", "]", ":", "self", ".", "logger", ".", "error", "(", "'the socket is not ready for writing'", ")", "self", ".", "close", "(", ")", "return", "buffer", "=", "b''", "buffer", "+=", "struct", ".", "pack", "(", "'B'", ",", "0x80", "+", "opcode", ")", "if", "length", "<=", "125", ":", "buffer", "+=", "struct", ".", "pack", "(", "'B'", ",", "length", ")", "elif", "126", "<=", "length", "<=", "65535", ":", "buffer", "+=", "struct", ".", "pack", "(", "'>BH'", ",", "126", ",", "length", ")", "else", ":", "buffer", "+=", "struct", ".", "pack", "(", "'>BQ'", ",", "127", ",", "length", ")", "buffer", "+=", "message", "self", ".", "_last_sent_opcode", "=", "opcode", "self", ".", "lock", ".", "acquire", "(", ")", "try", ":", "self", ".", "handler", ".", "wfile", ".", "write", "(", "buffer", ")", "self", ".", "handler", ".", "wfile", ".", "flush", "(", ")", "except", "Exception", ":", "self", ".", "logger", ".", "error", "(", "'an error occurred while sending a message'", ",", "exc_info", "=", "True", ")", "self", ".", "close", "(", ")", "finally", ":", "self", ".", "lock", ".", "release", "(", ")" ]
Send a message to the peer over the socket. :param int opcode: The opcode for the message to send. :param bytes message: The message data to send.
[ "Send", "a", "message", "to", "the", "peer", "over", "the", "socket", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L1538-L1570
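The length-encoding branches above follow the RFC 6455 framing rules: a one-byte length for payloads up to 125 bytes, a two-byte extended length up to 65535 bytes, and an eight-byte extended length beyond that. A standalone sketch of just the header construction (this helper is illustrative, not part of the library's API):

import struct

def frame_header(opcode, length):
    # First byte: FIN bit set plus the opcode; server-to-client frames are
    # never masked, so no mask bit is set on the length byte.
    header = struct.pack('B', 0x80 | opcode)
    if length <= 125:
        header += struct.pack('B', length)
    elif length <= 65535:
        header += struct.pack('>BH', 126, length)
    else:
        header += struct.pack('>BQ', 127, length)
    return header

assert frame_header(0x1, 5) == b'\x81\x05'        # short text frame
assert frame_header(0x2, 300)[:2] == b'\x82\x7e'  # 16-bit extended length
assert frame_header(0x8, 0) == b'\x88\x00'        # the close frame sent by close()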
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
WebSocketHandler.on_message
def on_message(self, opcode, message): """ The primary dispatch function to handle incoming WebSocket messages. :param int opcode: The opcode of the message that was received. :param bytes message: The data contained within the message. """ self.logger.debug("processing {0} (opcode: 0x{1:02x}) message".format(self._opcode_names.get(opcode, 'UNKNOWN'), opcode)) if opcode == self._opcode_close: self.close() elif opcode == self._opcode_ping: if len(message) > 125: self.close() return self.send_message(self._opcode_pong, message) elif opcode == self._opcode_pong: pass elif opcode == self._opcode_binary: self.on_message_binary(message) elif opcode == self._opcode_text: try: message = self._decode_string(message) except UnicodeDecodeError: self.logger.warning('closing connection due to invalid unicode within a text message') self.close() else: self.on_message_text(message) elif opcode == self._opcode_continue: self.close() else: self.logger.warning("received unknown opcode: {0} (0x{0:02x})".format(opcode)) self.close()
python
def on_message(self, opcode, message): """ The primary dispatch function to handle incoming WebSocket messages. :param int opcode: The opcode of the message that was received. :param bytes message: The data contained within the message. """ self.logger.debug("processing {0} (opcode: 0x{1:02x}) message".format(self._opcode_names.get(opcode, 'UNKNOWN'), opcode)) if opcode == self._opcode_close: self.close() elif opcode == self._opcode_ping: if len(message) > 125: self.close() return self.send_message(self._opcode_pong, message) elif opcode == self._opcode_pong: pass elif opcode == self._opcode_binary: self.on_message_binary(message) elif opcode == self._opcode_text: try: message = self._decode_string(message) except UnicodeDecodeError: self.logger.warning('closing connection due to invalid unicode within a text message') self.close() else: self.on_message_text(message) elif opcode == self._opcode_continue: self.close() else: self.logger.warning("received unknown opcode: {0} (0x{0:02x})".format(opcode)) self.close()
[ "def", "on_message", "(", "self", ",", "opcode", ",", "message", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"processing {0} (opcode: 0x{1:02x}) message\"", ".", "format", "(", "self", ".", "_opcode_names", ".", "get", "(", "opcode", ",", "'UNKNOWN'", ")", ",", "opcode", ")", ")", "if", "opcode", "==", "self", ".", "_opcode_close", ":", "self", ".", "close", "(", ")", "elif", "opcode", "==", "self", ".", "_opcode_ping", ":", "if", "len", "(", "message", ")", ">", "125", ":", "self", ".", "close", "(", ")", "return", "self", ".", "send_message", "(", "self", ".", "_opcode_pong", ",", "message", ")", "elif", "opcode", "==", "self", ".", "_opcode_pong", ":", "pass", "elif", "opcode", "==", "self", ".", "_opcode_binary", ":", "self", ".", "on_message_binary", "(", "message", ")", "elif", "opcode", "==", "self", ".", "_opcode_text", ":", "try", ":", "message", "=", "self", ".", "_decode_string", "(", "message", ")", "except", "UnicodeDecodeError", ":", "self", ".", "logger", ".", "warning", "(", "'closing connection due to invalid unicode within a text message'", ")", "self", ".", "close", "(", ")", "else", ":", "self", ".", "on_message_text", "(", "message", ")", "elif", "opcode", "==", "self", ".", "_opcode_continue", ":", "self", ".", "close", "(", ")", "else", ":", "self", ".", "logger", ".", "warning", "(", "\"received unknown opcode: {0} (0x{0:02x})\"", ".", "format", "(", "opcode", ")", ")", "self", ".", "close", "(", ")" ]
The primary dispatch function to handle incoming WebSocket messages. :param int opcode: The opcode of the message that was received. :param bytes message: The data contained within the message.
[ "The", "primary", "dispatch", "function", "to", "handle", "incoming", "WebSocket", "messages", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L1595-L1626
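In practice this dispatch is consumed by overriding the per-type callbacks in a subclass. A minimal echo handler sketch (server wiring is elided; 0x01 is the text opcode used by the dispatcher above):

class EchoHandler(WebSocketHandler):
    def on_message_text(self, message):
        # Called for opcode 0x1 frames after the UTF-8 validation above;
        # send_message encodes str payloads to UTF-8 itself.
        self.send_message(0x01, 'echo: ' + message)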
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
Serializer.from_content_type
def from_content_type(cls, content_type): """ Build a serializer object from a MIME Content-Type string. :param str content_type: The Content-Type string to parse. :return: A new serializer instance. :rtype: :py:class:`.Serializer` """ name = content_type options = {} if ';' in content_type: name, options_str = content_type.split(';', 1) for part in options_str.split(';'): part = part.strip() if '=' in part: key, value = part.split('=') else: key, value = (part, None) options[key] = value # old style compatibility if name.endswith('+zlib'): options['compression'] = 'zlib' name = name[:-5] return cls(name, charset=options.get('charset', 'UTF-8'), compression=options.get('compression'))
python
def from_content_type(cls, content_type): """ Build a serializer object from a MIME Content-Type string. :param str content_type: The Content-Type string to parse. :return: A new serializer instance. :rtype: :py:class:`.Serializer` """ name = content_type options = {} if ';' in content_type: name, options_str = content_type.split(';', 1) for part in options_str.split(';'): part = part.strip() if '=' in part: key, value = part.split('=') else: key, value = (part, None) options[key] = value # old style compatibility if name.endswith('+zlib'): options['compression'] = 'zlib' name = name[:-5] return cls(name, charset=options.get('charset', 'UTF-8'), compression=options.get('compression'))
[ "def", "from_content_type", "(", "cls", ",", "content_type", ")", ":", "name", "=", "content_type", "options", "=", "{", "}", "if", "';'", "in", "content_type", ":", "name", ",", "options_str", "=", "content_type", ".", "split", "(", "';'", ",", "1", ")", "for", "part", "in", "options_str", ".", "split", "(", "';'", ")", ":", "part", "=", "part", ".", "strip", "(", ")", "if", "'='", "in", "part", ":", "key", ",", "value", "=", "part", ".", "split", "(", "'='", ")", "else", ":", "key", ",", "value", "=", "(", "part", ",", "None", ")", "options", "[", "key", "]", "=", "value", "# old style compatibility", "if", "name", ".", "endswith", "(", "'+zlib'", ")", ":", "options", "[", "'compression'", "]", "=", "'zlib'", "name", "=", "name", "[", ":", "-", "5", "]", "return", "cls", "(", "name", ",", "charset", "=", "options", ".", "get", "(", "'charset'", ",", "'UTF-8'", ")", ",", "compression", "=", "options", ".", "get", "(", "'compression'", ")", ")" ]
Build a serializer object from a MIME Content-Type string. :param str content_type: The Content-Type string to parse. :return: A new serializer instance. :rtype: :py:class:`.Serializer`
[ "Build", "a", "serializer", "object", "from", "a", "MIME", "Content", "-", "Type", "string", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L1669-L1692
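A usage sketch; 'application/json' is assumed here to be among the names registered in g_serializer_drivers:

# Standard Content-Type with a parameter list.
s1 = Serializer.from_content_type('application/json; charset=US-ASCII')
# Old-style '+zlib' suffix is folded into the compression option.
s2 = Serializer.from_content_type('application/json+zlib')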
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
Serializer.dumps
def dumps(self, data): """ Serialize a python data type for transmission or storage. :param data: The python object to serialize. :return: The serialized representation of the object. :rtype: bytes """ data = g_serializer_drivers[self.name]['dumps'](data) if sys.version_info[0] == 3 and isinstance(data, str): data = data.encode(self._charset) if self._compression == 'zlib': data = zlib.compress(data) assert isinstance(data, bytes) return data
python
def dumps(self, data): """ Serialize a python data type for transmission or storage. :param data: The python object to serialize. :return: The serialized representation of the object. :rtype: bytes """ data = g_serializer_drivers[self.name]['dumps'](data) if sys.version_info[0] == 3 and isinstance(data, str): data = data.encode(self._charset) if self._compression == 'zlib': data = zlib.compress(data) assert isinstance(data, bytes) return data
[ "def", "dumps", "(", "self", ",", "data", ")", ":", "data", "=", "g_serializer_drivers", "[", "self", ".", "name", "]", "[", "'dumps'", "]", "(", "data", ")", "if", "sys", ".", "version_info", "[", "0", "]", "==", "3", "and", "isinstance", "(", "data", ",", "str", ")", ":", "data", "=", "data", ".", "encode", "(", "self", ".", "_charset", ")", "if", "self", ".", "_compression", "==", "'zlib'", ":", "data", "=", "zlib", ".", "compress", "(", "data", ")", "assert", "isinstance", "(", "data", ",", "bytes", ")", "return", "data" ]
Serialize a python data type for transmission or storage. :param data: The python object to serialize. :return: The serialized representation of the object. :rtype: bytes
[ "Serialize", "a", "python", "data", "type", "for", "transmission", "or", "storage", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L1694-L1708
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
Serializer.loads
def loads(self, data): """ Deserialize the data into its original python object. :param bytes data: The serialized object to load. :return: The original python object. """ if not isinstance(data, bytes): raise TypeError("loads() argument 1 must be bytes, not {0}".format(type(data).__name__)) if self._compression == 'zlib': data = zlib.decompress(data) if sys.version_info[0] == 3 and self.name.startswith('application/'): data = data.decode(self._charset) data = g_serializer_drivers[self.name]['loads'](data, (self._charset if sys.version_info[0] == 3 else None)) if isinstance(data, list): data = tuple(data) return data
python
def loads(self, data): """ Deserialize the data into its original python object. :param bytes data: The serialized object to load. :return: The original python object. """ if not isinstance(data, bytes): raise TypeError("loads() argument 1 must be bytes, not {0}".format(type(data).__name__)) if self._compression == 'zlib': data = zlib.decompress(data) if sys.version_info[0] == 3 and self.name.startswith('application/'): data = data.decode(self._charset) data = g_serializer_drivers[self.name]['loads'](data, (self._charset if sys.version_info[0] == 3 else None)) if isinstance(data, list): data = tuple(data) return data
[ "def", "loads", "(", "self", ",", "data", ")", ":", "if", "not", "isinstance", "(", "data", ",", "bytes", ")", ":", "raise", "TypeError", "(", "\"loads() argument 1 must be bytes, not {0}\"", ".", "format", "(", "type", "(", "data", ")", ".", "__name__", ")", ")", "if", "self", ".", "_compression", "==", "'zlib'", ":", "data", "=", "zlib", ".", "decompress", "(", "data", ")", "if", "sys", ".", "version_info", "[", "0", "]", "==", "3", "and", "self", ".", "name", ".", "startswith", "(", "'application/'", ")", ":", "data", "=", "data", ".", "decode", "(", "self", ".", "_charset", ")", "data", "=", "g_serializer_drivers", "[", "self", ".", "name", "]", "[", "'loads'", "]", "(", "data", ",", "(", "self", ".", "_charset", "if", "sys", ".", "version_info", "[", "0", "]", "==", "3", "else", "None", ")", ")", "if", "isinstance", "(", "data", ",", "list", ")", ":", "data", "=", "tuple", "(", "data", ")", "return", "data" ]
Deserialize the data into its original python object. :param bytes data: The serialized object to load. :return: The original python object.
[ "Deserialize", "the", "data", "into", "it", "s", "original", "python", "object", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L1710-L1726
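Together, dumps() and loads() give a symmetric round trip, with one wrinkle visible in the code above: a top-level list comes back as a tuple. A short sketch, again assuming the 'application/json' driver is registered:

s = Serializer('application/json', charset='UTF-8', compression='zlib')
blob = s.dumps({'status': 'ok', 'values': [1, 2, 3]})
assert isinstance(blob, bytes)                             # zlib-compressed JSON bytes
assert s.loads(blob) == {'status': 'ok', 'values': [1, 2, 3]}
assert s.loads(s.dumps([1, 2, 3])) == (1, 2, 3)            # top-level list -> tuple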
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
AdvancedHTTPServer.add_sni_cert
def add_sni_cert(self, hostname, ssl_certfile=None, ssl_keyfile=None, ssl_version=None): """ Add an SSL certificate for a specific hostname as supported by SSL's Server Name Indicator (SNI) extension. See :rfc:`3546` for more details on SSL extensions. In order to use this method, the server instance must have been initialized with at least one address configured for SSL. .. warning:: This method will raise a :py:exc:`RuntimeError` if either the SNI extension is not available in the :py:mod:`ssl` module or if SSL was not enabled at initialization time through the use of arguments to :py:meth:`~.__init__`. .. versionadded:: 2.0.0 :param str hostname: The hostname for this configuration. :param str ssl_certfile: An SSL certificate file to use. :param str ssl_keyfile: An SSL key file to use. :param ssl_version: The SSL protocol version to use. """ if not g_ssl_has_server_sni: raise RuntimeError('the ssl server name indicator extension is unavailable') if self._ssl_sni_entries is None: raise RuntimeError('ssl was not enabled on initialization') if ssl_certfile: ssl_certfile = os.path.abspath(ssl_certfile) if ssl_keyfile: ssl_keyfile = os.path.abspath(ssl_keyfile) cert_info = SSLSNICertificate(hostname, ssl_certfile, ssl_keyfile) if ssl_version is None or isinstance(ssl_version, str): ssl_version = resolve_ssl_protocol_version(ssl_version) ssl_ctx = ssl.SSLContext(ssl_version) ssl_ctx.load_cert_chain(ssl_certfile, keyfile=ssl_keyfile) self._ssl_sni_entries[hostname] = SSLSNIEntry(context=ssl_ctx, certificate=cert_info)
python
def add_sni_cert(self, hostname, ssl_certfile=None, ssl_keyfile=None, ssl_version=None): """ Add an SSL certificate for a specific hostname as supported by SSL's Server Name Indicator (SNI) extension. See :rfc:`3546` for more details on SSL extensions. In order to use this method, the server instance must have been initialized with at least one address configured for SSL. .. warning:: This method will raise a :py:exc:`RuntimeError` if either the SNI extension is not available in the :py:mod:`ssl` module or if SSL was not enabled at initialization time through the use of arguments to :py:meth:`~.__init__`. .. versionadded:: 2.0.0 :param str hostname: The hostname for this configuration. :param str ssl_certfile: An SSL certificate file to use. :param str ssl_keyfile: An SSL key file to use. :param ssl_version: The SSL protocol version to use. """ if not g_ssl_has_server_sni: raise RuntimeError('the ssl server name indicator extension is unavailable') if self._ssl_sni_entries is None: raise RuntimeError('ssl was not enabled on initialization') if ssl_certfile: ssl_certfile = os.path.abspath(ssl_certfile) if ssl_keyfile: ssl_keyfile = os.path.abspath(ssl_keyfile) cert_info = SSLSNICertificate(hostname, ssl_certfile, ssl_keyfile) if ssl_version is None or isinstance(ssl_version, str): ssl_version = resolve_ssl_protocol_version(ssl_version) ssl_ctx = ssl.SSLContext(ssl_version) ssl_ctx.load_cert_chain(ssl_certfile, keyfile=ssl_keyfile) self._ssl_sni_entries[hostname] = SSLSNIEntry(context=ssl_ctx, certificate=cert_info)
[ "def", "add_sni_cert", "(", "self", ",", "hostname", ",", "ssl_certfile", "=", "None", ",", "ssl_keyfile", "=", "None", ",", "ssl_version", "=", "None", ")", ":", "if", "not", "g_ssl_has_server_sni", ":", "raise", "RuntimeError", "(", "'the ssl server name indicator extension is unavailable'", ")", "if", "self", ".", "_ssl_sni_entries", "is", "None", ":", "raise", "RuntimeError", "(", "'ssl was not enabled on initialization'", ")", "if", "ssl_certfile", ":", "ssl_certfile", "=", "os", ".", "path", ".", "abspath", "(", "ssl_certfile", ")", "if", "ssl_keyfile", ":", "ssl_keyfile", "=", "os", ".", "path", ".", "abspath", "(", "ssl_keyfile", ")", "cert_info", "=", "SSLSNICertificate", "(", "hostname", ",", "ssl_certfile", ",", "ssl_keyfile", ")", "if", "ssl_version", "is", "None", "or", "isinstance", "(", "ssl_version", ",", "str", ")", ":", "ssl_version", "=", "resolve_ssl_protocol_version", "(", "ssl_version", ")", "ssl_ctx", "=", "ssl", ".", "SSLContext", "(", "ssl_version", ")", "ssl_ctx", ".", "load_cert_chain", "(", "ssl_certfile", ",", "keyfile", "=", "ssl_keyfile", ")", "self", ".", "_ssl_sni_entries", "[", "hostname", "]", "=", "SSLSNIEntry", "(", "context", "=", "ssl_ctx", ",", "certificate", "=", "cert_info", ")" ]
Add an SSL certificate for a specific hostname as supported by SSL's Server Name Indicator (SNI) extension. See :rfc:`3546` for more details on SSL extensions. In order to use this method, the server instance must have been initialized with at least one address configured for SSL. .. warning:: This method will raise a :py:exc:`RuntimeError` if either the SNI extension is not available in the :py:mod:`ssl` module or if SSL was not enabled at initialization time through the use of arguments to :py:meth:`~.__init__`. .. versionadded:: 2.0.0 :param str hostname: The hostname for this configuration. :param str ssl_certfile: An SSL certificate file to use. :param str ssl_keyfile: An SSL key file to use. :param ssl_version: The SSL protocol version to use.
[ "Add", "an", "SSL", "certificate", "for", "a", "specific", "hostname", "as", "supported", "by", "SSL", "s", "Server", "Name", "Indicator", "(", "SNI", ")", "extension", ".", "See", ":", "rfc", ":", "3546", "for", "more", "details", "on", "SSL", "extensions", ".", "In", "order", "to", "use", "this", "method", "the", "server", "instance", "must", "have", "been", "initialized", "with", "at", "least", "one", "address", "configured", "for", "SSL", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L1844-L1878
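A usage sketch with hypothetical certificate paths and hostname; the constructor arguments shown are assumptions about the 2.x server signature, and SSL must already be enabled on the default address for add_sni_cert to succeed:

server = AdvancedHTTPServer(
    RequestHandler,
    address=('0.0.0.0', 443),
    ssl_certfile='/path/to/default.pem'  # hypothetical path
)
server.add_sni_cert(
    'alt.example.com',                   # hypothetical hostname
    ssl_certfile='/path/to/alt.crt',
    ssl_keyfile='/path/to/alt.key'
)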
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
AdvancedHTTPServer.remove_sni_cert
def remove_sni_cert(self, hostname): """ Remove the SSL Server Name Indicator (SNI) certificate configuration for the specified *hostname*. .. warning:: This method will raise a :py:exc:`RuntimeError` if either the SNI extension is not available in the :py:mod:`ssl` module or if SSL was not enabled at initialization time through the use of arguments to :py:meth:`~.__init__`. .. versionadded:: 2.2.0 :param str hostname: The hostname to delete the SNI configuration for. """ if not g_ssl_has_server_sni: raise RuntimeError('the ssl server name indicator extension is unavailable') if self._ssl_sni_entries is None: raise RuntimeError('ssl was not enabled on initialization') sni_entry = self._ssl_sni_entries.pop(hostname, None) if sni_entry is None: raise ValueError('the specified hostname does not have an sni certificate configuration')
python
def remove_sni_cert(self, hostname): """ Remove the SSL Server Name Indicator (SNI) certificate configuration for the specified *hostname*. .. warning:: This method will raise a :py:exc:`RuntimeError` if either the SNI extension is not available in the :py:mod:`ssl` module or if SSL was not enabled at initialization time through the use of arguments to :py:meth:`~.__init__`. .. versionadded:: 2.2.0 :param str hostname: The hostname to delete the SNI configuration for. """ if not g_ssl_has_server_sni: raise RuntimeError('the ssl server name indicator extension is unavailable') if self._ssl_sni_entries is None: raise RuntimeError('ssl was not enabled on initialization') sni_entry = self._ssl_sni_entries.pop(hostname, None) if sni_entry is None: raise ValueError('the specified hostname does not have an sni certificate configuration')
[ "def", "remove_sni_cert", "(", "self", ",", "hostname", ")", ":", "if", "not", "g_ssl_has_server_sni", ":", "raise", "RuntimeError", "(", "'the ssl server name indicator extension is unavailable'", ")", "if", "self", ".", "_ssl_sni_entries", "is", "None", ":", "raise", "RuntimeError", "(", "'ssl was not enabled on initialization'", ")", "sni_entry", "=", "self", ".", "_ssl_sni_entries", ".", "pop", "(", "hostname", ",", "None", ")", "if", "sni_entry", "is", "None", ":", "raise", "ValueError", "(", "'the specified hostname does not have an sni certificate configuration'", ")" ]
Remove the SSL Server Name Indicator (SNI) certificate configuration for the specified *hostname*. .. warning:: This method will raise a :py:exc:`RuntimeError` if either the SNI extension is not available in the :py:mod:`ssl` module or if SSL was not enabled at initialization time through the use of arguments to :py:meth:`~.__init__`. .. versionadded:: 2.2.0 :param str hostname: The hostname to delete the SNI configuration for.
[ "Remove", "the", "SSL", "Server", "Name", "Indicator", "(", "SNI", ")", "certificate", "configuration", "for", "the", "specified", "*", "hostname", "*", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L1880-L1902
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
AdvancedHTTPServer.sni_certs
def sni_certs(self): """ .. versionadded:: 2.2.0 :return: Return a tuple of :py:class:`~.SSLSNICertificate` instances for each of the certificates that are configured. :rtype: tuple """ if not g_ssl_has_server_sni or self._ssl_sni_entries is None: return tuple() return tuple(entry.certificate for entry in self._ssl_sni_entries.values())
python
def sni_certs(self): """ .. versionadded:: 2.2.0 :return: Return a tuple of :py:class:`~.SSLSNICertificate` instances for each of the certificates that are configured. :rtype: tuple """ if not g_ssl_has_server_sni or self._ssl_sni_entries is None: return tuple() return tuple(entry.certificate for entry in self._ssl_sni_entries.values())
[ "def", "sni_certs", "(", "self", ")", ":", "if", "not", "g_ssl_has_server_sni", "or", "self", ".", "_ssl_sni_entries", "is", "None", ":", "return", "tuple", "(", ")", "return", "tuple", "(", "entry", ".", "certificate", "for", "entry", "in", "self", ".", "_ssl_sni_entries", ".", "values", "(", ")", ")" ]
.. versionadded:: 2.2.0 :return: Return a tuple of :py:class:`~.SSLSNICertificate` instances for each of the certificates that are configured. :rtype: tuple
[ "..", "versionadded", "::", "2", ".", "2", ".", "0" ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L1905-L1914
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
AdvancedHTTPServer.serve_forever
def serve_forever(self, fork=False): """ Start handling requests. This method must be called and does not return unless the :py:meth:`.shutdown` method is called from another thread. :param bool fork: Whether to fork or not before serving content. :return: The child process's PID if *fork* is set to True. :rtype: int """ if fork: if not hasattr(os, 'fork'): raise OSError('os.fork is not available') child_pid = os.fork() if child_pid != 0: self.logger.info('forked child process: ' + str(child_pid)) return child_pid self.__server_thread = threading.current_thread() self.__wakeup_fd = WakeupFd() self.__is_shutdown.clear() self.__should_stop.clear() self.__is_running.set() while not self.__should_stop.is_set(): try: self._serve_ready() except socket.error: self.logger.warning('encountered socket error, stopping server') self.__should_stop.set() self.__is_shutdown.set() self.__is_running.clear() return 0
python
def serve_forever(self, fork=False): """ Start handling requests. This method must be called and does not return unless the :py:meth:`.shutdown` method is called from another thread. :param bool fork: Whether to fork or not before serving content. :return: The child process's PID if *fork* is set to True. :rtype: int """ if fork: if not hasattr(os, 'fork'): raise OSError('os.fork is not available') child_pid = os.fork() if child_pid != 0: self.logger.info('forked child process: ' + str(child_pid)) return child_pid self.__server_thread = threading.current_thread() self.__wakeup_fd = WakeupFd() self.__is_shutdown.clear() self.__should_stop.clear() self.__is_running.set() while not self.__should_stop.is_set(): try: self._serve_ready() except socket.error: self.logger.warning('encountered socket error, stopping server') self.__should_stop.set() self.__is_shutdown.set() self.__is_running.clear() return 0
[ "def", "serve_forever", "(", "self", ",", "fork", "=", "False", ")", ":", "if", "fork", ":", "if", "not", "hasattr", "(", "os", ",", "'fork'", ")", ":", "raise", "OSError", "(", "'os.fork is not available'", ")", "child_pid", "=", "os", ".", "fork", "(", ")", "if", "child_pid", "!=", "0", ":", "self", ".", "logger", ".", "info", "(", "'forked child process: '", "+", "str", "(", "child_pid", ")", ")", "return", "child_pid", "self", ".", "__server_thread", "=", "threading", ".", "current_thread", "(", ")", "self", ".", "__wakeup_fd", "=", "WakeupFd", "(", ")", "self", ".", "__is_shutdown", ".", "clear", "(", ")", "self", ".", "__should_stop", ".", "clear", "(", ")", "self", ".", "__is_running", ".", "set", "(", ")", "while", "not", "self", ".", "__should_stop", ".", "is_set", "(", ")", ":", "try", ":", "self", ".", "_serve_ready", "(", ")", "except", "socket", ".", "error", ":", "self", ".", "logger", ".", "warning", "(", "'encountered socket error, stopping server'", ")", "self", ".", "__should_stop", ".", "set", "(", ")", "self", ".", "__is_shutdown", ".", "set", "(", ")", "self", ".", "__is_running", ".", "clear", "(", ")", "return", "0" ]
Start handling requests. This method must be called and does not return unless the :py:meth:`.shutdown` method is called from another thread. :param bool fork: Whether to fork or not before serving content. :return: The child process's PID if *fork* is set to True. :rtype: int
[ "Start", "handling", "requests", ".", "This", "method", "must", "be", "called", "and", "does", "not", "return", "unless", "the", ":", "py", ":", "meth", ":", ".", "shutdown", "method", "is", "called", "from", "another", "thread", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L1929-L1959
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
AdvancedHTTPServer.shutdown
def shutdown(self): """Shutdown the server and stop responding to requests.""" self.__should_stop.set() if self.__server_thread == threading.current_thread(): self.__is_shutdown.set() self.__is_running.clear() else: if self.__wakeup_fd is not None: os.write(self.__wakeup_fd.write_fd, b'\x00') self.__is_shutdown.wait() if self.__wakeup_fd is not None: self.__wakeup_fd.close() self.__wakeup_fd = None for server in self.sub_servers: server.shutdown()
python
def shutdown(self): """Shutdown the server and stop responding to requests.""" self.__should_stop.set() if self.__server_thread == threading.current_thread(): self.__is_shutdown.set() self.__is_running.clear() else: if self.__wakeup_fd is not None: os.write(self.__wakeup_fd.write_fd, b'\x00') self.__is_shutdown.wait() if self.__wakeup_fd is not None: self.__wakeup_fd.close() self.__wakeup_fd = None for server in self.sub_servers: server.shutdown()
[ "def", "shutdown", "(", "self", ")", ":", "self", ".", "__should_stop", ".", "set", "(", ")", "if", "self", ".", "__server_thread", "==", "threading", ".", "current_thread", "(", ")", ":", "self", ".", "__is_shutdown", ".", "set", "(", ")", "self", ".", "__is_running", ".", "clear", "(", ")", "else", ":", "if", "self", ".", "__wakeup_fd", "is", "not", "None", ":", "os", ".", "write", "(", "self", ".", "__wakeup_fd", ".", "write_fd", ",", "b'\\x00'", ")", "self", ".", "__is_shutdown", ".", "wait", "(", ")", "if", "self", ".", "__wakeup_fd", "is", "not", "None", ":", "self", ".", "__wakeup_fd", ".", "close", "(", ")", "self", ".", "__wakeup_fd", "=", "None", "for", "server", "in", "self", ".", "sub_servers", ":", "server", ".", "shutdown", "(", ")" ]
Shutdown the server and stop responding to requests.
[ "Shutdown", "the", "server", "and", "stop", "responding", "to", "requests", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L1961-L1975
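serve_forever() blocks until shutdown() is called from another thread, which is why shutdown() writes a byte to the wakeup descriptor before waiting on the shutdown event. The usual pattern, sketched:

import threading

thread = threading.Thread(target=server.serve_forever)
thread.start()
# ... handle requests for as long as needed ...
server.shutdown()  # wakes the serving thread and waits for it to wind down
thread.join()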
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
AdvancedHTTPServer.auth_set
def auth_set(self, status): """ Enable or disable requiring authentication on all incoming requests. :param bool status: Whether to enable or disable requiring authentication. """ if not bool(status): self.__config['basic_auth'] = None self.logger.info('basic authentication has been disabled') else: self.__config['basic_auth'] = {} self.logger.info('basic authentication has been enabled')
python
def auth_set(self, status): """ Enable or disable requiring authentication on all incoming requests. :param bool status: Whether to enable or disable requiring authentication. """ if not bool(status): self.__config['basic_auth'] = None self.logger.info('basic authentication has been disabled') else: self.__config['basic_auth'] = {} self.logger.info('basic authentication has been enabled')
[ "def", "auth_set", "(", "self", ",", "status", ")", ":", "if", "not", "bool", "(", "status", ")", ":", "self", ".", "__config", "[", "'basic_auth'", "]", "=", "None", "self", ".", "logger", ".", "info", "(", "'basic authentication has been disabled'", ")", "else", ":", "self", ".", "__config", "[", "'basic_auth'", "]", "=", "{", "}", "self", ".", "logger", ".", "info", "(", "'basic authentication has been enabled'", ")" ]
Enable or disable requiring authentication on all incoming requests. :param bool status: Whether to enable or disable requiring authentication.
[ "Enable", "or", "disable", "requiring", "authentication", "on", "all", "incoming", "requests", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L2050-L2061
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
AdvancedHTTPServer.auth_delete_creds
def auth_delete_creds(self, username=None): """ Delete the credentials for a specific username if specified or all stored credentials. :param str username: The username of the credentials to delete. """ if not username: self.__config['basic_auth'] = {} self.logger.info('basic authentication database has been cleared of all entries') return del self.__config['basic_auth'][username]
python
def auth_delete_creds(self, username=None): """ Delete the credentials for a specific username if specified or all stored credentials. :param str username: The username of the credentials to delete. """ if not username: self.__config['basic_auth'] = {} self.logger.info('basic authentication database has been cleared of all entries') return del self.__config['basic_auth'][username]
[ "def", "auth_delete_creds", "(", "self", ",", "username", "=", "None", ")", ":", "if", "not", "username", ":", "self", ".", "__config", "[", "'basic_auth'", "]", "=", "{", "}", "self", ".", "logger", ".", "info", "(", "'basic authentication database has been cleared of all entries'", ")", "return", "del", "self", ".", "__config", "[", "'basic_auth'", "]", "[", "username", "]" ]
Delete the credentials for a specific username if specified or all stored credentials. :param str username: The username of the credentials to delete.
[ "Delete", "the", "credentials", "for", "a", "specific", "username", "if", "specified", "or", "all", "stored", "credentials", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L2063-L2074
zeroSteiner/AdvancedHTTPServer
advancedhttpserver.py
AdvancedHTTPServer.auth_add_creds
def auth_add_creds(self, username, password, pwtype='plain'): """ Add a valid set of credentials to be accepted for authentication. Calling this function will automatically enable requiring authentication. Passwords can be provided either in plaintext or as a hash by specifying the hash type in the *pwtype* argument. :param str username: The username of the credentials to be added. :param password: The password data of the credentials to be added. :type password: bytes, str :param str pwtype: The type of the *password* data (plain, md5, sha1, etc.). """ if not isinstance(password, (bytes, str)): raise TypeError("auth_add_creds() argument 2 must be bytes or str, not {0}".format(type(password).__name__)) pwtype = pwtype.lower() if not pwtype in ('plain', 'md5', 'sha1', 'sha256', 'sha384', 'sha512'): raise ValueError('invalid password type, must be \'plain\', or supported by hashlib') if self.__config.get('basic_auth') is None: self.__config['basic_auth'] = {} self.logger.info('basic authentication has been enabled') if pwtype != 'plain': algorithms_available = getattr(hashlib, 'algorithms_available', ()) or getattr(hashlib, 'algorithms', ()) if pwtype not in algorithms_available: raise ValueError('hashlib does not support the desired algorithm') # only md5 and sha1 hex for backwards compatibility if pwtype == 'md5' and len(password) == 32: password = binascii.unhexlify(password) elif pwtype == 'sha1' and len(password) == 40: password = binascii.unhexlify(password) if not isinstance(password, bytes): password = password.encode('UTF-8') if len(hashlib.new(pwtype, b'foobar').digest()) != len(password): raise ValueError('the length of the password hash does not match the type specified') self.__config['basic_auth'][username] = {'value': password, 'type': pwtype}
python
def auth_add_creds(self, username, password, pwtype='plain'): """ Add a valid set of credentials to be accepted for authentication. Calling this function will automatically enable requiring authentication. Passwords can be provided either in plaintext or as a hash by specifying the hash type in the *pwtype* argument. :param str username: The username of the credentials to be added. :param password: The password data of the credentials to be added. :type password: bytes, str :param str pwtype: The type of the *password* data (plain, md5, sha1, etc.). """ if not isinstance(password, (bytes, str)): raise TypeError("auth_add_creds() argument 2 must be bytes or str, not {0}".format(type(password).__name__)) pwtype = pwtype.lower() if not pwtype in ('plain', 'md5', 'sha1', 'sha256', 'sha384', 'sha512'): raise ValueError('invalid password type, must be \'plain\', or supported by hashlib') if self.__config.get('basic_auth') is None: self.__config['basic_auth'] = {} self.logger.info('basic authentication has been enabled') if pwtype != 'plain': algorithms_available = getattr(hashlib, 'algorithms_available', ()) or getattr(hashlib, 'algorithms', ()) if pwtype not in algorithms_available: raise ValueError('hashlib does not support the desired algorithm') # only md5 and sha1 hex for backwards compatibility if pwtype == 'md5' and len(password) == 32: password = binascii.unhexlify(password) elif pwtype == 'sha1' and len(password) == 40: password = binascii.unhexlify(password) if not isinstance(password, bytes): password = password.encode('UTF-8') if len(hashlib.new(pwtype, b'foobar').digest()) != len(password): raise ValueError('the length of the password hash does not match the type specified') self.__config['basic_auth'][username] = {'value': password, 'type': pwtype}
[ "def", "auth_add_creds", "(", "self", ",", "username", ",", "password", ",", "pwtype", "=", "'plain'", ")", ":", "if", "not", "isinstance", "(", "password", ",", "(", "bytes", ",", "str", ")", ")", ":", "raise", "TypeError", "(", "\"auth_add_creds() argument 2 must be bytes or str, not {0}\"", ".", "format", "(", "type", "(", "password", ")", ".", "__name__", ")", ")", "pwtype", "=", "pwtype", ".", "lower", "(", ")", "if", "not", "pwtype", "in", "(", "'plain'", ",", "'md5'", ",", "'sha1'", ",", "'sha256'", ",", "'sha384'", ",", "'sha512'", ")", ":", "raise", "ValueError", "(", "'invalid password type, must be \\'plain\\', or supported by hashlib'", ")", "if", "self", ".", "__config", ".", "get", "(", "'basic_auth'", ")", "is", "None", ":", "self", ".", "__config", "[", "'basic_auth'", "]", "=", "{", "}", "self", ".", "logger", ".", "info", "(", "'basic authentication has been enabled'", ")", "if", "pwtype", "!=", "'plain'", ":", "algorithms_available", "=", "getattr", "(", "hashlib", ",", "'algorithms_available'", ",", "(", ")", ")", "or", "getattr", "(", "hashlib", ",", "'algorithms'", ",", "(", ")", ")", "if", "pwtype", "not", "in", "algorithms_available", ":", "raise", "ValueError", "(", "'hashlib does not support the desired algorithm'", ")", "# only md5 and sha1 hex for backwards compatibility", "if", "pwtype", "==", "'md5'", "and", "len", "(", "password", ")", "==", "32", ":", "password", "=", "binascii", ".", "unhexlify", "(", "password", ")", "elif", "pwtype", "==", "'sha1'", "and", "len", "(", "password", ")", "==", "40", ":", "password", "=", "binascii", ".", "unhexlify", "(", "password", ")", "if", "not", "isinstance", "(", "password", ",", "bytes", ")", ":", "password", "=", "password", ".", "encode", "(", "'UTF-8'", ")", "if", "len", "(", "hashlib", ".", "new", "(", "pwtype", ",", "b'foobar'", ")", ".", "digest", "(", ")", ")", "!=", "len", "(", "password", ")", ":", "raise", "ValueError", "(", "'the length of the password hash does not match the type specified'", ")", "self", ".", "__config", "[", "'basic_auth'", "]", "[", "username", "]", "=", "{", "'value'", ":", "password", ",", "'type'", ":", "pwtype", "}" ]
Add a valid set of credentials to be accepted for authentication. Calling this function will automatically enable requiring authentication. Passwords can be provided either in plaintext or as a hash by specifying the hash type in the *pwtype* argument. :param str username: The username of the credentials to be added. :param password: The password data of the credentials to be added. :type password: bytes, str :param str pwtype: The type of the *password* data (plain, md5, sha1, etc.).
[ "Add", "a", "valid", "set", "of", "credentials", "to", "be", "accepted", "for", "authentication", ".", "Calling", "this", "function", "will", "automatically", "enable", "requiring", "authentication", ".", "Passwords", "can", "be", "provided", "in", "either", "plaintext", "or", "as", "a", "hash", "by", "specifying", "the", "hash", "type", "in", "the", "*", "pwtype", "*", "argument", "." ]
train
https://github.com/zeroSteiner/AdvancedHTTPServer/blob/8c53cf7e1ddbf7ae9f573c82c5fe5f6992db7b5a/advancedhttpserver.py#L2076-L2109
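Two points from the code above are worth demonstrating: credentials may be plaintext or pre-hashed, and for algorithms other than md5 and sha1 the hash must be the raw digest bytes, since the hex-to-binary conversion is applied only to those two for backwards compatibility. A sketch with made-up credentials:

import hashlib

server.auth_add_creds('alice', 'correct horse battery staple')  # plaintext
# sha256/sha384/sha512 must be raw digest bytes, not a hex string, or the
# length check against hashlib.new(pwtype, ...).digest() will fail:
server.auth_add_creds('bob', hashlib.sha256(b'hunter2').digest(), pwtype='sha256')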
jakevdp/supersmoother
supersmoother/utils.py
setattr_context
def setattr_context(obj, **kwargs): """ Context manager to temporarily change the values of object attributes while executing a function. Example ------- >>> class Foo: pass >>> f = Foo(); f.attr = 'hello' >>> with setattr_context(f, attr='goodbye'): ... print(f.attr) goodbye >>> print(f.attr) hello """ old_kwargs = dict([(key, getattr(obj, key)) for key in kwargs]) [setattr(obj, key, val) for key, val in kwargs.items()] try: yield finally: [setattr(obj, key, val) for key, val in old_kwargs.items()]
python
def setattr_context(obj, **kwargs): """ Context manager to temporarily change the values of object attributes while executing a function. Example ------- >>> class Foo: pass >>> f = Foo(); f.attr = 'hello' >>> with setattr_context(f, attr='goodbye'): ... print(f.attr) goodbye >>> print(f.attr) hello """ old_kwargs = dict([(key, getattr(obj, key)) for key in kwargs]) [setattr(obj, key, val) for key, val in kwargs.items()] try: yield finally: [setattr(obj, key, val) for key, val in old_kwargs.items()]
[ "def", "setattr_context", "(", "obj", ",", "*", "*", "kwargs", ")", ":", "old_kwargs", "=", "dict", "(", "[", "(", "key", ",", "getattr", "(", "obj", ",", "key", ")", ")", "for", "key", "in", "kwargs", "]", ")", "[", "setattr", "(", "obj", ",", "key", ",", "val", ")", "for", "key", ",", "val", "in", "kwargs", ".", "items", "(", ")", "]", "try", ":", "yield", "finally", ":", "[", "setattr", "(", "obj", ",", "key", ",", "val", ")", "for", "key", ",", "val", "in", "old_kwargs", ".", "items", "(", ")", "]" ]
Context manager to temporarily change the values of object attributes while executing a function. Example ------- >>> class Foo: pass >>> f = Foo(); f.attr = 'hello' >>> with setattr_context(f, attr='goodbye'): ... print(f.attr) goodbye >>> print(f.attr) hello
[ "Context", "manager", "to", "temporarily", "change", "the", "values", "of", "object", "attributes", "while", "executing", "a", "function", "." ]
train
https://github.com/jakevdp/supersmoother/blob/0c96cf13dcd6f9006d3c0421f9cd6e18abe27a2f/supersmoother/utils.py#L10-L30
jakevdp/supersmoother
supersmoother/utils.py
validate_inputs
def validate_inputs(*arrays, **kwargs): """Validate input arrays This checks that - Arrays are mutually broadcastable - Broadcasted arrays are one-dimensional Optionally, arrays are sorted according to the ``sort_by`` argument. Parameters ---------- *arrays : ndarrays All non-keyword arguments are arrays which will be validated sort_by : array If specified, sort all inputs by the order given in this array. """ arrays = np.broadcast_arrays(*arrays) sort_by = kwargs.pop('sort_by', None) if kwargs: raise ValueError("unrecognized arguments: {0}".format(kwargs.keys())) if arrays[0].ndim != 1: raise ValueError("Input arrays should be one-dimensional.") if sort_by is not None: isort = np.argsort(sort_by) if isort.shape != arrays[0].shape: raise ValueError("sort shape must equal array shape.") arrays = tuple([a[isort] for a in arrays]) return arrays
python
def validate_inputs(*arrays, **kwargs): """Validate input arrays This checks that - Arrays are mutually broadcastable - Broadcasted arrays are one-dimensional Optionally, arrays are sorted according to the ``sort_by`` argument. Parameters ---------- *arrays : ndarrays All non-keyword arguments are arrays which will be validated sort_by : array If specified, sort all inputs by the order given in this array. """ arrays = np.broadcast_arrays(*arrays) sort_by = kwargs.pop('sort_by', None) if kwargs: raise ValueError("unrecognized arguments: {0}".format(kwargs.keys())) if arrays[0].ndim != 1: raise ValueError("Input arrays should be one-dimensional.") if sort_by is not None: isort = np.argsort(sort_by) if isort.shape != arrays[0].shape: raise ValueError("sort shape must equal array shape.") arrays = tuple([a[isort] for a in arrays]) return arrays
[ "def", "validate_inputs", "(", "*", "arrays", ",", "*", "*", "kwargs", ")", ":", "arrays", "=", "np", ".", "broadcast_arrays", "(", "*", "arrays", ")", "sort_by", "=", "kwargs", ".", "pop", "(", "'sort_by'", ",", "None", ")", "if", "kwargs", ":", "raise", "ValueError", "(", "\"unrecognized arguments: {0}\"", ".", "format", "(", "kwargs", ".", "keys", "(", ")", ")", ")", "if", "arrays", "[", "0", "]", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"Input arrays should be one-dimensional.\"", ")", "if", "sort_by", "is", "not", "None", ":", "isort", "=", "np", ".", "argsort", "(", "sort_by", ")", "if", "isort", ".", "shape", "!=", "arrays", "[", "0", "]", ".", "shape", ":", "raise", "ValueError", "(", "\"sort shape must equal array shape.\"", ")", "arrays", "=", "tuple", "(", "[", "a", "[", "isort", "]", "for", "a", "in", "arrays", "]", ")", "return", "arrays" ]
Validate input arrays This checks that - Arrays are mutually broadcastable - Broadcasted arrays are one-dimensional Optionally, arrays are sorted according to the ``sort_by`` argument. Parameters ---------- *arrays : ndarrays All non-keyword arguments are arrays which will be validated sort_by : array If specified, sort all inputs by the order given in this array.
[ "Validate", "input", "arrays" ]
train
https://github.com/jakevdp/supersmoother/blob/0c96cf13dcd6f9006d3c0421f9cd6e18abe27a2f/supersmoother/utils.py#L43-L73
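A quick demonstration of the broadcasting and co-sorting behavior:

import numpy as np

t = np.array([3.0, 1.0, 2.0])
y = 10.0                         # scalar: broadcast to t's shape
dy = np.array([0.1, 0.2, 0.3])
t, y, dy = validate_inputs(t, y, dy, sort_by=t)
# All three outputs have shape (3,) and are sorted by t:
# t -> [1. 2. 3.], dy -> [0.2 0.3 0.1], y -> [10. 10. 10.]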
jakevdp/supersmoother
supersmoother/utils.py
_prep_smooth
def _prep_smooth(t, y, dy, span, t_out, span_out, period): """Private function to prepare & check variables for smooth utilities""" # If period is provided, sort by phases. Otherwise sort by t if period: t = t % period if t_out is not None: t_out = t_out % period t, y, dy = validate_inputs(t, y, dy, sort_by=t) if span_out is not None: if t_out is None: raise ValueError("Must specify t_out when span_out is given") if span is not None: raise ValueError("Must specify only one of span, span_out") span, t_out = np.broadcast_arrays(span_out, t_out) indices = np.searchsorted(t, t_out) elif span is None: raise ValueError("Must specify either span_out or span") else: indices = None return t, y, dy, span, t_out, span_out, indices
python
def _prep_smooth(t, y, dy, span, t_out, span_out, period): """Private function to prepare & check variables for smooth utilities""" # If period is provided, sort by phases. Otherwise sort by t if period: t = t % period if t_out is not None: t_out = t_out % period t, y, dy = validate_inputs(t, y, dy, sort_by=t) if span_out is not None: if t_out is None: raise ValueError("Must specify t_out when span_out is given") if span is not None: raise ValueError("Must specify only one of span, span_out") span, t_out = np.broadcast_arrays(span_out, t_out) indices = np.searchsorted(t, t_out) elif span is None: raise ValueError("Must specify either span_out or span") else: indices = None return t, y, dy, span, t_out, span_out, indices
[ "def", "_prep_smooth", "(", "t", ",", "y", ",", "dy", ",", "span", ",", "t_out", ",", "span_out", ",", "period", ")", ":", "# If period is provided, sort by phases. Otherwise sort by t", "if", "period", ":", "t", "=", "t", "%", "period", "if", "t_out", "is", "not", "None", ":", "t_out", "=", "t_out", "%", "period", "t", ",", "y", ",", "dy", "=", "validate_inputs", "(", "t", ",", "y", ",", "dy", ",", "sort_by", "=", "t", ")", "if", "span_out", "is", "not", "None", ":", "if", "t_out", "is", "None", ":", "raise", "ValueError", "(", "\"Must specify t_out when span_out is given\"", ")", "if", "span", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Must specify only one of span, span_out\"", ")", "span", ",", "t_out", "=", "np", ".", "broadcast_arrays", "(", "span_out", ",", "t_out", ")", "indices", "=", "np", ".", "searchsorted", "(", "t", ",", "t_out", ")", "elif", "span", "is", "None", ":", "raise", "ValueError", "(", "\"Must specify either span_out or span\"", ")", "else", ":", "indices", "=", "None", "return", "t", ",", "y", ",", "dy", ",", "span", ",", "t_out", ",", "span_out", ",", "indices" ]
Private function to prepare & check variables for smooth utilities
[ "Private", "function", "to", "prepare", "&", "check", "variables", "for", "smooth", "utilities" ]
train
https://github.com/jakevdp/supersmoother/blob/0c96cf13dcd6f9006d3c0421f9cd6e18abe27a2f/supersmoother/utils.py#L76-L99
jakevdp/supersmoother
supersmoother/utils.py
moving_average_smooth
def moving_average_smooth(t, y, dy, span=None, cv=True, t_out=None, span_out=None, period=None): """Perform a moving-average smooth of the data Parameters ---------- t, y, dy : array_like time, value, and error in value of the input data span : array_like the integer spans of the data cv : boolean (default=True) if True, treat the problem as a cross-validation, i.e. don't use each point in the evaluation of its own smoothing. t_out : array_like (optional) the output times for the moving averages span_out : array_like (optional) the spans associated with the output times t_out period : float if provided, then consider the inputs periodic with the given period Returns ------- y_smooth : array_like smoothed y values at each time t (or t_out) """ prep = _prep_smooth(t, y, dy, span, t_out, span_out, period) t, y, dy, span, t_out, span_out, indices = prep w = 1. / (dy ** 2) w, yw = windowed_sum([w, y * w], t=t, span=span, subtract_mid=cv, indices=indices, period=period) if t_out is None or span_out is not None: return yw / w else: i = np.minimum(len(t) - 1, np.searchsorted(t, t_out)) return yw[i] / w[i]
python
def moving_average_smooth(t, y, dy, span=None, cv=True, t_out=None, span_out=None, period=None): """Perform a moving-average smooth of the data Parameters ---------- t, y, dy : array_like time, value, and error in value of the input data span : array_like the integer spans of the data cv : boolean (default=True) if True, treat the problem as a cross-validation, i.e. don't use each point in the evaluation of its own smoothing. t_out : array_like (optional) the output times for the moving averages span_out : array_like (optional) the spans associated with the output times t_out period : float if provided, then consider the inputs periodic with the given period Returns ------- y_smooth : array_like smoothed y values at each time t (or t_out) """ prep = _prep_smooth(t, y, dy, span, t_out, span_out, period) t, y, dy, span, t_out, span_out, indices = prep w = 1. / (dy ** 2) w, yw = windowed_sum([w, y * w], t=t, span=span, subtract_mid=cv, indices=indices, period=period) if t_out is None or span_out is not None: return yw / w else: i = np.minimum(len(t) - 1, np.searchsorted(t, t_out)) return yw[i] / w[i]
[ "def", "moving_average_smooth", "(", "t", ",", "y", ",", "dy", ",", "span", "=", "None", ",", "cv", "=", "True", ",", "t_out", "=", "None", ",", "span_out", "=", "None", ",", "period", "=", "None", ")", ":", "prep", "=", "_prep_smooth", "(", "t", ",", "y", ",", "dy", ",", "span", ",", "t_out", ",", "span_out", ",", "period", ")", "t", ",", "y", ",", "dy", ",", "span", ",", "t_out", ",", "span_out", ",", "indices", "=", "prep", "w", "=", "1.", "/", "(", "dy", "**", "2", ")", "w", ",", "yw", "=", "windowed_sum", "(", "[", "w", ",", "y", "*", "w", "]", ",", "t", "=", "t", ",", "span", "=", "span", ",", "subtract_mid", "=", "cv", ",", "indices", "=", "indices", ",", "period", "=", "period", ")", "if", "t_out", "is", "None", "or", "span_out", "is", "not", "None", ":", "return", "yw", "/", "w", "else", ":", "i", "=", "np", ".", "minimum", "(", "len", "(", "t", ")", "-", "1", ",", "np", ".", "searchsorted", "(", "t", ",", "t_out", ")", ")", "return", "yw", "[", "i", "]", "/", "w", "[", "i", "]" ]
Perform a moving-average smooth of the data Parameters ---------- t, y, dy : array_like time, value, and error in value of the input data span : array_like the integer spans of the data cv : boolean (default=True) if True, treat the problem as a cross-validation, i.e. don't use each point in the evaluation of its own smoothing. t_out : array_like (optional) the output times for the moving averages span_out : array_like (optional) the spans associated with the output times t_out period : float if provided, then consider the inputs periodic with the given period Returns ------- y_smooth : array_like smoothed y values at each time t (or t_out)
[ "Perform", "a", "moving", "-", "average", "smooth", "of", "the", "data" ]
train
https://github.com/jakevdp/supersmoother/blob/0c96cf13dcd6f9006d3c0421f9cd6e18abe27a2f/supersmoother/utils.py#L102-L138
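A worked example on noisy data; span=11 is assumed here to be interpreted as an 11-point window, consistent with "the integer spans of the data" above:

import numpy as np

rng = np.random.RandomState(0)
t = np.sort(rng.uniform(0, 10, 100))
y = np.sin(t) + 0.1 * rng.randn(100)
dy = np.full(100, 0.1)
# cv=True by default, so each point is excluded from its own window.
y_ma = moving_average_smooth(t, y, dy, span=11)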
jakevdp/supersmoother
supersmoother/utils.py
linear_smooth
def linear_smooth(t, y, dy, span=None, cv=True, t_out=None, span_out=None, period=None): """Perform a linear smooth of the data Parameters ---------- t, y, dy : array_like time, value, and error in value of the input data span : array_like the integer spans of the data cv : boolean (default=True) if True, treat the problem as a cross-validation, i.e. don't use each point in the evaluation of its own smoothing. t_out : array_like (optional) the output times for the moving averages span_out : array_like (optional) the spans associated with the output times t_out period : float if provided, then consider the inputs periodic with the given period Returns ------- y_smooth : array_like smoothed y values at each time t or t_out """ t_input = t prep = _prep_smooth(t, y, dy, span, t_out, span_out, period) t, y, dy, span, t_out, span_out, indices = prep if period: t_input = np.asarray(t_input) % period w = 1. / (dy ** 2) w, yw, tw, tyw, ttw = windowed_sum([w, y * w, w, y * w, w], t=t, tpowers=[0, 0, 1, 1, 2], span=span, indices=indices, subtract_mid=cv, period=period) denominator = (w * ttw - tw * tw) slope = (tyw * w - tw * yw) intercept = (ttw * yw - tyw * tw) if np.any(denominator == 0): raise ValueError("Zero denominator in linear smooth. This usually " "indicates that the input contains duplicate points.") if t_out is None: return (slope * t_input + intercept) / denominator elif span_out is not None: return (slope * t_out + intercept) / denominator else: i = np.minimum(len(t) - 1, np.searchsorted(t, t_out)) return (slope[i] * t_out + intercept[i]) / denominator[i]
python
def linear_smooth(t, y, dy, span=None, cv=True, t_out=None, span_out=None, period=None): """Perform a linear smooth of the data Parameters ---------- t, y, dy : array_like time, value, and error in value of the input data span : array_like the integer spans of the data cv : boolean (default=True) if True, treat the problem as a cross-validation, i.e. don't use each point in the evaluation of its own smoothing. t_out : array_like (optional) the output times for the moving averages span_out : array_like (optional) the spans associated with the output times t_out period : float if provided, then consider the inputs periodic with the given period Returns ------- y_smooth : array_like smoothed y values at each time t or t_out """ t_input = t prep = _prep_smooth(t, y, dy, span, t_out, span_out, period) t, y, dy, span, t_out, span_out, indices = prep if period: t_input = np.asarray(t_input) % period w = 1. / (dy ** 2) w, yw, tw, tyw, ttw = windowed_sum([w, y * w, w, y * w, w], t=t, tpowers=[0, 0, 1, 1, 2], span=span, indices=indices, subtract_mid=cv, period=period) denominator = (w * ttw - tw * tw) slope = (tyw * w - tw * yw) intercept = (ttw * yw - tyw * tw) if np.any(denominator == 0): raise ValueError("Zero denominator in linear smooth. This usually " "indicates that the input contains duplicate points.") if t_out is None: return (slope * t_input + intercept) / denominator elif span_out is not None: return (slope * t_out + intercept) / denominator else: i = np.minimum(len(t) - 1, np.searchsorted(t, t_out)) return (slope[i] * t_out + intercept[i]) / denominator[i]
[ "def", "linear_smooth", "(", "t", ",", "y", ",", "dy", ",", "span", "=", "None", ",", "cv", "=", "True", ",", "t_out", "=", "None", ",", "span_out", "=", "None", ",", "period", "=", "None", ")", ":", "t_input", "=", "t", "prep", "=", "_prep_smooth", "(", "t", ",", "y", ",", "dy", ",", "span", ",", "t_out", ",", "span_out", ",", "period", ")", "t", ",", "y", ",", "dy", ",", "span", ",", "t_out", ",", "span_out", ",", "indices", "=", "prep", "if", "period", ":", "t_input", "=", "np", ".", "asarray", "(", "t_input", ")", "%", "period", "w", "=", "1.", "/", "(", "dy", "**", "2", ")", "w", ",", "yw", ",", "tw", ",", "tyw", ",", "ttw", "=", "windowed_sum", "(", "[", "w", ",", "y", "*", "w", ",", "w", ",", "y", "*", "w", ",", "w", "]", ",", "t", "=", "t", ",", "tpowers", "=", "[", "0", ",", "0", ",", "1", ",", "1", ",", "2", "]", ",", "span", "=", "span", ",", "indices", "=", "indices", ",", "subtract_mid", "=", "cv", ",", "period", "=", "period", ")", "denominator", "=", "(", "w", "*", "ttw", "-", "tw", "*", "tw", ")", "slope", "=", "(", "tyw", "*", "w", "-", "tw", "*", "yw", ")", "intercept", "=", "(", "ttw", "*", "yw", "-", "tyw", "*", "tw", ")", "if", "np", ".", "any", "(", "denominator", "==", "0", ")", ":", "raise", "ValueError", "(", "\"Zero denominator in linear smooth. This usually \"", "\"indicates that the input contains duplicate points.\"", ")", "if", "t_out", "is", "None", ":", "return", "(", "slope", "*", "t_input", "+", "intercept", ")", "/", "denominator", "elif", "span_out", "is", "not", "None", ":", "return", "(", "slope", "*", "t_out", "+", "intercept", ")", "/", "denominator", "else", ":", "i", "=", "np", ".", "minimum", "(", "len", "(", "t", ")", "-", "1", ",", "np", ".", "searchsorted", "(", "t", ",", "t_out", ")", ")", "return", "(", "slope", "[", "i", "]", "*", "t_out", "+", "intercept", "[", "i", "]", ")", "/", "denominator", "[", "i", "]" ]
Perform a linear smooth of the data Parameters ---------- t, y, dy : array_like time, value, and error in value of the input data span : array_like the integer spans of the data cv : boolean (default=True) if True, treat the problem as a cross-validation, i.e. don't use each point in the evaluation of its own smoothing. t_out : array_like (optional) the output times for the moving averages span_out : array_like (optional) the spans associated with the output times t_out period : float if provided, then consider the inputs periodic with the given period Returns ------- y_smooth : array_like smoothed y values at each time t or t_out
[ "Perform", "a", "linear", "smooth", "of", "the", "data" ]
train
https://github.com/jakevdp/supersmoother/blob/0c96cf13dcd6f9006d3c0421f9cd6e18abe27a2f/supersmoother/utils.py#L141-L192
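A minimal usage sketch for linear_smooth, assuming the import path implied by func_path_in_repository above (supersmoother/utils.py); the toy data is illustrative only.

import numpy as np
from supersmoother.utils import linear_smooth  # assumed import path

rng = np.random.RandomState(0)
t = np.sort(rng.uniform(0, 10, 100))  # irregularly sampled times
y = np.sin(t) + 0.1 * rng.randn(100)  # noisy signal
dy = np.full(100, 0.1)                # per-point uncertainties

# Fixed 11-point windows; cv=True excludes each point from its own
# window, giving leave-one-out smoothed values.
y_smooth = linear_smooth(t, y, dy, span=11, cv=True)
print(y_smooth.shape)  # (100,)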
jakevdp/supersmoother
supersmoother/utils.py
multinterp
def multinterp(x, y, xquery, slow=False): """Multiple linear interpolations Parameters ---------- x : array_like, shape=(N,) sorted array of x values y : array_like, shape=(N, M) array of y values corresponding to each x value xquery : array_like, shape=(M,) array of query values slow : boolean, default=False if True, use slow method (used mainly for unit testing) Returns ------- yquery : ndarray, shape=(M,) The interpolated values corresponding to each x query. """ x, y, xquery = map(np.asarray, (x, y, xquery)) assert x.ndim == 1 assert xquery.ndim == 1 assert y.shape == x.shape + xquery.shape # make sure xmin < xquery < xmax in all cases xquery = np.clip(xquery, x.min(), x.max()) if slow: from scipy.interpolate import interp1d return np.array([interp1d(x, y)(xq) for xq, y in zip(xquery, y.T)]) elif len(x) == 3: # Most common case: use a faster approach yq_lower = y[0] + (xquery - x[0]) * (y[1] - y[0]) / (x[1] - x[0]) yq_upper = y[1] + (xquery - x[1]) * (y[2] - y[1]) / (x[2] - x[1]) return np.where(xquery < x[1], yq_lower, yq_upper) else: i = np.clip(np.searchsorted(x, xquery, side='right') - 1, 0, len(x) - 2) j = np.arange(len(xquery)) return y[i, j] + ((xquery - x[i]) * (y[i + 1, j] - y[i, j]) / (x[i + 1] - x[i]))
python
def multinterp(x, y, xquery, slow=False): """Multiple linear interpolations Parameters ---------- x : array_like, shape=(N,) sorted array of x values y : array_like, shape=(N, M) array of y values corresponding to each x value xquery : array_like, shape=(M,) array of query values slow : boolean, default=False if True, use slow method (used mainly for unit testing) Returns ------- yquery : ndarray, shape=(M,) The interpolated values corresponding to each x query. """ x, y, xquery = map(np.asarray, (x, y, xquery)) assert x.ndim == 1 assert xquery.ndim == 1 assert y.shape == x.shape + xquery.shape # make sure xmin < xquery < xmax in all cases xquery = np.clip(xquery, x.min(), x.max()) if slow: from scipy.interpolate import interp1d return np.array([interp1d(x, y)(xq) for xq, y in zip(xquery, y.T)]) elif len(x) == 3: # Most common case: use a faster approach yq_lower = y[0] + (xquery - x[0]) * (y[1] - y[0]) / (x[1] - x[0]) yq_upper = y[1] + (xquery - x[1]) * (y[2] - y[1]) / (x[2] - x[1]) return np.where(xquery < x[1], yq_lower, yq_upper) else: i = np.clip(np.searchsorted(x, xquery, side='right') - 1, 0, len(x) - 2) j = np.arange(len(xquery)) return y[i, j] + ((xquery - x[i]) * (y[i + 1, j] - y[i, j]) / (x[i + 1] - x[i]))
[ "def", "multinterp", "(", "x", ",", "y", ",", "xquery", ",", "slow", "=", "False", ")", ":", "x", ",", "y", ",", "xquery", "=", "map", "(", "np", ".", "asarray", ",", "(", "x", ",", "y", ",", "xquery", ")", ")", "assert", "x", ".", "ndim", "==", "1", "assert", "xquery", ".", "ndim", "==", "1", "assert", "y", ".", "shape", "==", "x", ".", "shape", "+", "xquery", ".", "shape", "# make sure xmin < xquery < xmax in all cases", "xquery", "=", "np", ".", "clip", "(", "xquery", ",", "x", ".", "min", "(", ")", ",", "x", ".", "max", "(", ")", ")", "if", "slow", ":", "from", "scipy", ".", "interpolate", "import", "interp1d", "return", "np", ".", "array", "(", "[", "interp1d", "(", "x", ",", "y", ")", "(", "xq", ")", "for", "xq", ",", "y", "in", "zip", "(", "xquery", ",", "y", ".", "T", ")", "]", ")", "elif", "len", "(", "x", ")", "==", "3", ":", "# Most common case: use a faster approach", "yq_lower", "=", "y", "[", "0", "]", "+", "(", "xquery", "-", "x", "[", "0", "]", ")", "*", "(", "y", "[", "1", "]", "-", "y", "[", "0", "]", ")", "/", "(", "x", "[", "1", "]", "-", "x", "[", "0", "]", ")", "yq_upper", "=", "y", "[", "1", "]", "+", "(", "xquery", "-", "x", "[", "1", "]", ")", "*", "(", "y", "[", "2", "]", "-", "y", "[", "1", "]", ")", "/", "(", "x", "[", "2", "]", "-", "x", "[", "1", "]", ")", "return", "np", ".", "where", "(", "xquery", "<", "x", "[", "1", "]", ",", "yq_lower", ",", "yq_upper", ")", "else", ":", "i", "=", "np", ".", "clip", "(", "np", ".", "searchsorted", "(", "x", ",", "xquery", ",", "side", "=", "'right'", ")", "-", "1", ",", "0", ",", "len", "(", "x", ")", "-", "2", ")", "j", "=", "np", ".", "arange", "(", "len", "(", "xquery", ")", ")", "return", "y", "[", "i", ",", "j", "]", "+", "(", "(", "xquery", "-", "x", "[", "i", "]", ")", "*", "(", "y", "[", "i", "+", "1", ",", "j", "]", "-", "y", "[", "i", ",", "j", "]", ")", "/", "(", "x", "[", "i", "+", "1", "]", "-", "x", "[", "i", "]", ")", ")" ]
Multiple linear interpolations Parameters ---------- x : array_like, shape=(N,) sorted array of x values y : array_like, shape=(N, M) array of y values corresponding to each x value xquery : array_like, shape=(M,) array of query values slow : boolean, default=False if True, use slow method (used mainly for unit testing) Returns ------- yquery : ndarray, shape=(M,) The interpolated values corresponding to each x query.
[ "Multiple", "linear", "interpolations" ]
train
https://github.com/jakevdp/supersmoother/blob/0c96cf13dcd6f9006d3c0421f9cd6e18abe27a2f/supersmoother/utils.py#L195-L235
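A small worked check of multinterp's column-wise behaviour, under the same assumed import location; the expected values follow directly from the linear-interpolation formulas in the code.

import numpy as np
from supersmoother.utils import multinterp  # assumed import path

x = np.array([0.0, 1.0, 2.0])      # shared, sorted grid (N=3)
y = np.array([[0.0, 0.0],          # y has shape (N, M): one column
              [1.0, 2.0],          # of values per query point
              [4.0, 6.0]])
xquery = np.array([0.5, 1.5])      # one query per column (M=2)

# Each query is interpolated against its own column of y; with N=3
# the fast two-segment branch is used.
print(multinterp(x, y, xquery))    # [0.5, 4.0]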
vsudilov/flask-consulate
flask_consulate/consul.py
Consul._create_session
def _create_session(self, test_connection=False):
    """
    Create a consulate.session object, and query for its leader to
    ensure that the connection is made.

    :param test_connection: call .leader() to ensure that the connection
        is valid
    :type test_connection: bool
    :return: consulate.Session instance
    """
    session = consulate.Session(host=self.host, port=self.port)
    if test_connection:
        session.status.leader()
    return session
python
def _create_session(self, test_connection=False):
    """
    Create a consulate.session object, and query for its leader to
    ensure that the connection is made.

    :param test_connection: call .leader() to ensure that the connection
        is valid
    :type test_connection: bool
    :return: consulate.Session instance
    """
    session = consulate.Session(host=self.host, port=self.port)
    if test_connection:
        session.status.leader()
    return session
[ "def", "_create_session", "(", "self", ",", "test_connection", "=", "False", ")", ":", "session", "=", "consulate", ".", "Session", "(", "host", "=", "self", ".", "host", ",", "port", "=", "self", ".", "port", ")", "if", "test_connection", ":", "session", ".", "status", ".", "leader", "(", ")", "return", "session" ]
Create a consulate.session object, and query for its leader to ensure
that the connection is made.

:param test_connection: call .leader() to ensure that the connection
    is valid
:type test_connection: bool
:return: consulate.Session instance
[ "Create", "a", "consulate", ".", "session", "object", "and", "query", "for", "its", "leader", "to", "ensure", "that", "the", "connection", "is", "made", "." ]
train
https://github.com/vsudilov/flask-consulate/blob/514f8754e7186f960237ed2836206993d5d3d3b6/flask_consulate/consul.py#L62-L75
vsudilov/flask-consulate
flask_consulate/consul.py
Consul.apply_remote_config
def apply_remote_config(self, namespace=None): """ Applies all config values defined in consul's kv store to self.app. There is no guarantee that these values will not be overwritten later elsewhere. :param namespace: kv namespace/directory. Defaults to DEFAULT_KV_NAMESPACE :return: None """ if namespace is None: namespace = "config/{service}/{environment}/".format( service=os.environ.get('SERVICE', 'generic_service'), environment=os.environ.get('ENVIRONMENT', 'generic_environment') ) for k, v in iteritems(self.session.kv.find(namespace)): k = k.replace(namespace, '') try: self.app.config[k] = json.loads(v) except (TypeError, ValueError): self.app.logger.warning("Couldn't de-serialize {} to json, using raw value".format(v)) self.app.config[k] = v msg = "Set {k}={v} from consul kv '{ns}'".format( k=k, v=v, ns=namespace, ) self.app.logger.debug(msg)
python
def apply_remote_config(self, namespace=None): """ Applies all config values defined in consul's kv store to self.app. There is no guarantee that these values will not be overwritten later elsewhere. :param namespace: kv namespace/directory. Defaults to DEFAULT_KV_NAMESPACE :return: None """ if namespace is None: namespace = "config/{service}/{environment}/".format( service=os.environ.get('SERVICE', 'generic_service'), environment=os.environ.get('ENVIRONMENT', 'generic_environment') ) for k, v in iteritems(self.session.kv.find(namespace)): k = k.replace(namespace, '') try: self.app.config[k] = json.loads(v) except (TypeError, ValueError): self.app.logger.warning("Couldn't de-serialize {} to json, using raw value".format(v)) self.app.config[k] = v msg = "Set {k}={v} from consul kv '{ns}'".format( k=k, v=v, ns=namespace, ) self.app.logger.debug(msg)
[ "def", "apply_remote_config", "(", "self", ",", "namespace", "=", "None", ")", ":", "if", "namespace", "is", "None", ":", "namespace", "=", "\"config/{service}/{environment}/\"", ".", "format", "(", "service", "=", "os", ".", "environ", ".", "get", "(", "'SERVICE'", ",", "'generic_service'", ")", ",", "environment", "=", "os", ".", "environ", ".", "get", "(", "'ENVIRONMENT'", ",", "'generic_environment'", ")", ")", "for", "k", ",", "v", "in", "iteritems", "(", "self", ".", "session", ".", "kv", ".", "find", "(", "namespace", ")", ")", ":", "k", "=", "k", ".", "replace", "(", "namespace", ",", "''", ")", "try", ":", "self", ".", "app", ".", "config", "[", "k", "]", "=", "json", ".", "loads", "(", "v", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "self", ".", "app", ".", "logger", ".", "warning", "(", "\"Couldn't de-serialize {} to json, using raw value\"", ".", "format", "(", "v", ")", ")", "self", ".", "app", ".", "config", "[", "k", "]", "=", "v", "msg", "=", "\"Set {k}={v} from consul kv '{ns}'\"", ".", "format", "(", "k", "=", "k", ",", "v", "=", "v", ",", "ns", "=", "namespace", ",", ")", "self", ".", "app", ".", "logger", ".", "debug", "(", "msg", ")" ]
Applies all config values defined in consul's kv store to self.app. There is no guarantee that these values will not be overwritten later elsewhere. :param namespace: kv namespace/directory. Defaults to DEFAULT_KV_NAMESPACE :return: None
[ "Applies", "all", "config", "values", "defined", "in", "consul", "s", "kv", "store", "to", "self", ".", "app", "." ]
train
https://github.com/vsudilov/flask-consulate/blob/514f8754e7186f960237ed2836206993d5d3d3b6/flask_consulate/consul.py#L78-L109
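A hedged wiring sketch for the extension: the Consul class and method come from the records above, while the package-level export and the app-taking constructor are assumptions (the constructor is not shown in these records).

import os
from flask import Flask
from flask_consulate import Consul  # assumed package-level export

# The default namespace is built from these two environment variables.
os.environ.setdefault('SERVICE', 'myapp')
os.environ.setdefault('ENVIRONMENT', 'staging')

app = Flask(__name__)
consul = Consul(app=app)  # assumed constructor signature

# Copies every key under config/myapp/staging/ into app.config,
# JSON-decoding each value and falling back to the raw string.
consul.apply_remote_config()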
vsudilov/flask-consulate
flask_consulate/consul.py
Consul.register_service
def register_service(self, **kwargs): """ register this service with consul kwargs passed to Consul.agent.service.register """ kwargs.setdefault('name', self.app.name) self.session.agent.service.register(**kwargs)
python
def register_service(self, **kwargs): """ register this service with consul kwargs passed to Consul.agent.service.register """ kwargs.setdefault('name', self.app.name) self.session.agent.service.register(**kwargs)
[ "def", "register_service", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "setdefault", "(", "'name'", ",", "self", ".", "app", ".", "name", ")", "self", ".", "session", ".", "agent", ".", "service", ".", "register", "(", "*", "*", "kwargs", ")" ]
register this service with consul kwargs passed to Consul.agent.service.register
[ "register", "this", "service", "with", "consul", "kwargs", "passed", "to", "Consul", ".", "agent", ".", "service", ".", "register" ]
train
https://github.com/vsudilov/flask-consulate/blob/514f8754e7186f960237ed2836206993d5d3d3b6/flask_consulate/consul.py#L112-L118
vsudilov/flask-consulate
flask_consulate/service.py
ConsulService._resolve
def _resolve(self): """ Query the consul DNS server for the service IP and port """ endpoints = {} r = self.resolver.query(self.service, 'SRV') for rec in r.response.additional: name = rec.name.to_text() addr = rec.items[0].address endpoints[name] = {'addr': addr} for rec in r.response.answer[0].items: name = '.'.join(rec.target.labels) endpoints[name]['port'] = rec.port return [ 'http://{ip}:{port}'.format( ip=v['addr'], port=v['port'] ) for v in endpoints.values() ]
python
def _resolve(self): """ Query the consul DNS server for the service IP and port """ endpoints = {} r = self.resolver.query(self.service, 'SRV') for rec in r.response.additional: name = rec.name.to_text() addr = rec.items[0].address endpoints[name] = {'addr': addr} for rec in r.response.answer[0].items: name = '.'.join(rec.target.labels) endpoints[name]['port'] = rec.port return [ 'http://{ip}:{port}'.format( ip=v['addr'], port=v['port'] ) for v in endpoints.values() ]
[ "def", "_resolve", "(", "self", ")", ":", "endpoints", "=", "{", "}", "r", "=", "self", ".", "resolver", ".", "query", "(", "self", ".", "service", ",", "'SRV'", ")", "for", "rec", "in", "r", ".", "response", ".", "additional", ":", "name", "=", "rec", ".", "name", ".", "to_text", "(", ")", "addr", "=", "rec", ".", "items", "[", "0", "]", ".", "address", "endpoints", "[", "name", "]", "=", "{", "'addr'", ":", "addr", "}", "for", "rec", "in", "r", ".", "response", ".", "answer", "[", "0", "]", ".", "items", ":", "name", "=", "'.'", ".", "join", "(", "rec", ".", "target", ".", "labels", ")", "endpoints", "[", "name", "]", "[", "'port'", "]", "=", "rec", ".", "port", "return", "[", "'http://{ip}:{port}'", ".", "format", "(", "ip", "=", "v", "[", "'addr'", "]", ",", "port", "=", "v", "[", "'port'", "]", ")", "for", "v", "in", "endpoints", ".", "values", "(", ")", "]" ]
Query the consul DNS server for the service IP and port
[ "Query", "the", "consul", "DNS", "server", "for", "the", "service", "IP", "and", "port" ]
train
https://github.com/vsudilov/flask-consulate/blob/514f8754e7186f960237ed2836206993d5d3d3b6/flask_consulate/service.py#L55-L72
vsudilov/flask-consulate
flask_consulate/service.py
ConsulService.request
def request(self, method, endpoint, **kwargs): """ Proxy to requests.request :param method: str formatted http method :param endpoint: service endpoint :param kwargs: kwargs passed directly to requests.request :return: """ kwargs.setdefault('timeout', (1, 30)) return self.session.request( method, urljoin(self.base_url, endpoint), **kwargs )
python
def request(self, method, endpoint, **kwargs): """ Proxy to requests.request :param method: str formatted http method :param endpoint: service endpoint :param kwargs: kwargs passed directly to requests.request :return: """ kwargs.setdefault('timeout', (1, 30)) return self.session.request( method, urljoin(self.base_url, endpoint), **kwargs )
[ "def", "request", "(", "self", ",", "method", ",", "endpoint", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "setdefault", "(", "'timeout'", ",", "(", "1", ",", "30", ")", ")", "return", "self", ".", "session", ".", "request", "(", "method", ",", "urljoin", "(", "self", ".", "base_url", ",", "endpoint", ")", ",", "*", "*", "kwargs", ")" ]
Proxy to requests.request :param method: str formatted http method :param endpoint: service endpoint :param kwargs: kwargs passed directly to requests.request :return:
[ "Proxy", "to", "requests", ".", "request", ":", "param", "method", ":", "str", "formatted", "http", "method", ":", "param", "endpoint", ":", "service", "endpoint", ":", "param", "kwargs", ":", "kwargs", "passed", "directly", "to", "requests", ".", "request", ":", "return", ":" ]
train
https://github.com/vsudilov/flask-consulate/blob/514f8754e7186f960237ed2836206993d5d3d3b6/flask_consulate/service.py#L82-L95
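A usage sketch for ConsulService.request; the consul:// constructor argument is an assumption not shown in these records, and /health is a hypothetical endpoint. What the records do establish is the default (1, 30) connect/read timeout and the urljoin against the DNS-resolved base_url.

from flask_consulate.service import ConsulService

# assumed constructor argument: a consul:// service URI
service = ConsulService('consul://tag.health_service.service')

# Proxies to requests.request with timeout=(1, 30) by default; the
# endpoint is joined onto a base_url resolved via consul SRV records.
response = service.request('GET', '/health')  # hypothetical endpoint
print(response.status_code)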
vsudilov/flask-consulate
flask_consulate/decorators.py
with_retry_connections
def with_retry_connections(max_tries=3, sleep=0.05): """ Decorator that wraps an entire function in a try/except clause. On requests.exceptions.ConnectionError, will re-run the function code until success or max_tries is reached. :param max_tries: maximum number of attempts before giving up :param sleep: time to sleep between tries, or None """ def decorator(f): @functools.wraps(f) def f_retry(*args, **kwargs): tries = 0 while True: try: return f(*args, **kwargs) except (ConnectionError, ConnectTimeout) as e: tries += 1 if tries >= max_tries: raise ConsulConnectionError(e) if sleep: time.sleep(sleep) return f_retry return decorator
python
def with_retry_connections(max_tries=3, sleep=0.05): """ Decorator that wraps an entire function in a try/except clause. On requests.exceptions.ConnectionError, will re-run the function code until success or max_tries is reached. :param max_tries: maximum number of attempts before giving up :param sleep: time to sleep between tries, or None """ def decorator(f): @functools.wraps(f) def f_retry(*args, **kwargs): tries = 0 while True: try: return f(*args, **kwargs) except (ConnectionError, ConnectTimeout) as e: tries += 1 if tries >= max_tries: raise ConsulConnectionError(e) if sleep: time.sleep(sleep) return f_retry return decorator
[ "def", "with_retry_connections", "(", "max_tries", "=", "3", ",", "sleep", "=", "0.05", ")", ":", "def", "decorator", "(", "f", ")", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "f_retry", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "tries", "=", "0", "while", "True", ":", "try", ":", "return", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "(", "ConnectionError", ",", "ConnectTimeout", ")", "as", "e", ":", "tries", "+=", "1", "if", "tries", ">=", "max_tries", ":", "raise", "ConsulConnectionError", "(", "e", ")", "if", "sleep", ":", "time", ".", "sleep", "(", "sleep", ")", "return", "f_retry", "return", "decorator" ]
Decorator that wraps an entire function in a try/except clause. On requests.exceptions.ConnectionError, will re-run the function code until success or max_tries is reached. :param max_tries: maximum number of attempts before giving up :param sleep: time to sleep between tries, or None
[ "Decorator", "that", "wraps", "an", "entire", "function", "in", "a", "try", "/", "except", "clause", ".", "On", "requests", ".", "exceptions", ".", "ConnectionError", "will", "re", "-", "run", "the", "function", "code", "until", "success", "or", "max_tries", "is", "reached", "." ]
train
https://github.com/vsudilov/flask-consulate/blob/514f8754e7186f960237ed2836206993d5d3d3b6/flask_consulate/decorators.py#L11-L34
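Minimal decorator usage mirroring the retry semantics above; fetch_leader is a hypothetical helper on the caller's side.

from flask_consulate.decorators import with_retry_connections

@with_retry_connections(max_tries=5, sleep=0.1)
def fetch_leader(session):
    # Any ConnectionError/ConnectTimeout raised here is retried up to
    # five times (sleeping 0.1s between attempts), then re-raised as
    # ConsulConnectionError.
    return session.status.leader()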
casebeer/audiogen
audiogen/util.py
crop
def crop(gens, seconds=5, cropper=None): ''' Crop the generator to a finite number of frames Return a generator which outputs the provided generator limited to enough samples to produce seconds seconds of audio (default 5s) at the provided frame rate. ''' if hasattr(gens, "next"): # single generator gens = (gens,) if cropper == None: cropper = lambda gen: itertools.islice(gen, 0, seconds * sampler.FRAME_RATE) cropped = [cropper(gen) for gen in gens] return cropped[0] if len(cropped) == 1 else cropped
python
def crop(gens, seconds=5, cropper=None): ''' Crop the generator to a finite number of frames Return a generator which outputs the provided generator limited to enough samples to produce seconds seconds of audio (default 5s) at the provided frame rate. ''' if hasattr(gens, "next"): # single generator gens = (gens,) if cropper == None: cropper = lambda gen: itertools.islice(gen, 0, seconds * sampler.FRAME_RATE) cropped = [cropper(gen) for gen in gens] return cropped[0] if len(cropped) == 1 else cropped
[ "def", "crop", "(", "gens", ",", "seconds", "=", "5", ",", "cropper", "=", "None", ")", ":", "if", "hasattr", "(", "gens", ",", "\"next\"", ")", ":", "# single generator", "gens", "=", "(", "gens", ",", ")", "if", "cropper", "==", "None", ":", "cropper", "=", "lambda", "gen", ":", "itertools", ".", "islice", "(", "gen", ",", "0", ",", "seconds", "*", "sampler", ".", "FRAME_RATE", ")", "cropped", "=", "[", "cropper", "(", "gen", ")", "for", "gen", "in", "gens", "]", "return", "cropped", "[", "0", "]", "if", "len", "(", "cropped", ")", "==", "1", "else", "cropped" ]
Crop the generator to a finite number of frames Return a generator which outputs the provided generator limited to enough samples to produce seconds seconds of audio (default 5s) at the provided frame rate.
[ "Crop", "the", "generator", "to", "a", "finite", "number", "of", "frames" ]
train
https://github.com/casebeer/audiogen/blob/184dee2ca32c2bb4315a0f18e62288728fcd7881/audiogen/util.py#L15-L31
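A sketch of cropping an endless generator, assuming audiogen exposes a package-level tone() generator (consistent with the repository name, but an assumption here).

import audiogen
from audiogen.util import crop  # assumed import path

# Limit an endless 440 Hz tone to two seconds of samples at the
# module-level FRAME_RATE.
two_seconds = crop(audiogen.tone(440), seconds=2)
print(len(list(two_seconds)))  # 2 * FRAME_RATE samples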
casebeer/audiogen
audiogen/util.py
crop_at_zero_crossing
def crop_at_zero_crossing(gen, seconds=5, error=0.1): ''' Crop the generator, ending at a zero-crossing Crop the generator to produce approximately seconds seconds (default 5s) of audio at the provided FRAME_RATE, attempting to end the clip at a zero crossing point to avoid clicking. ''' source = iter(gen) buffer_length = int(2 * error * sampler.FRAME_RATE) # split the source into two iterators: # - start, which contains the bulk of the sound clip # - and end, which contains the final 100ms, plus 100ms past # the desired clip length. We may cut the clip anywhere # within this +/-100ms end buffer. start = itertools.islice(source, 0, int((seconds - error) * sampler.FRAME_RATE)) end = itertools.islice(source, 0, buffer_length) for sample in start: yield sample # pull end buffer generator into memory so we can work with it end = list(end) # find min by sorting buffer samples, first by abs of sample, then by distance from optimal best = sorted(enumerate(end), key=lambda x: (math.fabs(x[1]),abs((buffer_length/2)-x[0]))) print best[:10] print best[0][0] # todo: better logic when we don't have a perfect zero crossing #if best[0][1] != 0: # # we don't have a perfect zero crossing, so let's look for best fit? # pass # crop samples at index of best zero crossing for sample in end[:best[0][0] + 1]: yield sample
python
def crop_at_zero_crossing(gen, seconds=5, error=0.1): ''' Crop the generator, ending at a zero-crossing Crop the generator to produce approximately seconds seconds (default 5s) of audio at the provided FRAME_RATE, attempting to end the clip at a zero crossing point to avoid clicking. ''' source = iter(gen) buffer_length = int(2 * error * sampler.FRAME_RATE) # split the source into two iterators: # - start, which contains the bulk of the sound clip # - and end, which contains the final 100ms, plus 100ms past # the desired clip length. We may cut the clip anywhere # within this +/-100ms end buffer. start = itertools.islice(source, 0, int((seconds - error) * sampler.FRAME_RATE)) end = itertools.islice(source, 0, buffer_length) for sample in start: yield sample # pull end buffer generator into memory so we can work with it end = list(end) # find min by sorting buffer samples, first by abs of sample, then by distance from optimal best = sorted(enumerate(end), key=lambda x: (math.fabs(x[1]),abs((buffer_length/2)-x[0]))) print best[:10] print best[0][0] # todo: better logic when we don't have a perfect zero crossing #if best[0][1] != 0: # # we don't have a perfect zero crossing, so let's look for best fit? # pass # crop samples at index of best zero crossing for sample in end[:best[0][0] + 1]: yield sample
[ "def", "crop_at_zero_crossing", "(", "gen", ",", "seconds", "=", "5", ",", "error", "=", "0.1", ")", ":", "source", "=", "iter", "(", "gen", ")", "buffer_length", "=", "int", "(", "2", "*", "error", "*", "sampler", ".", "FRAME_RATE", ")", "# split the source into two iterators:", "# - start, which contains the bulk of the sound clip", "# - and end, which contains the final 100ms, plus 100ms past ", "# the desired clip length. We may cut the clip anywhere ", "# within this +/-100ms end buffer.", "start", "=", "itertools", ".", "islice", "(", "source", ",", "0", ",", "int", "(", "(", "seconds", "-", "error", ")", "*", "sampler", ".", "FRAME_RATE", ")", ")", "end", "=", "itertools", ".", "islice", "(", "source", ",", "0", ",", "buffer_length", ")", "for", "sample", "in", "start", ":", "yield", "sample", "# pull end buffer generator into memory so we can work with it", "end", "=", "list", "(", "end", ")", "# find min by sorting buffer samples, first by abs of sample, then by distance from optimal", "best", "=", "sorted", "(", "enumerate", "(", "end", ")", ",", "key", "=", "lambda", "x", ":", "(", "math", ".", "fabs", "(", "x", "[", "1", "]", ")", ",", "abs", "(", "(", "buffer_length", "/", "2", ")", "-", "x", "[", "0", "]", ")", ")", ")", "print", "best", "[", ":", "10", "]", "print", "best", "[", "0", "]", "[", "0", "]", "# todo: better logic when we don't have a perfect zero crossing", "#if best[0][1] != 0:", "#\t# we don't have a perfect zero crossing, so let's look for best fit?", "#\tpass", "# crop samples at index of best zero crossing", "for", "sample", "in", "end", "[", ":", "best", "[", "0", "]", "[", "0", "]", "+", "1", "]", ":", "yield", "sample" ]
Crop the generator, ending at a zero-crossing Crop the generator to produce approximately seconds seconds (default 5s) of audio at the provided FRAME_RATE, attempting to end the clip at a zero crossing point to avoid clicking.
[ "Crop", "the", "generator", "ending", "at", "a", "zero", "-", "crossing" ]
train
https://github.com/casebeer/audiogen/blob/184dee2ca32c2bb4315a0f18e62288728fcd7881/audiogen/util.py#L86-L123
casebeer/audiogen
audiogen/util.py
volume
def volume(gen, dB=0):
    '''Change the volume of gen by dB decibels'''
    if not hasattr(dB, 'next'): # not a generator
        scale = 10 ** (dB / 20.)
    else:
        def scale_gen():
            while True:
                yield 10 ** (next(dB) / 20.)
        scale = scale_gen()
    return envelope(gen, scale)
python
def volume(gen, dB=0):
    '''Change the volume of gen by dB decibels'''
    if not hasattr(dB, 'next'): # not a generator
        scale = 10 ** (dB / 20.)
    else:
        def scale_gen():
            while True:
                yield 10 ** (next(dB) / 20.)
        scale = scale_gen()
    return envelope(gen, scale)
[ "def", "volume", "(", "gen", ",", "dB", "=", "0", ")", ":", "if", "not", "hasattr", "(", "dB", ",", "'next'", ")", ":", "# not a generator", "scale", "=", "10", "**", "(", "dB", "/", "20.", ")", "else", ":", "def", "scale_gen", "(", ")", ":", "while", "True", ":", "yield", "10", "**", "(", "next", "(", "dB", ")", "/", "20.", ")", "scale", "=", "scale_gen", "(", ")", "return", "envelope", "(", "gen", ",", "scale", ")" ]
Change the volume of gen by dB decibels
[ "Change", "the", "volume", "of", "gen", "by", "dB", "decibles" ]
train
https://github.com/casebeer/audiogen/blob/184dee2ca32c2bb4315a0f18e62288728fcd7881/audiogen/util.py#L161-L171
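volume() takes either a fixed dB value or a generator of dB values; both forms are sketched below, with audiogen.tone assumed as before. The hasattr(dB, 'next') test targets Python 2-style generators.

import audiogen
from audiogen.util import volume  # assumed import path

quieter = volume(audiogen.tone(440), dB=-6)  # constant -6 dB gain

def fade():
    # time-varying gain: drop 0.001 dB per sample
    level = 0.0
    while True:
        yield level
        level -= 0.001

fading = volume(audiogen.tone(440), dB=fade())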
casebeer/audiogen
audiogen/util.py
mixer
def mixer(inputs, mix=None):
    '''
    Mix `inputs` together based on `mix` tuple

    `inputs` should be a tuple of *n* generators.

    `mix` should be a tuple of *m* tuples, one per desired output channel.
    Each of the *m* tuples should contain *n* generators, corresponding
    to the time-sequence of the desired mix levels for each of the *n*
    input channels.

    That is, to make an output channel contain a 50/50 mix of the two
    input channels, the tuple would be:

    (constant(0.5), constant(0.5))

    The mix generators need not be constant, allowing for time-varying
    mix levels:

    # 50% from input 1, pulse input 2 over a two second cycle
    (constant(0.5), tone(0.5))

    The mixer will return a list of *m* generators, each containing
    the data from the inputs mixed as specified.

    If no `mix` tuple is specified, all of the *n* input channels will
    be mixed together into one generator, with the volume of each
    reduced *n*-fold.

    Example:

    # three in, two out;
    # 10Hz binaural beat with white noise across both channels
    mixer(
        (white_noise(), tone(440), tone(450)),
        (
            (constant(.5), constant(1), constant(0)),
            (constant(.5), constant(0), constant(1)),
        )
    )
    '''
    if mix == None: # by default, mix all inputs down to one channel
        mix = ([constant(1.0 / len(inputs))] * len(inputs),)

    duped_inputs = zip(*[itertools.tee(i, len(mix)) for i in inputs]) # second zip is backwards
    return [\
        sum(*[multiply(m,i) for m,i in zip(channel_mix, channel_inputs)])\
        for channel_mix, channel_inputs in zip(mix, duped_inputs) \
    ]
python
def mixer(inputs, mix=None):
    '''
    Mix `inputs` together based on `mix` tuple

    `inputs` should be a tuple of *n* generators.

    `mix` should be a tuple of *m* tuples, one per desired output channel.
    Each of the *m* tuples should contain *n* generators, corresponding
    to the time-sequence of the desired mix levels for each of the *n*
    input channels.

    That is, to make an output channel contain a 50/50 mix of the two
    input channels, the tuple would be:

    (constant(0.5), constant(0.5))

    The mix generators need not be constant, allowing for time-varying
    mix levels:

    # 50% from input 1, pulse input 2 over a two second cycle
    (constant(0.5), tone(0.5))

    The mixer will return a list of *m* generators, each containing
    the data from the inputs mixed as specified.

    If no `mix` tuple is specified, all of the *n* input channels will
    be mixed together into one generator, with the volume of each
    reduced *n*-fold.

    Example:

    # three in, two out;
    # 10Hz binaural beat with white noise across both channels
    mixer(
        (white_noise(), tone(440), tone(450)),
        (
            (constant(.5), constant(1), constant(0)),
            (constant(.5), constant(0), constant(1)),
        )
    )
    '''
    if mix == None: # by default, mix all inputs down to one channel
        mix = ([constant(1.0 / len(inputs))] * len(inputs),)

    duped_inputs = zip(*[itertools.tee(i, len(mix)) for i in inputs]) # second zip is backwards
    return [\
        sum(*[multiply(m,i) for m,i in zip(channel_mix, channel_inputs)])\
        for channel_mix, channel_inputs in zip(mix, duped_inputs) \
    ]
[ "def", "mixer", "(", "inputs", ",", "mix", "=", "None", ")", ":", "if", "mix", "==", "None", ":", "# by default, mix all inputs down to one channel", "mix", "=", "(", "[", "constant", "(", "1.0", "/", "len", "(", "inputs", ")", ")", "]", "*", "len", "(", "inputs", ")", ",", ")", "duped_inputs", "=", "zip", "(", "*", "[", "itertools", ".", "tee", "(", "i", ",", "len", "(", "mix", ")", ")", "for", "i", "in", "inputs", "]", ")", "# second zip is backwards", "return", "[", "sum", "(", "*", "[", "multiply", "(", "m", ",", "i", ")", "for", "m", ",", "i", "in", "zip", "(", "channel_mix", ",", "channel_inputs", ")", "]", ")", "for", "channel_mix", ",", "channel_inputs", "in", "zip", "(", "mix", ",", "duped_inputs", ")", "]" ]
Mix `inputs` together based on `mix` tuple

`inputs` should be a tuple of *n* generators.

`mix` should be a tuple of *m* tuples, one per desired output channel.
Each of the *m* tuples should contain *n* generators, corresponding
to the time-sequence of the desired mix levels for each of the *n*
input channels.

That is, to make an output channel contain a 50/50 mix of the two
input channels, the tuple would be:

(constant(0.5), constant(0.5))

The mix generators need not be constant, allowing for time-varying
mix levels:

# 50% from input 1, pulse input 2 over a two second cycle
(constant(0.5), tone(0.5))

The mixer will return a list of *m* generators, each containing
the data from the inputs mixed as specified.

If no `mix` tuple is specified, all of the *n* input channels will
be mixed together into one generator, with the volume of each
reduced *n*-fold.

Example:

# three in, two out;
# 10Hz binaural beat with white noise across both channels
mixer(
    (white_noise(), tone(440), tone(450)),
    (
        (constant(.5), constant(1), constant(0)),
        (constant(.5), constant(0), constant(1)),
    )
)
[ "Mix", "inputs", "together", "based", "on", "mix", "tuple" ]
train
https://github.com/casebeer/audiogen/blob/184dee2ca32c2bb4315a0f18e62288728fcd7881/audiogen/util.py#L199-L250
casebeer/audiogen
audiogen/util.py
channelize
def channelize(gen, channels): ''' Break multi-channel generator into one sub-generator per channel Takes a generator producing n-tuples of samples and returns n generators, each producing samples for a single channel. Since multi-channel generators are the only reasonable way to synchronize samples across channels, and the sampler functions only take tuples of generators, you must use this function to process synchronized streams for output. ''' def pick(g, channel): for samples in g: yield samples[channel] return [pick(gen_copy, channel) for channel, gen_copy in enumerate(itertools.tee(gen, channels))]
python
def channelize(gen, channels): ''' Break multi-channel generator into one sub-generator per channel Takes a generator producing n-tuples of samples and returns n generators, each producing samples for a single channel. Since multi-channel generators are the only reasonable way to synchronize samples across channels, and the sampler functions only take tuples of generators, you must use this function to process synchronized streams for output. ''' def pick(g, channel): for samples in g: yield samples[channel] return [pick(gen_copy, channel) for channel, gen_copy in enumerate(itertools.tee(gen, channels))]
[ "def", "channelize", "(", "gen", ",", "channels", ")", ":", "def", "pick", "(", "g", ",", "channel", ")", ":", "for", "samples", "in", "g", ":", "yield", "samples", "[", "channel", "]", "return", "[", "pick", "(", "gen_copy", ",", "channel", ")", "for", "channel", ",", "gen_copy", "in", "enumerate", "(", "itertools", ".", "tee", "(", "gen", ",", "channels", ")", ")", "]" ]
Break multi-channel generator into one sub-generator per channel Takes a generator producing n-tuples of samples and returns n generators, each producing samples for a single channel. Since multi-channel generators are the only reasonable way to synchronize samples across channels, and the sampler functions only take tuples of generators, you must use this function to process synchronized streams for output.
[ "Break", "multi", "-", "channel", "generator", "into", "one", "sub", "-", "generator", "per", "channel" ]
train
https://github.com/casebeer/audiogen/blob/184dee2ca32c2bb4315a0f18e62288728fcd7881/audiogen/util.py#L252-L266
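A sketch of splitting a synchronized two-channel generator into per-channel streams, as the sampler functions require; stereo() is a hypothetical source.

import itertools
from audiogen.util import channelize  # assumed import path

def stereo():
    # hypothetical synchronized source yielding (left, right) tuples
    for i in itertools.count():
        yield (0.001 * i, 1.0 - 0.001 * i)

left, right = channelize(stereo(), 2)
print(next(left))   # 0.0
print(next(right))  # 1.0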
casebeer/audiogen
audiogen/sampler.py
file_is_seekable
def file_is_seekable(f): ''' Returns True if file `f` is seekable, and False if not Useful to determine, for example, if `f` is STDOUT to a pipe. ''' try: f.tell() logger.info("File is seekable!") except IOError, e: if e.errno == errno.ESPIPE: return False else: raise return True
python
def file_is_seekable(f): ''' Returns True if file `f` is seekable, and False if not Useful to determine, for example, if `f` is STDOUT to a pipe. ''' try: f.tell() logger.info("File is seekable!") except IOError, e: if e.errno == errno.ESPIPE: return False else: raise return True
[ "def", "file_is_seekable", "(", "f", ")", ":", "try", ":", "f", ".", "tell", "(", ")", "logger", ".", "info", "(", "\"File is seekable!\"", ")", "except", "IOError", ",", "e", ":", "if", "e", ".", "errno", "==", "errno", ".", "ESPIPE", ":", "return", "False", "else", ":", "raise", "return", "True" ]
Returns True if file `f` is seekable, and False if not Useful to determine, for example, if `f` is STDOUT to a pipe.
[ "Returns", "True", "if", "file", "f", "is", "seekable", "and", "False", "if", "not", "Useful", "to", "determine", "for", "example", "if", "f", "is", "STDOUT", "to", "a", "pipe", "." ]
train
https://github.com/casebeer/audiogen/blob/184dee2ca32c2bb4315a0f18e62288728fcd7881/audiogen/sampler.py#L54-L69
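A sketch of the pipe-detection behaviour: tell() succeeds on a regular file, while on a pipe it raises IOError(ESPIPE) and the helper returns False.

import sys
from audiogen.sampler import file_is_seekable  # assumed import path

with open('out.wav', 'wb') as f:
    print(file_is_seekable(f))  # True: regular files are seekable

# Under a shell pipe (e.g. `python script.py | aplay`) tell() raises
# IOError(ESPIPE), so this prints False.
print(file_is_seekable(sys.stdout))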
casebeer/audiogen
audiogen/sampler.py
sample
def sample(generator, min=-1, max=1, width=SAMPLE_WIDTH):
    '''Convert audio waveform generator into packed sample generator.'''
    # select signed char, short, or int based on sample width
    fmt = { 1: '<B', 2: '<h', 4: '<i' }[width]
    return (struct.pack(fmt, int(sample)) for sample in \
        normalize(hard_clip(generator, min, max),\
            min, max, -2**(width * 8 - 1), 2**(width * 8 - 1) - 1))
python
def sample(generator, min=-1, max=1, width=SAMPLE_WIDTH):
    '''Convert audio waveform generator into packed sample generator.'''
    # select signed char, short, or int based on sample width
    fmt = { 1: '<B', 2: '<h', 4: '<i' }[width]
    return (struct.pack(fmt, int(sample)) for sample in \
        normalize(hard_clip(generator, min, max),\
            min, max, -2**(width * 8 - 1), 2**(width * 8 - 1) - 1))
[ "def", "sample", "(", "generator", ",", "min", "=", "-", "1", ",", "max", "=", "1", ",", "width", "=", "SAMPLE_WIDTH", ")", ":", "# select signed char, short, or in based on sample width", "fmt", "=", "{", "1", ":", "'<B'", ",", "2", ":", "'<h'", ",", "4", ":", "'<i'", "}", "[", "width", "]", "return", "(", "struct", ".", "pack", "(", "fmt", ",", "int", "(", "sample", ")", ")", "for", "sample", "in", "normalize", "(", "hard_clip", "(", "generator", ",", "min", ",", "max", ")", ",", "min", ",", "max", ",", "-", "2", "**", "(", "width", "*", "8", "-", "1", ")", ",", "2", "**", "(", "width", "*", "8", "-", "1", ")", "-", "1", ")", ")" ]
Convert audio waveform generator into packed sample generator.
[ "Convert", "audio", "waveform", "generator", "into", "packed", "sample", "generator", "." ]
train
https://github.com/casebeer/audiogen/blob/184dee2ca32c2bb4315a0f18e62288728fcd7881/audiogen/sampler.py#L72-L78
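A packing sketch: with width=2, sample() clips to [-1, 1], rescales onto the signed 16-bit range, and emits little-endian '<h' structs; normalize and hard_clip are helpers from the same module, not shown in these records.

from audiogen.sampler import sample  # assumed import path

packed = list(sample(iter([0.0, 1.0, -1.0]), width=2))
# 1.0 maps to 32767 and -1.0 to -32768 before struct.pack('<h', ...)
print(len(packed))  # 3 two-byte strings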
casebeer/audiogen
audiogen/sampler.py
sample_all
def sample_all(generators, *args, **kwargs): '''Convert list of audio waveform generators into list of packed sample generators.''' return [sample(gen, *args, **kwargs) for gen in generators]
python
def sample_all(generators, *args, **kwargs): '''Convert list of audio waveform generators into list of packed sample generators.''' return [sample(gen, *args, **kwargs) for gen in generators]
[ "def", "sample_all", "(", "generators", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "[", "sample", "(", "gen", ",", "*", "args", ",", "*", "*", "kwargs", ")", "for", "gen", "in", "generators", "]" ]
Convert list of audio waveform generators into list of packed sample generators.
[ "Convert", "list", "of", "audio", "waveform", "generators", "into", "list", "of", "packed", "sample", "generators", "." ]
train
https://github.com/casebeer/audiogen/blob/184dee2ca32c2bb4315a0f18e62288728fcd7881/audiogen/sampler.py#L82-L84
casebeer/audiogen
audiogen/sampler.py
buffer
def buffer(stream, buffer_size=BUFFER_SIZE):
    '''
    Buffer the generator into byte strings of buffer_size samples

    Return a generator that outputs reasonably sized byte strings
    containing buffer_size samples from the generator stream.

    This allows us to output big chunks of the audio stream to
    disk at once for faster writes.
    '''
    i = iter(stream)
    return iter(lambda: "".join(itertools.islice(i, buffer_size)), "")
python
def buffer(stream, buffer_size=BUFFER_SIZE):
    '''
    Buffer the generator into byte strings of buffer_size samples

    Return a generator that outputs reasonably sized byte strings
    containing buffer_size samples from the generator stream.

    This allows us to output big chunks of the audio stream to
    disk at once for faster writes.
    '''
    i = iter(stream)
    return iter(lambda: "".join(itertools.islice(i, buffer_size)), "")
[ "def", "buffer", "(", "stream", ",", "buffer_size", "=", "BUFFER_SIZE", ")", ":", "i", "=", "iter", "(", "stream", ")", "return", "iter", "(", "lambda", ":", "\"\"", ".", "join", "(", "itertools", ".", "islice", "(", "i", ",", "buffer_size", ")", ")", ",", "\"\"", ")" ]
Buffer the generator into byte strings of buffer_size samples

Return a generator that outputs reasonably sized byte strings
containing buffer_size samples from the generator stream.

This allows us to output big chunks of the audio stream to
disk at once for faster writes.
[ "Buffer", "the", "generator", "into", "byte", "strings", "of", "buffer_size", "samples" ]
train
https://github.com/casebeer/audiogen/blob/184dee2ca32c2bb4315a0f18e62288728fcd7881/audiogen/sampler.py#L97-L108
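A tiny worked example of the chunking: four one-byte samples are joined into a single write-sized chunk, and the empty-string sentinel ends the iterator.

from audiogen.sampler import buffer  # assumed import path

chunks = list(buffer(iter(['a', 'b', 'c', 'd']), buffer_size=4))
print(chunks)  # ['abcd']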
casebeer/audiogen
audiogen/sampler.py
wave_module_patched
def wave_module_patched():
    '''True if wave module can write data size of 0xFFFFFFFF, False otherwise.'''
    f = StringIO()
    w = wave.open(f, "wb")
    w.setparams((1, 2, 44100, 0, "NONE", "no compression"))
    patched = True
    try:
        w.setnframes((0xFFFFFFFF - 36) / w.getnchannels() / w.getsampwidth())
        w._ensure_header_written(0)
    except struct.error:
        patched = False
        logger.info("Error setting wave data size to 0xFFFFFFFF; wave module unpatched, setting data size to 0x7FFFFFFF")
        w.setnframes((0x7FFFFFFF - 36) / w.getnchannels() / w.getsampwidth())
        w._ensure_header_written(0)
    return patched
python
def wave_module_patched():
    '''True if wave module can write data size of 0xFFFFFFFF, False otherwise.'''
    f = StringIO()
    w = wave.open(f, "wb")
    w.setparams((1, 2, 44100, 0, "NONE", "no compression"))
    patched = True
    try:
        w.setnframes((0xFFFFFFFF - 36) / w.getnchannels() / w.getsampwidth())
        w._ensure_header_written(0)
    except struct.error:
        patched = False
        logger.info("Error setting wave data size to 0xFFFFFFFF; wave module unpatched, setting data size to 0x7FFFFFFF")
        w.setnframes((0x7FFFFFFF - 36) / w.getnchannels() / w.getsampwidth())
        w._ensure_header_written(0)
    return patched
[ "def", "wave_module_patched", "(", ")", ":", "f", "=", "StringIO", "(", ")", "w", "=", "wave", ".", "open", "(", "f", ",", "\"wb\"", ")", "w", ".", "setparams", "(", "(", "1", ",", "2", ",", "44100", ",", "0", ",", "\"NONE\"", ",", "\"no compression\"", ")", ")", "patched", "=", "True", "try", ":", "w", ".", "setnframes", "(", "(", "0xFFFFFFFF", "-", "36", ")", "/", "w", ".", "getnchannels", "(", ")", "/", "w", ".", "getsampwidth", "(", ")", ")", "w", ".", "_ensure_header_written", "(", "0", ")", "except", "struct", ".", "error", ":", "patched", "=", "False", "logger", ".", "info", "(", "\"Error setting wave data size to 0xFFFFFFFF; wave module unpatched, setting sata size to 0x7FFFFFFF\"", ")", "w", ".", "setnframes", "(", "(", "0x7FFFFFFF", "-", "36", ")", "/", "w", ".", "getnchannels", "(", ")", "/", "w", ".", "getsampwidth", "(", ")", ")", "w", ".", "_ensure_header_written", "(", "0", ")", "return", "patched" ]
True if wave module can write data size of 0xFFFFFFFF, False otherwise.
[ "True", "if", "wave", "module", "can", "write", "data", "size", "of", "0xFFFFFFFF", "False", "otherwise", "." ]
train
https://github.com/casebeer/audiogen/blob/184dee2ca32c2bb4315a0f18e62288728fcd7881/audiogen/sampler.py#L134-L148
casebeer/audiogen
audiogen/sampler.py
cache_finite_samples
def cache_finite_samples(f): '''Decorator to cache audio samples produced by the wrapped generator.''' cache = {} def wrap(*args): key = FRAME_RATE, args if key not in cache: cache[key] = [sample for sample in f(*args)] return (sample for sample in cache[key]) return wrap
python
def cache_finite_samples(f): '''Decorator to cache audio samples produced by the wrapped generator.''' cache = {} def wrap(*args): key = FRAME_RATE, args if key not in cache: cache[key] = [sample for sample in f(*args)] return (sample for sample in cache[key]) return wrap
[ "def", "cache_finite_samples", "(", "f", ")", ":", "cache", "=", "{", "}", "def", "wrap", "(", "*", "args", ")", ":", "key", "=", "FRAME_RATE", ",", "args", "if", "key", "not", "in", "cache", ":", "cache", "[", "key", "]", "=", "[", "sample", "for", "sample", "in", "f", "(", "*", "args", ")", "]", "return", "(", "sample", "for", "sample", "in", "cache", "[", "key", "]", ")", "return", "wrap" ]
Decorator to cache audio samples produced by the wrapped generator.
[ "Decorator", "to", "cache", "audio", "samples", "produced", "by", "the", "wrapped", "generator", "." ]
train
https://github.com/casebeer/audiogen/blob/184dee2ca32c2bb4315a0f18e62288728fcd7881/audiogen/sampler.py#L189-L197
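A sketch of the decorator on a finite generator; because the cache key is (FRAME_RATE, args), cached samples are reused only while the module frame rate is unchanged.

from audiogen.sampler import cache_finite_samples  # assumed import path

@cache_finite_samples
def ramp(n):
    # finite generator: n linearly increasing samples
    for i in range(n):
        yield i / float(n)

first = list(ramp(4))   # computed, then stored under (FRAME_RATE, (4,))
second = list(ramp(4))  # replayed from the cache
print(first == second)  # True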
casebeer/audiogen
audiogen/sampler.py
play
def play(channels, blocking=True, raw_samples=False):
    '''
    Play the contents of the generator using PyAudio

    Play to the system soundcard using PyAudio. PyAudio, an otherwise
    optional dependency, must be installed for this feature to work.
    '''
    if not pyaudio_loaded:
        raise Exception("Soundcard playback requires PyAudio. Install with `pip install pyaudio`.")

    channel_count = 1 if hasattr(channels, "next") else len(channels)
    wavgen = wav_samples(channels, raw_samples=raw_samples)
    p = pyaudio.PyAudio()
    stream = p.open(
        format=p.get_format_from_width(SAMPLE_WIDTH),
        channels=channel_count,
        rate=FRAME_RATE,
        output=True,
        stream_callback=_pyaudio_callback(wavgen) if not blocking else None
    )
    if blocking:
        try:
            for chunk in buffer(wavgen, 1024):
                stream.write(chunk)
        except Exception:
            raise
        finally:
            if not stream.is_stopped():
                stream.stop_stream()
            try:
                stream.close()
            except Exception:
                pass
    else:
        return stream
python
def play(channels, blocking=True, raw_samples=False):
    '''
    Play the contents of the generator using PyAudio

    Play to the system soundcard using PyAudio. PyAudio, an otherwise
    optional dependency, must be installed for this feature to work.
    '''
    if not pyaudio_loaded:
        raise Exception("Soundcard playback requires PyAudio. Install with `pip install pyaudio`.")

    channel_count = 1 if hasattr(channels, "next") else len(channels)
    wavgen = wav_samples(channels, raw_samples=raw_samples)
    p = pyaudio.PyAudio()
    stream = p.open(
        format=p.get_format_from_width(SAMPLE_WIDTH),
        channels=channel_count,
        rate=FRAME_RATE,
        output=True,
        stream_callback=_pyaudio_callback(wavgen) if not blocking else None
    )
    if blocking:
        try:
            for chunk in buffer(wavgen, 1024):
                stream.write(chunk)
        except Exception:
            raise
        finally:
            if not stream.is_stopped():
                stream.stop_stream()
            try:
                stream.close()
            except Exception:
                pass
    else:
        return stream
[ "def", "play", "(", "channels", ",", "blocking", "=", "True", ",", "raw_samples", "=", "False", ")", ":", "if", "not", "pyaudio_loaded", ":", "raise", "Exception", "(", "\"Soundcard playback requires PyAudio. Install with `pip install pyaudio`.\"", ")", "channel_count", "=", "1", "if", "hasattr", "(", "channels", ",", "\"next\"", ")", "else", "len", "(", "channels", ")", "wavgen", "=", "wav_samples", "(", "channels", ",", "raw_samples", "=", "raw_samples", ")", "p", "=", "pyaudio", ".", "PyAudio", "(", ")", "stream", "=", "p", ".", "open", "(", "format", "=", "p", ".", "get_format_from_width", "(", "SAMPLE_WIDTH", ")", ",", "channels", "=", "channel_count", ",", "rate", "=", "FRAME_RATE", ",", "output", "=", "True", ",", "stream_callback", "=", "_pyaudio_callback", "(", "wavgen", ")", "if", "not", "blocking", "else", "None", ")", "if", "blocking", ":", "try", ":", "for", "chunk", "in", "buffer", "(", "wavgen", ",", "1024", ")", ":", "stream", ".", "write", "(", "chunk", ")", "except", "Exception", ":", "raise", "finally", ":", "if", "not", "stream", ".", "is_stopped", "(", ")", ":", "stream", ".", "stop_stream", "(", ")", "try", ":", "stream", ".", "close", "(", ")", "except", "Exception", ":", "pass", "else", ":", "return", "stream" ]
Play the contents of the generator using PyAudio

Play to the system soundcard using PyAudio. PyAudio, an otherwise
optional dependency, must be installed for this feature to work.
[ "Play", "the", "contents", "of", "the", "generator", "using", "PyAudio" ]
train
https://github.com/casebeer/audiogen/blob/184dee2ca32c2bb4315a0f18e62288728fcd7881/audiogen/sampler.py#L211-L245
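Blocking playback of one second of tone through the soundcard; this requires the optional PyAudio dependency noted in the docstring, with audiogen.tone assumed as before.

import audiogen
from audiogen.util import crop
from audiogen.sampler import play  # assumed import paths

# Writes 1024-sample chunks to the PyAudio stream until the cropped
# generator is exhausted, then stops and closes the stream.
play(crop(audiogen.tone(440), seconds=1), blocking=True)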
jakevdp/supersmoother
supersmoother/windowed_sum.py
windowed_sum_slow
def windowed_sum_slow(arrays, span, t=None, indices=None, tpowers=0, period=None, subtract_mid=False): """Compute the windowed sum of the given arrays. This is a slow function, used primarily for testing and validation of the faster version of ``windowed_sum()`` Parameters ---------- arrays : tuple of arrays arrays to window span : int or array of ints The span to use for the sum at each point. If array is provided, it must be broadcastable with ``indices`` indices : array the indices of the center of the desired windows. If ``None``, the indices are assumed to be ``range(len(arrays[0]))`` though these are not actually instantiated. t : array (optional) Times associated with the arrays tpowers : list (optional) Powers of t for each array sum period : float (optional) Period to use, if times are periodic. If supplied, input times must be arranged such that (t % period) is sorted! subtract_mid : boolean If true, then subtract the middle value from each sum Returns ------- arrays : tuple of ndarrays arrays containing the windowed sum of each input array """ span = np.asarray(span, dtype=int) if not np.all(span > 0): raise ValueError("span values must be positive") arrays = tuple(map(np.asarray, arrays)) N = arrays[0].size if not all(a.shape == (N,) for a in arrays): raise ValueError("sizes of provided arrays must match") t_input = t if t is not None: t = np.asarray(t) if t.shape != (N,): raise ValueError("shape of t must match shape of arrays") else: t = np.ones(N) tpowers = tpowers + np.zeros(len(arrays)) if len(tpowers) != len(arrays): raise ValueError("tpowers must be broadcastable with number of arrays") if period: if t_input is None: raise ValueError("periodic requires t to be provided") t = t % period if indices is None: indices = np.arange(N) spans, indices = np.broadcast_arrays(span, indices) results = [] for tpower, array in zip(tpowers, arrays): if period: result = [sum(array[j % N] * (t[j % N] + (j // N) * period) ** tpower for j in range(i - s // 2, i - s // 2 + s) if not (subtract_mid and j == i)) for i, s in np.broadcast(indices, spans)] else: result = [sum(array[j] * t[j] ** tpower for j in range(max(0, i - s // 2), min(N, i - s // 2 + s)) if not (subtract_mid and j == i)) for i, s in np.broadcast(indices, spans)] results.append(np.asarray(result)) return tuple(results)
python
def windowed_sum_slow(arrays, span, t=None, indices=None, tpowers=0, period=None, subtract_mid=False): """Compute the windowed sum of the given arrays. This is a slow function, used primarily for testing and validation of the faster version of ``windowed_sum()`` Parameters ---------- arrays : tuple of arrays arrays to window span : int or array of ints The span to use for the sum at each point. If array is provided, it must be broadcastable with ``indices`` indices : array the indices of the center of the desired windows. If ``None``, the indices are assumed to be ``range(len(arrays[0]))`` though these are not actually instantiated. t : array (optional) Times associated with the arrays tpowers : list (optional) Powers of t for each array sum period : float (optional) Period to use, if times are periodic. If supplied, input times must be arranged such that (t % period) is sorted! subtract_mid : boolean If true, then subtract the middle value from each sum Returns ------- arrays : tuple of ndarrays arrays containing the windowed sum of each input array """ span = np.asarray(span, dtype=int) if not np.all(span > 0): raise ValueError("span values must be positive") arrays = tuple(map(np.asarray, arrays)) N = arrays[0].size if not all(a.shape == (N,) for a in arrays): raise ValueError("sizes of provided arrays must match") t_input = t if t is not None: t = np.asarray(t) if t.shape != (N,): raise ValueError("shape of t must match shape of arrays") else: t = np.ones(N) tpowers = tpowers + np.zeros(len(arrays)) if len(tpowers) != len(arrays): raise ValueError("tpowers must be broadcastable with number of arrays") if period: if t_input is None: raise ValueError("periodic requires t to be provided") t = t % period if indices is None: indices = np.arange(N) spans, indices = np.broadcast_arrays(span, indices) results = [] for tpower, array in zip(tpowers, arrays): if period: result = [sum(array[j % N] * (t[j % N] + (j // N) * period) ** tpower for j in range(i - s // 2, i - s // 2 + s) if not (subtract_mid and j == i)) for i, s in np.broadcast(indices, spans)] else: result = [sum(array[j] * t[j] ** tpower for j in range(max(0, i - s // 2), min(N, i - s // 2 + s)) if not (subtract_mid and j == i)) for i, s in np.broadcast(indices, spans)] results.append(np.asarray(result)) return tuple(results)
[ "def", "windowed_sum_slow", "(", "arrays", ",", "span", ",", "t", "=", "None", ",", "indices", "=", "None", ",", "tpowers", "=", "0", ",", "period", "=", "None", ",", "subtract_mid", "=", "False", ")", ":", "span", "=", "np", ".", "asarray", "(", "span", ",", "dtype", "=", "int", ")", "if", "not", "np", ".", "all", "(", "span", ">", "0", ")", ":", "raise", "ValueError", "(", "\"span values must be positive\"", ")", "arrays", "=", "tuple", "(", "map", "(", "np", ".", "asarray", ",", "arrays", ")", ")", "N", "=", "arrays", "[", "0", "]", ".", "size", "if", "not", "all", "(", "a", ".", "shape", "==", "(", "N", ",", ")", "for", "a", "in", "arrays", ")", ":", "raise", "ValueError", "(", "\"sizes of provided arrays must match\"", ")", "t_input", "=", "t", "if", "t", "is", "not", "None", ":", "t", "=", "np", ".", "asarray", "(", "t", ")", "if", "t", ".", "shape", "!=", "(", "N", ",", ")", ":", "raise", "ValueError", "(", "\"shape of t must match shape of arrays\"", ")", "else", ":", "t", "=", "np", ".", "ones", "(", "N", ")", "tpowers", "=", "tpowers", "+", "np", ".", "zeros", "(", "len", "(", "arrays", ")", ")", "if", "len", "(", "tpowers", ")", "!=", "len", "(", "arrays", ")", ":", "raise", "ValueError", "(", "\"tpowers must be broadcastable with number of arrays\"", ")", "if", "period", ":", "if", "t_input", "is", "None", ":", "raise", "ValueError", "(", "\"periodic requires t to be provided\"", ")", "t", "=", "t", "%", "period", "if", "indices", "is", "None", ":", "indices", "=", "np", ".", "arange", "(", "N", ")", "spans", ",", "indices", "=", "np", ".", "broadcast_arrays", "(", "span", ",", "indices", ")", "results", "=", "[", "]", "for", "tpower", ",", "array", "in", "zip", "(", "tpowers", ",", "arrays", ")", ":", "if", "period", ":", "result", "=", "[", "sum", "(", "array", "[", "j", "%", "N", "]", "*", "(", "t", "[", "j", "%", "N", "]", "+", "(", "j", "//", "N", ")", "*", "period", ")", "**", "tpower", "for", "j", "in", "range", "(", "i", "-", "s", "//", "2", ",", "i", "-", "s", "//", "2", "+", "s", ")", "if", "not", "(", "subtract_mid", "and", "j", "==", "i", ")", ")", "for", "i", ",", "s", "in", "np", ".", "broadcast", "(", "indices", ",", "spans", ")", "]", "else", ":", "result", "=", "[", "sum", "(", "array", "[", "j", "]", "*", "t", "[", "j", "]", "**", "tpower", "for", "j", "in", "range", "(", "max", "(", "0", ",", "i", "-", "s", "//", "2", ")", ",", "min", "(", "N", ",", "i", "-", "s", "//", "2", "+", "s", ")", ")", "if", "not", "(", "subtract_mid", "and", "j", "==", "i", ")", ")", "for", "i", ",", "s", "in", "np", ".", "broadcast", "(", "indices", ",", "spans", ")", "]", "results", ".", "append", "(", "np", ".", "asarray", "(", "result", ")", ")", "return", "tuple", "(", "results", ")" ]
Compute the windowed sum of the given arrays. This is a slow function, used primarily for testing and validation of the faster version of ``windowed_sum()`` Parameters ---------- arrays : tuple of arrays arrays to window span : int or array of ints The span to use for the sum at each point. If array is provided, it must be broadcastable with ``indices`` indices : array the indices of the center of the desired windows. If ``None``, the indices are assumed to be ``range(len(arrays[0]))`` though these are not actually instantiated. t : array (optional) Times associated with the arrays tpowers : list (optional) Powers of t for each array sum period : float (optional) Period to use, if times are periodic. If supplied, input times must be arranged such that (t % period) is sorted! subtract_mid : boolean If true, then subtract the middle value from each sum Returns ------- arrays : tuple of ndarrays arrays containing the windowed sum of each input array
[ "Compute", "the", "windowed", "sum", "of", "the", "given", "arrays", "." ]
train
https://github.com/jakevdp/supersmoother/blob/0c96cf13dcd6f9006d3c0421f9cd6e18abe27a2f/supersmoother/windowed_sum.py#L4-L84
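The validation pattern the docstring describes: compare the fast windowed_sum against this slow reference on random data (import path assumed from the record).

import numpy as np
from supersmoother.windowed_sum import windowed_sum, windowed_sum_slow

rng = np.random.RandomState(42)
t = np.sort(rng.rand(30))
y = rng.randn(30)

# Windowed sums of y * t over centered 5-point windows.
fast, = windowed_sum([y], span=5, t=t, tpowers=[1])
slow, = windowed_sum_slow([y], span=5, t=t, tpowers=[1])
print(np.allclose(fast, slow))  # True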
jakevdp/supersmoother
supersmoother/windowed_sum.py
windowed_sum
def windowed_sum(arrays, span, t=None, indices=None, tpowers=0,
                 period=None, subtract_mid=False):
    """Compute the windowed sum of the given arrays.

    Parameters
    ----------
    arrays : tuple of arrays
        arrays to window
    span : int or array of ints
        The span to use for the sum at each point. If array is provided,
        it must be broadcastable with ``indices``
    indices : array
        the indices of the center of the desired windows. If ``None``,
        the indices are assumed to be ``range(len(arrays[0]))`` though
        these are not actually instantiated.
    t : array (optional)
        Times associated with the arrays
    tpowers : list (optional)
        Powers of t for each array sum
    period : float (optional)
        Period to use, if times are periodic. If supplied, input times
        must be arranged such that (t % period) is sorted!
    subtract_mid : boolean
        If true, then subtract the middle value from each sum

    Returns
    -------
    arrays : tuple of ndarrays
        arrays containing the windowed sum of each input array
    """
    span = np.asarray(span, dtype=int)
    if not np.all(span > 0):
        raise ValueError("span values must be positive")

    arrays = tuple(map(np.asarray, arrays))
    N = arrays[0].size
    if not all(a.shape == (N,) for a in arrays):
        raise ValueError("sizes of provided arrays must match")

    t_input = t
    if t is not None:
        t = np.asarray(t)
        if t.shape != (N,):
            raise ValueError("shape of t must match shape of arrays "
                             "t -> {0} arr -> {1}".format(t.shape,
                                                          arrays[0].shape))
    else:
        # XXX: special-case no t?
        t = np.ones(N)

    tpowers = np.asarray(tpowers) + np.zeros(len(arrays))

    if indices is not None:
        span, indices = np.broadcast_arrays(span, indices)

    # For the periodic case, re-call the function with padded arrays
    if period:
        if t_input is None:
            raise ValueError("periodic requires t to be provided")
        t = t % period

        t, arrays, sl = _pad_arrays(t, arrays, indices, span, period)
        if len(t) > N:
            # arrays are padded. Recursively call windowed_sum() and return.
            if span.ndim == 0 and indices is None:
                # fixed-span/no index case is done faster this way
                arrs = windowed_sum(arrays, span, t=t, indices=indices,
                                    tpowers=tpowers, period=None,
                                    subtract_mid=subtract_mid)
                return tuple([a[sl] for a in arrs])
            else:
                # this works for variable span and general indices
                if indices is None:
                    indices = np.arange(N)
                indices = indices + sl.start
                return windowed_sum(arrays, span, t=t, indices=indices,
                                    tpowers=tpowers, period=None,
                                    subtract_mid=subtract_mid)
        else:
            # No padding needed! We can carry-on as if it's a non-periodic case
            period = None

    # The rest of the algorithm now proceeds without reference to the period
    # just as a sanity check...
    assert not period

    if span.ndim == 0:
        # fixed-span case. Because of the checks & manipulations above
        # we know here that indices=None
        assert indices is None

        window = np.ones(span)

        def convolve_same(a, window):
            if len(window) <= len(a):
                res = np.convolve(a, window, mode='same')
            else:
                res = np.convolve(a, window, mode='full')
                start = (len(window) - 1) // 2
                res = res[start:start + len(a)]
            return res

        results = [convolve_same(a * t ** tp, window)
                   for a, tp in zip(arrays, tpowers)]
        indices = slice(None)

    else:
        # variable-span case. Use reduceat() in a clever way for speed.
        if indices is None:
            indices = np.arange(len(span))

        # we checked this above, but just as a sanity check assert it here...
        assert span.shape == indices.shape

        mins = np.asarray(indices) - span // 2
        results = []
        for a, tp in zip(arrays, tpowers):
            ranges = np.vstack([np.maximum(0, mins),
                                np.minimum(len(a), mins + span)]).ravel('F')
            results.append(np.add.reduceat(np.append(a * t ** tp, 0),
                                           ranges)[::2])

    # Subtract the midpoint if required: this is used in cross-validation
    if subtract_mid:
        results = [r - a[indices] * t[indices] ** tp
                   for r, a, tp in zip(results, arrays, tpowers)]

    return tuple(results)
python
def windowed_sum(arrays, span, t=None, indices=None, tpowers=0,
                 period=None, subtract_mid=False):
    """Compute the windowed sum of the given arrays.

    Parameters
    ----------
    arrays : tuple of arrays
        arrays to window
    span : int or array of ints
        The span to use for the sum at each point. If array is provided,
        it must be broadcastable with ``indices``
    indices : array
        the indices of the center of the desired windows. If ``None``,
        the indices are assumed to be ``range(len(arrays[0]))`` though
        these are not actually instantiated.
    t : array (optional)
        Times associated with the arrays
    tpowers : list (optional)
        Powers of t for each array sum
    period : float (optional)
        Period to use, if times are periodic. If supplied, input times
        must be arranged such that (t % period) is sorted!
    subtract_mid : boolean
        If true, then subtract the middle value from each sum

    Returns
    -------
    arrays : tuple of ndarrays
        arrays containing the windowed sum of each input array
    """
    span = np.asarray(span, dtype=int)
    if not np.all(span > 0):
        raise ValueError("span values must be positive")

    arrays = tuple(map(np.asarray, arrays))
    N = arrays[0].size
    if not all(a.shape == (N,) for a in arrays):
        raise ValueError("sizes of provided arrays must match")

    t_input = t
    if t is not None:
        t = np.asarray(t)
        if t.shape != (N,):
            raise ValueError("shape of t must match shape of arrays "
                             "t -> {0} arr -> {1}".format(t.shape,
                                                          arrays[0].shape))
    else:
        # XXX: special-case no t?
        t = np.ones(N)

    tpowers = np.asarray(tpowers) + np.zeros(len(arrays))

    if indices is not None:
        span, indices = np.broadcast_arrays(span, indices)

    # For the periodic case, re-call the function with padded arrays
    if period:
        if t_input is None:
            raise ValueError("periodic requires t to be provided")
        t = t % period

        t, arrays, sl = _pad_arrays(t, arrays, indices, span, period)
        if len(t) > N:
            # arrays are padded. Recursively call windowed_sum() and return.
            if span.ndim == 0 and indices is None:
                # fixed-span/no index case is done faster this way
                arrs = windowed_sum(arrays, span, t=t, indices=indices,
                                    tpowers=tpowers, period=None,
                                    subtract_mid=subtract_mid)
                return tuple([a[sl] for a in arrs])
            else:
                # this works for variable span and general indices
                if indices is None:
                    indices = np.arange(N)
                indices = indices + sl.start
                return windowed_sum(arrays, span, t=t, indices=indices,
                                    tpowers=tpowers, period=None,
                                    subtract_mid=subtract_mid)
        else:
            # No padding needed! We can carry-on as if it's a non-periodic case
            period = None

    # The rest of the algorithm now proceeds without reference to the period
    # just as a sanity check...
    assert not period

    if span.ndim == 0:
        # fixed-span case. Because of the checks & manipulations above
        # we know here that indices=None
        assert indices is None

        window = np.ones(span)

        def convolve_same(a, window):
            if len(window) <= len(a):
                res = np.convolve(a, window, mode='same')
            else:
                res = np.convolve(a, window, mode='full')
                start = (len(window) - 1) // 2
                res = res[start:start + len(a)]
            return res

        results = [convolve_same(a * t ** tp, window)
                   for a, tp in zip(arrays, tpowers)]
        indices = slice(None)

    else:
        # variable-span case. Use reduceat() in a clever way for speed.
        if indices is None:
            indices = np.arange(len(span))

        # we checked this above, but just as a sanity check assert it here...
        assert span.shape == indices.shape

        mins = np.asarray(indices) - span // 2
        results = []
        for a, tp in zip(arrays, tpowers):
            ranges = np.vstack([np.maximum(0, mins),
                                np.minimum(len(a), mins + span)]).ravel('F')
            results.append(np.add.reduceat(np.append(a * t ** tp, 0),
                                           ranges)[::2])

    # Subtract the midpoint if required: this is used in cross-validation
    if subtract_mid:
        results = [r - a[indices] * t[indices] ** tp
                   for r, a, tp in zip(results, arrays, tpowers)]

    return tuple(results)
[ "def", "windowed_sum", "(", "arrays", ",", "span", ",", "t", "=", "None", ",", "indices", "=", "None", ",", "tpowers", "=", "0", ",", "period", "=", "None", ",", "subtract_mid", "=", "False", ")", ":", "span", "=", "np", ".", "asarray", "(", "span", ",", "dtype", "=", "int", ")", "if", "not", "np", ".", "all", "(", "span", ">", "0", ")", ":", "raise", "ValueError", "(", "\"span values must be positive\"", ")", "arrays", "=", "tuple", "(", "map", "(", "np", ".", "asarray", ",", "arrays", ")", ")", "N", "=", "arrays", "[", "0", "]", ".", "size", "if", "not", "all", "(", "a", ".", "shape", "==", "(", "N", ",", ")", "for", "a", "in", "arrays", ")", ":", "raise", "ValueError", "(", "\"sizes of provided arrays must match\"", ")", "t_input", "=", "t", "if", "t", "is", "not", "None", ":", "t", "=", "np", ".", "asarray", "(", "t", ")", "if", "t", ".", "shape", "!=", "(", "N", ",", ")", ":", "raise", "ValueError", "(", "\"shape of t must match shape of arrays \"", "\"t -> {0} arr -> {1}\"", ".", "format", "(", "t", ".", "shape", ",", "arrays", "[", "0", "]", ".", "shape", ")", ")", "else", ":", "# XXX: special-case no t?", "t", "=", "np", ".", "ones", "(", "N", ")", "tpowers", "=", "np", ".", "asarray", "(", "tpowers", ")", "+", "np", ".", "zeros", "(", "len", "(", "arrays", ")", ")", "if", "indices", "is", "not", "None", ":", "span", ",", "indices", "=", "np", ".", "broadcast_arrays", "(", "span", ",", "indices", ")", "# For the periodic case, re-call the function with padded arrays", "if", "period", ":", "if", "t_input", "is", "None", ":", "raise", "ValueError", "(", "\"periodic requires t to be provided\"", ")", "t", "=", "t", "%", "period", "t", ",", "arrays", ",", "sl", "=", "_pad_arrays", "(", "t", ",", "arrays", ",", "indices", ",", "span", ",", "period", ")", "if", "len", "(", "t", ")", ">", "N", ":", "# arrays are padded. Recursively call windowed_sum() and return.", "if", "span", ".", "ndim", "==", "0", "and", "indices", "is", "None", ":", "# fixed-span/no index case is done faster this way", "arrs", "=", "windowed_sum", "(", "arrays", ",", "span", ",", "t", "=", "t", ",", "indices", "=", "indices", ",", "tpowers", "=", "tpowers", ",", "period", "=", "None", ",", "subtract_mid", "=", "subtract_mid", ")", "return", "tuple", "(", "[", "a", "[", "sl", "]", "for", "a", "in", "arrs", "]", ")", "else", ":", "# this works for variable span and general indices", "if", "indices", "is", "None", ":", "indices", "=", "np", ".", "arange", "(", "N", ")", "indices", "=", "indices", "+", "sl", ".", "start", "return", "windowed_sum", "(", "arrays", ",", "span", ",", "t", "=", "t", ",", "indices", "=", "indices", ",", "tpowers", "=", "tpowers", ",", "period", "=", "None", ",", "subtract_mid", "=", "subtract_mid", ")", "else", ":", "# No padding needed! We can carry-on as if it's a non-periodic case", "period", "=", "None", "# The rest of the algorithm now proceeds without reference to the period", "# just as a sanity check...", "assert", "not", "period", "if", "span", ".", "ndim", "==", "0", ":", "# fixed-span case. 
Because of the checks & manipulations above", "# we know here that indices=None", "assert", "indices", "is", "None", "window", "=", "np", ".", "ones", "(", "span", ")", "def", "convolve_same", "(", "a", ",", "window", ")", ":", "if", "len", "(", "window", ")", "<=", "len", "(", "a", ")", ":", "res", "=", "np", ".", "convolve", "(", "a", ",", "window", ",", "mode", "=", "'same'", ")", "else", ":", "res", "=", "np", ".", "convolve", "(", "a", ",", "window", ",", "mode", "=", "'full'", ")", "start", "=", "(", "len", "(", "window", ")", "-", "1", ")", "//", "2", "res", "=", "res", "[", "start", ":", "start", "+", "len", "(", "a", ")", "]", "return", "res", "results", "=", "[", "convolve_same", "(", "a", "*", "t", "**", "tp", ",", "window", ")", "for", "a", ",", "tp", "in", "zip", "(", "arrays", ",", "tpowers", ")", "]", "indices", "=", "slice", "(", "None", ")", "else", ":", "# variable-span case. Use reduceat() in a clever way for speed.", "if", "indices", "is", "None", ":", "indices", "=", "np", ".", "arange", "(", "len", "(", "span", ")", ")", "# we checked this above, but just as a sanity check assert it here...", "assert", "span", ".", "shape", "==", "indices", ".", "shape", "mins", "=", "np", ".", "asarray", "(", "indices", ")", "-", "span", "//", "2", "results", "=", "[", "]", "for", "a", ",", "tp", "in", "zip", "(", "arrays", ",", "tpowers", ")", ":", "ranges", "=", "np", ".", "vstack", "(", "[", "np", ".", "maximum", "(", "0", ",", "mins", ")", ",", "np", ".", "minimum", "(", "len", "(", "a", ")", ",", "mins", "+", "span", ")", "]", ")", ".", "ravel", "(", "'F'", ")", "results", ".", "append", "(", "np", ".", "add", ".", "reduceat", "(", "np", ".", "append", "(", "a", "*", "t", "**", "tp", ",", "0", ")", ",", "ranges", ")", "[", ":", ":", "2", "]", ")", "# Subtract the midpoint if required: this is used in cross-validation", "if", "subtract_mid", ":", "results", "=", "[", "r", "-", "a", "[", "indices", "]", "*", "t", "[", "indices", "]", "**", "tp", "for", "r", ",", "a", ",", "tp", "in", "zip", "(", "results", ",", "arrays", ",", "tpowers", ")", "]", "return", "tuple", "(", "results", ")" ]
Compute the windowed sum of the given arrays.

Parameters
----------
arrays : tuple of arrays
    arrays to window
span : int or array of ints
    The span to use for the sum at each point. If array is provided,
    it must be broadcastable with ``indices``
indices : array
    the indices of the center of the desired windows. If ``None``,
    the indices are assumed to be ``range(len(arrays[0]))`` though
    these are not actually instantiated.
t : array (optional)
    Times associated with the arrays
tpowers : list (optional)
    Powers of t for each array sum
period : float (optional)
    Period to use, if times are periodic. If supplied, input times
    must be arranged such that (t % period) is sorted!
subtract_mid : boolean
    If true, then subtract the middle value from each sum

Returns
-------
arrays : tuple of ndarrays
    arrays containing the windowed sum of each input array
[ "Compute", "the", "windowed", "sum", "of", "the", "given", "arrays", "." ]
train
https://github.com/jakevdp/supersmoother/blob/0c96cf13dcd6f9006d3c0421f9cd6e18abe27a2f/supersmoother/windowed_sum.py#L87-L212
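The variable-span branch of windowed_sum() above leans on np.add.reduceat over interleaved window boundaries; a standalone sketch of just that trick, with arbitrarily chosen values:

import numpy as np

a = np.arange(6, dtype=float)
indices = np.arange(6)
span = np.array([1, 3, 3, 3, 3, 1])
mins = indices - span // 2
# interleave window starts and (clipped) window ends: [lo0, hi0, lo1, hi1, ...]
ranges = np.vstack([np.maximum(0, mins),
                    np.minimum(len(a), mins + span)]).ravel('F')
# the appended 0 keeps reduceat in-bounds when a window ends exactly at len(a);
# every second reduceat result is one window sum
sums = np.add.reduceat(np.append(a, 0), ranges)[::2]
print(sums)  # [ 0.  3.  6.  9. 12.  5.]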
jakevdp/supersmoother
supersmoother/windowed_sum.py
_pad_arrays
def _pad_arrays(t, arrays, indices, span, period):
    """Internal routine to pad arrays for periodic models."""
    N = len(t)
    if indices is None:
        indices = np.arange(N)
    pad_left = max(0, 0 - np.min(indices - span // 2))
    pad_right = max(0, np.max(indices + span - span // 2) - (N - 1))

    if pad_left + pad_right > 0:
        Nright, pad_right = divmod(pad_right, N)
        Nleft, pad_left = divmod(pad_left, N)
        t = np.concatenate([t[N - pad_left:] - (Nleft + 1) * period] +
                           [t + i * period
                            for i in range(-Nleft, Nright + 1)] +
                           [t[:pad_right] + (Nright + 1) * period])
        arrays = [np.concatenate([a[N - pad_left:]] +
                                 (Nleft + Nright + 1) * [a] +
                                 [a[:pad_right]])
                  for a in arrays]
        pad_left = pad_left % N
        Nright = pad_right / N
        pad_right = pad_right % N

        return (t, arrays, slice(pad_left + Nleft * N,
                                 pad_left + (Nleft + 1) * N))
    else:
        return (t, arrays, slice(None))
python
def _pad_arrays(t, arrays, indices, span, period):
    """Internal routine to pad arrays for periodic models."""
    N = len(t)
    if indices is None:
        indices = np.arange(N)
    pad_left = max(0, 0 - np.min(indices - span // 2))
    pad_right = max(0, np.max(indices + span - span // 2) - (N - 1))

    if pad_left + pad_right > 0:
        Nright, pad_right = divmod(pad_right, N)
        Nleft, pad_left = divmod(pad_left, N)
        t = np.concatenate([t[N - pad_left:] - (Nleft + 1) * period] +
                           [t + i * period
                            for i in range(-Nleft, Nright + 1)] +
                           [t[:pad_right] + (Nright + 1) * period])
        arrays = [np.concatenate([a[N - pad_left:]] +
                                 (Nleft + Nright + 1) * [a] +
                                 [a[:pad_right]])
                  for a in arrays]
        pad_left = pad_left % N
        Nright = pad_right / N
        pad_right = pad_right % N

        return (t, arrays, slice(pad_left + Nleft * N,
                                 pad_left + (Nleft + 1) * N))
    else:
        return (t, arrays, slice(None))
[ "def", "_pad_arrays", "(", "t", ",", "arrays", ",", "indices", ",", "span", ",", "period", ")", ":", "N", "=", "len", "(", "t", ")", "if", "indices", "is", "None", ":", "indices", "=", "np", ".", "arange", "(", "N", ")", "pad_left", "=", "max", "(", "0", ",", "0", "-", "np", ".", "min", "(", "indices", "-", "span", "//", "2", ")", ")", "pad_right", "=", "max", "(", "0", ",", "np", ".", "max", "(", "indices", "+", "span", "-", "span", "//", "2", ")", "-", "(", "N", "-", "1", ")", ")", "if", "pad_left", "+", "pad_right", ">", "0", ":", "Nright", ",", "pad_right", "=", "divmod", "(", "pad_right", ",", "N", ")", "Nleft", ",", "pad_left", "=", "divmod", "(", "pad_left", ",", "N", ")", "t", "=", "np", ".", "concatenate", "(", "[", "t", "[", "N", "-", "pad_left", ":", "]", "-", "(", "Nleft", "+", "1", ")", "*", "period", "]", "+", "[", "t", "+", "i", "*", "period", "for", "i", "in", "range", "(", "-", "Nleft", ",", "Nright", "+", "1", ")", "]", "+", "[", "t", "[", ":", "pad_right", "]", "+", "(", "Nright", "+", "1", ")", "*", "period", "]", ")", "arrays", "=", "[", "np", ".", "concatenate", "(", "[", "a", "[", "N", "-", "pad_left", ":", "]", "]", "+", "(", "Nleft", "+", "Nright", "+", "1", ")", "*", "[", "a", "]", "+", "[", "a", "[", ":", "pad_right", "]", "]", ")", "for", "a", "in", "arrays", "]", "pad_left", "=", "pad_left", "%", "N", "Nright", "=", "pad_right", "/", "N", "pad_right", "=", "pad_right", "%", "N", "return", "(", "t", ",", "arrays", ",", "slice", "(", "pad_left", "+", "Nleft", "*", "N", ",", "pad_left", "+", "(", "Nleft", "+", "1", ")", "*", "N", ")", ")", "else", ":", "return", "(", "t", ",", "arrays", ",", "slice", "(", "None", ")", ")" ]
Internal routine to pad arrays for periodic models.
[ "Internal", "routine", "to", "pad", "arrays", "for", "periodic", "models", "." ]
train
https://github.com/jakevdp/supersmoother/blob/0c96cf13dcd6f9006d3c0421f9cd6e18abe27a2f/supersmoother/windowed_sum.py#L215-L242
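A toy illustration, under simplified assumptions, of the padding scheme _pad_arrays() implements: shifted copies of the periodic times are stacked on either side of the original samples, and a slice records where the originals sit in the padded array:

import numpy as np

t = np.array([0.1, 0.4, 0.7])    # already sorted mod period
period = 1.0
pad = 1                          # suppose one sample of padding suffices
t_padded = np.concatenate([t[-pad:] - period, t, t[:pad] + period])
print(t_padded)                  # [-0.3  0.1  0.4  0.7  1.1]
sl = slice(pad, pad + len(t))
print(t_padded[sl])              # recovers the original [0.1 0.4 0.7]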
voidpp/PCA9685-driver
pca9685_driver/device.py
Device.get_i2c_bus_numbers
def get_i2c_bus_numbers(glober=glob.glob):
    """Search all the available I2C devices in the system"""
    res = []
    for device in glober("/dev/i2c-*"):
        r = re.match(r"/dev/i2c-(\d{1,2})", device)
        res.append(int(r.group(1)))
    return res
python
def get_i2c_bus_numbers(glober=glob.glob):
    """Search all the available I2C devices in the system"""
    res = []
    for device in glober("/dev/i2c-*"):
        r = re.match(r"/dev/i2c-(\d{1,2})", device)
        res.append(int(r.group(1)))
    return res
[ "def", "get_i2c_bus_numbers", "(", "glober", "=", "glob", ".", "glob", ")", ":", "res", "=", "[", "]", "for", "device", "in", "glober", "(", "\"/dev/i2c-*\"", ")", ":", "r", "=", "re", ".", "match", "(", "\"/dev/i2c-([\\d]){1,2}\"", ",", "device", ")", "res", ".", "append", "(", "int", "(", "r", ".", "group", "(", "1", ")", ")", ")", "return", "res" ]
Search all the available I2C devices in the system
[ "Search", "all", "the", "available", "I2C", "devices", "in", "the", "system" ]
train
https://github.com/voidpp/PCA9685-driver/blob/774790028cbced30fd69384f945198148b1793fc/pca9685_driver/device.py#L69-L75
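Because the glob call is injectable, the scan is easy to exercise without hardware. Note the capture group must span the full one- or two-digit number for multi-digit buses to parse whole; a pattern repeating a single-digit group would only capture the last digit. A usage sketch, assuming the function is exposed as a static method (its signature takes no self):

from pca9685_driver.device import Device

def fake_glob(pattern):
    return ["/dev/i2c-0", "/dev/i2c-12"]

print(Device.get_i2c_bus_numbers(glober=fake_glob))  # [0, 12]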
voidpp/PCA9685-driver
pca9685_driver/device.py
Device.get_led_register_from_name
def get_led_register_from_name(self, name):
    """Parse the name for led number

    :param name: attribute name, like: led_1
    """
    res = re.match('^led_([0-9]{1,2})$', name)
    if res is None:
        raise AttributeError("Unknown attribute: '%s'" % name)
    led_num = int(res.group(1))
    if led_num < 0 or led_num > 15:
        raise AttributeError("Unknown attribute: '%s'" % name)
    return self.calc_led_register(led_num)
python
def get_led_register_from_name(self, name):
    """Parse the name for led number

    :param name: attribute name, like: led_1
    """
    res = re.match('^led_([0-9]{1,2})$', name)
    if res is None:
        raise AttributeError("Unknown attribute: '%s'" % name)
    led_num = int(res.group(1))
    if led_num < 0 or led_num > 15:
        raise AttributeError("Unknown attribute: '%s'" % name)
    return self.calc_led_register(led_num)
[ "def", "get_led_register_from_name", "(", "self", ",", "name", ")", ":", "res", "=", "re", ".", "match", "(", "'^led_([0-9]{1,2})$'", ",", "name", ")", "if", "res", "is", "None", ":", "raise", "AttributeError", "(", "\"Unknown attribute: '%s'\"", "%", "name", ")", "led_num", "=", "int", "(", "res", ".", "group", "(", "1", ")", ")", "if", "led_num", "<", "0", "or", "led_num", ">", "15", ":", "raise", "AttributeError", "(", "\"Unknown attribute: '%s'\"", "%", "name", ")", "return", "self", ".", "calc_led_register", "(", "led_num", ")" ]
Parse the name for led number

:param name: attribute name, like: led_1
[ "Parse", "the", "name", "for", "led", "number" ]
train
https://github.com/voidpp/PCA9685-driver/blob/774790028cbced30fd69384f945198148b1793fc/pca9685_driver/device.py#L87-L98
voidpp/PCA9685-driver
pca9685_driver/device.py
Device.set_pwm
def set_pwm(self, led_num, value):
    """Set PWM value for the specified LED

    :param led_num: LED number (0-15)
    :param value: the 12 bit value (0-4095)
    """
    self.__check_range('led_number', led_num)
    self.__check_range('led_value', value)
    register_low = self.calc_led_register(led_num)
    self.write(register_low, value_low(value))
    self.write(register_low + 1, value_high(value))
python
def set_pwm(self, led_num, value):
    """Set PWM value for the specified LED

    :param led_num: LED number (0-15)
    :param value: the 12 bit value (0-4095)
    """
    self.__check_range('led_number', led_num)
    self.__check_range('led_value', value)
    register_low = self.calc_led_register(led_num)
    self.write(register_low, value_low(value))
    self.write(register_low + 1, value_high(value))
[ "def", "set_pwm", "(", "self", ",", "led_num", ",", "value", ")", ":", "self", ".", "__check_range", "(", "'led_number'", ",", "led_num", ")", "self", ".", "__check_range", "(", "'led_value'", ",", "value", ")", "register_low", "=", "self", ".", "calc_led_register", "(", "led_num", ")", "self", ".", "write", "(", "register_low", ",", "value_low", "(", "value", ")", ")", "self", ".", "write", "(", "register_low", "+", "1", ",", "value_high", "(", "value", ")", ")" ]
Set PWM value for the specified LED

:param led_num: LED number (0-15)
:param value: the 12 bit value (0-4095)
[ "Set", "PWM", "value", "for", "the", "specified", "LED" ]
train
https://github.com/voidpp/PCA9685-driver/blob/774790028cbced30fd69384f945198148b1793fc/pca9685_driver/device.py#L115-L127
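value_low() and value_high() are defined elsewhere in pca9685_driver and are not shown in this record; a plausible split of the 12-bit duty value into the two register bytes, given only to make the two write() calls concrete (these bodies are an assumption, not the module's code):

def value_low(value):
    return value & 0xFF           # lower 8 bits of the 12-bit value

def value_high(value):
    return (value >> 8) & 0x0F    # upper 4 bits of the 12-bit value

v = 2570
assert (value_high(v) << 8) | value_low(v) == v  # the two bytes reassemble losslessly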
voidpp/PCA9685-driver
pca9685_driver/device.py
Device.get_pwm
def get_pwm(self, led_num):
    """Generic getter for all LED PWM value"""
    self.__check_range('led_number', led_num)
    register_low = self.calc_led_register(led_num)
    return self.__get_led_value(register_low)
python
def get_pwm(self, led_num):
    """Generic getter for all LED PWM value"""
    self.__check_range('led_number', led_num)
    register_low = self.calc_led_register(led_num)
    return self.__get_led_value(register_low)
[ "def", "get_pwm", "(", "self", ",", "led_num", ")", ":", "self", ".", "__check_range", "(", "'led_number'", ",", "led_num", ")", "register_low", "=", "self", ".", "calc_led_register", "(", "led_num", ")", "return", "self", ".", "__get_led_value", "(", "register_low", ")" ]
Generic getter for all LED PWM value
[ "Generic", "getter", "for", "all", "LED", "PWM", "value" ]
train
https://github.com/voidpp/PCA9685-driver/blob/774790028cbced30fd69384f945198148b1793fc/pca9685_driver/device.py#L134-L138
voidpp/PCA9685-driver
pca9685_driver/device.py
Device.sleep
def sleep(self):
    """Send the controller to sleep"""
    logger.debug("Sleep the controller")
    self.write(Registers.MODE_1, self.mode_1 | (1 << Mode1.SLEEP))
python
def sleep(self):
    """Send the controller to sleep"""
    logger.debug("Sleep the controller")
    self.write(Registers.MODE_1, self.mode_1 | (1 << Mode1.SLEEP))
[ "def", "sleep", "(", "self", ")", ":", "logger", ".", "debug", "(", "\"Sleep the controller\"", ")", "self", ".", "write", "(", "Registers", ".", "MODE_1", ",", "self", ".", "mode_1", "|", "(", "1", "<<", "Mode1", ".", "SLEEP", ")", ")" ]
Send the controller to sleep
[ "Send", "the", "controller", "to", "sleep" ]
train
https://github.com/voidpp/PCA9685-driver/blob/774790028cbced30fd69384f945198148b1793fc/pca9685_driver/device.py#L145-L148
voidpp/PCA9685-driver
pca9685_driver/device.py
Device.write
def write(self, reg, value):
    """Write raw byte value to the specified register

    :param reg: the register number (0-69, 250-255)
    :param value: byte value
    """
    # TODO: check reg: 0-69, 250-255
    self.__check_range('register_value', value)
    logger.debug("Write '%s' to register '%s'" % (value, reg))
    self.__bus.write_byte_data(self.__address, reg, value)
python
def write(self, reg, value):
    """Write raw byte value to the specified register

    :param reg: the register number (0-69, 250-255)
    :param value: byte value
    """
    # TODO: check reg: 0-69, 250-255
    self.__check_range('register_value', value)
    logger.debug("Write '%s' to register '%s'" % (value, reg))
    self.__bus.write_byte_data(self.__address, reg, value)
[ "def", "write", "(", "self", ",", "reg", ",", "value", ")", ":", "# TODO: check reg: 0-69, 250-255", "self", ".", "__check_range", "(", "'register_value'", ",", "value", ")", "logger", ".", "debug", "(", "\"Write '%s' to register '%s'\"", "%", "(", "value", ",", "reg", ")", ")", "self", ".", "__bus", ".", "write_byte_data", "(", "self", ".", "__address", ",", "reg", ",", "value", ")" ]
Write raw byte value to the specified register

:param reg: the register number (0-69, 250-255)
:param value: byte value
[ "Write", "raw", "byte", "value", "to", "the", "specified", "register" ]
train
https://github.com/voidpp/PCA9685-driver/blob/774790028cbced30fd69384f945198148b1793fc/pca9685_driver/device.py#L155-L164
voidpp/PCA9685-driver
pca9685_driver/device.py
Device.set_pwm_frequency
def set_pwm_frequency(self, value):
    """Set the frequency for all PWM output

    :param value: the frequency in Hz
    """
    self.__check_range('pwm_frequency', value)
    reg_val = self.calc_pre_scale(value)
    logger.debug("Calculated prescale value is %s" % reg_val)
    self.sleep()
    self.write(Registers.PRE_SCALE, reg_val)
    self.wake()
python
def set_pwm_frequency(self, value):
    """Set the frequency for all PWM output

    :param value: the frequency in Hz
    """
    self.__check_range('pwm_frequency', value)
    reg_val = self.calc_pre_scale(value)
    logger.debug("Calculated prescale value is %s" % reg_val)
    self.sleep()
    self.write(Registers.PRE_SCALE, reg_val)
    self.wake()
[ "def", "set_pwm_frequency", "(", "self", ",", "value", ")", ":", "self", ".", "__check_range", "(", "'pwm_frequency'", ",", "value", ")", "reg_val", "=", "self", ".", "calc_pre_scale", "(", "value", ")", "logger", ".", "debug", "(", "\"Calculated prescale value is %s\"", "%", "reg_val", ")", "self", ".", "sleep", "(", ")", "self", ".", "write", "(", "Registers", ".", "PRE_SCALE", ",", "reg_val", ")", "self", ".", "wake", "(", ")" ]
Set the frequency for all PWM output

:param value: the frequency in Hz
[ "Set", "the", "frequency", "for", "all", "PWM", "output" ]
train
https://github.com/voidpp/PCA9685-driver/blob/774790028cbced30fd69384f945198148b1793fc/pca9685_driver/device.py#L180-L190
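calc_pre_scale() is not shown in this record; for reference, the PCA9685 datasheet derives the prescale register from the chip's 25 MHz internal oscillator roughly as below (a sketch, not the package's implementation):

def calc_pre_scale(frequency):
    # PRE_SCALE = round(osc_clock / (4096 * update_rate)) - 1
    return int(round(25e6 / (4096 * frequency)) - 1)

print(calc_pre_scale(50))   # 121, a typical value for 50 Hz servo PWM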
obulkin/string-dist
stringdist/pystringdist/levenshtein.py
levenshtein_norm
def levenshtein_norm(source, target):
    """Calculates the normalized Levenshtein distance between two string
    arguments. The result will be a float in the range [0.0, 1.0], with 1.0
    signifying the biggest possible distance between strings with these lengths
    """

    # Compute Levenshtein distance using helper function. The max is always
    # just the length of the longer string, so this is used to normalize result
    # before returning it
    distance = _levenshtein_compute(source, target, False)
    return float(distance) / max(len(source), len(target))
python
def levenshtein_norm(source, target):
    """Calculates the normalized Levenshtein distance between two string
    arguments. The result will be a float in the range [0.0, 1.0], with 1.0
    signifying the biggest possible distance between strings with these lengths
    """

    # Compute Levenshtein distance using helper function. The max is always
    # just the length of the longer string, so this is used to normalize result
    # before returning it
    distance = _levenshtein_compute(source, target, False)
    return float(distance) / max(len(source), len(target))
[ "def", "levenshtein_norm", "(", "source", ",", "target", ")", ":", "# Compute Levenshtein distance using helper function. The max is always", "# just the length of the longer string, so this is used to normalize result", "# before returning it", "distance", "=", "_levenshtein_compute", "(", "source", ",", "target", ",", "False", ")", "return", "float", "(", "distance", ")", "/", "max", "(", "len", "(", "source", ")", ",", "len", "(", "target", ")", ")" ]
Calculates the normalized Levenshtein distance between two string arguments. The result will be a float in the range [0.0, 1.0], with 1.0 signifying the biggest possible distance between strings with these lengths
[ "Calculates", "the", "normalized", "Levenshtein", "distance", "between", "two", "string", "arguments", ".", "The", "result", "will", "be", "a", "float", "in", "the", "range", "[", "0", ".", "0", "1", ".", "0", "]", "with", "1", ".", "0", "signifying", "the", "biggest", "possible", "distance", "between", "strings", "with", "these", "lengths" ]
train
https://github.com/obulkin/string-dist/blob/38d04352d617a5d43b06832cc1bf0aee8978559f/stringdist/pystringdist/levenshtein.py#L15-L25
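A usage sketch, assuming the top-level stringdist package re-exports the function (the import path is an assumption): "kitten" -> "sitting" needs 3 edits and the longer string has length 7, so the normalized distance is 3/7.

from stringdist import levenshtein_norm

print(levenshtein_norm("kitten", "sitting"))  # 0.42857142857142855 (= 3/7)
print(levenshtein_norm("abc", "abc"))         # 0.0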
wdecoster/nanoplotter
nanoplotter/nanoplotter_main.py
check_valid_color
def check_valid_color(color):
    """Check if the color provided by the user is valid.

    If color is invalid the default is returned.
    """
    if color in list(mcolors.CSS4_COLORS.keys()) + ["#4CB391"]:
        logging.info("Nanoplotter: Valid color {}.".format(color))
        return color
    else:
        logging.info("Nanoplotter: Invalid color {}, using default.".format(color))
        sys.stderr.write("Invalid color {}, using default.\n".format(color))
        return "#4CB391"
python
def check_valid_color(color):
    """Check if the color provided by the user is valid.

    If color is invalid the default is returned.
    """
    if color in list(mcolors.CSS4_COLORS.keys()) + ["#4CB391"]:
        logging.info("Nanoplotter: Valid color {}.".format(color))
        return color
    else:
        logging.info("Nanoplotter: Invalid color {}, using default.".format(color))
        sys.stderr.write("Invalid color {}, using default.\n".format(color))
        return "#4CB391"
[ "def", "check_valid_color", "(", "color", ")", ":", "if", "color", "in", "list", "(", "mcolors", ".", "CSS4_COLORS", ".", "keys", "(", ")", ")", "+", "[", "\"#4CB391\"", "]", ":", "logging", ".", "info", "(", "\"Nanoplotter: Valid color {}.\"", ".", "format", "(", "color", ")", ")", "return", "color", "else", ":", "logging", ".", "info", "(", "\"Nanoplotter: Invalid color {}, using default.\"", ".", "format", "(", "color", ")", ")", "sys", ".", "stderr", ".", "write", "(", "\"Invalid color {}, using default.\\n\"", ".", "format", "(", "color", ")", ")", "return", "\"#4CB391\"" ]
Check if the color provided by the user is valid. If color is invalid the default is returned.
[ "Check", "if", "the", "color", "provided", "by", "the", "user", "is", "valid", "." ]
train
https://github.com/wdecoster/nanoplotter/blob/80908dd1be585f450da5a66989de9de4d544ec85/nanoplotter/nanoplotter_main.py#L43-L54
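A quick demonstration of the fallback behavior (import path per the record above): any matplotlib CSS4 color name passes through, while an arbitrary hex string other than the built-in default falls back.

from nanoplotter.nanoplotter_main import check_valid_color

print(check_valid_color("teal"))      # "teal" is a CSS4 name, so it is kept
print(check_valid_color("#123456"))   # not in the accepted list, so "#4CB391"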
wdecoster/nanoplotter
nanoplotter/nanoplotter_main.py
check_valid_format
def check_valid_format(figformat):
    """Check if the specified figure format is valid.

    If format is invalid the default is returned.
    Probably installation-dependent
    """
    fig = plt.figure()
    if figformat in list(fig.canvas.get_supported_filetypes().keys()):
        logging.info("Nanoplotter: valid output format {}".format(figformat))
        return figformat
    else:
        logging.info("Nanoplotter: invalid output format {}".format(figformat))
        sys.stderr.write("Invalid format {}, using default.\n".format(figformat))
        return "png"
python
def check_valid_format(figformat):
    """Check if the specified figure format is valid.

    If format is invalid the default is returned.
    Probably installation-dependent
    """
    fig = plt.figure()
    if figformat in list(fig.canvas.get_supported_filetypes().keys()):
        logging.info("Nanoplotter: valid output format {}".format(figformat))
        return figformat
    else:
        logging.info("Nanoplotter: invalid output format {}".format(figformat))
        sys.stderr.write("Invalid format {}, using default.\n".format(figformat))
        return "png"
[ "def", "check_valid_format", "(", "figformat", ")", ":", "fig", "=", "plt", ".", "figure", "(", ")", "if", "figformat", "in", "list", "(", "fig", ".", "canvas", ".", "get_supported_filetypes", "(", ")", ".", "keys", "(", ")", ")", ":", "logging", ".", "info", "(", "\"Nanoplotter: valid output format {}\"", ".", "format", "(", "figformat", ")", ")", "return", "figformat", "else", ":", "logging", ".", "info", "(", "\"Nanoplotter: invalid output format {}\"", ".", "format", "(", "figformat", ")", ")", "sys", ".", "stderr", ".", "write", "(", "\"Invalid format {}, using default.\\n\"", ".", "format", "(", "figformat", ")", ")", "return", "\"png\"" ]
Check if the specified figure format is valid.

If format is invalid the default is returned.
Probably installation-dependent
[ "Check", "if", "the", "specified", "figure", "format", "is", "valid", "." ]
train
https://github.com/wdecoster/nanoplotter/blob/80908dd1be585f450da5a66989de9de4d544ec85/nanoplotter/nanoplotter_main.py#L57-L70
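The accepted formats come straight from the active matplotlib backend, which is why the docstring calls them installation-dependent; they can be listed directly:

import matplotlib.pyplot as plt

fig = plt.figure()
print(sorted(fig.canvas.get_supported_filetypes()))
# backend-dependent, e.g. ['eps', 'jpeg', 'jpg', 'pdf', 'png', 'svg', ...]
plt.close(fig)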
wdecoster/nanoplotter
nanoplotter/nanoplotter_main.py
scatter
def scatter(x, y, names, path, plots, color="#4CB391", figformat="png",
            stat=None, log=False, minvalx=0, minvaly=0, title=None,
            plot_settings=None):
    """Create bivariate plots.

    Create four types of bivariate plots of x vs y, containing marginal summaries
    -A scatter plot with histograms on axes
    -A hexagonal binned plot with histograms on axes
    -A kernel density plot with density curves on axes
    -A pauvre-style plot using code from https://github.com/conchoecia/pauvre
    """
    logging.info("Nanoplotter: Creating {} vs {} plots using statistics from {} reads.".format(
        names[0], names[1], x.size))
    if not contains_variance([x, y], names):
        return []
    sns.set(style="ticks", **plot_settings)
    maxvalx = np.amax(x)
    maxvaly = np.amax(y)
    plots_made = []

    if plots["hex"]:
        hex_plot = Plot(
            path=path + "_hex." + figformat,
            title="{} vs {} plot using hexagonal bins".format(names[0], names[1]))
        plot = sns.jointplot(
            x=x,
            y=y,
            kind="hex",
            color=color,
            stat_func=stat,
            space=0,
            xlim=(minvalx, maxvalx),
            ylim=(minvaly, maxvaly),
            height=10)
        plot.set_axis_labels(names[0], names[1])
        if log:
            hex_plot.title = hex_plot.title + " after log transformation of read lengths"
            ticks = [10**i for i in range(10) if not 10**i > 10 * (10**maxvalx)]
            plot.ax_joint.set_xticks(np.log10(ticks))
            plot.ax_marg_x.set_xticks(np.log10(ticks))
            plot.ax_joint.set_xticklabels(ticks)
        plt.subplots_adjust(top=0.90)
        plot.fig.suptitle(title or "{} vs {} plot".format(names[0], names[1]), fontsize=25)
        hex_plot.fig = plot
        hex_plot.save(format=figformat)
        plots_made.append(hex_plot)

    sns.set(style="darkgrid", **plot_settings)
    if plots["dot"]:
        dot_plot = Plot(
            path=path + "_dot." + figformat,
            title="{} vs {} plot using dots".format(names[0], names[1]))
        plot = sns.jointplot(
            x=x,
            y=y,
            kind="scatter",
            color=color,
            stat_func=stat,
            xlim=(minvalx, maxvalx),
            ylim=(minvaly, maxvaly),
            space=0,
            height=10,
            joint_kws={"s": 1})
        plot.set_axis_labels(names[0], names[1])
        if log:
            dot_plot.title = dot_plot.title + " after log transformation of read lengths"
            ticks = [10**i for i in range(10) if not 10**i > 10 * (10**maxvalx)]
            plot.ax_joint.set_xticks(np.log10(ticks))
            plot.ax_marg_x.set_xticks(np.log10(ticks))
            plot.ax_joint.set_xticklabels(ticks)
        plt.subplots_adjust(top=0.90)
        plot.fig.suptitle(title or "{} vs {} plot".format(names[0], names[1]), fontsize=25)
        dot_plot.fig = plot
        dot_plot.save(format=figformat)
        plots_made.append(dot_plot)

    if plots["kde"]:
        idx = np.random.choice(x.index, min(2000, len(x)), replace=False)
        kde_plot = Plot(
            path=path + "_kde." + figformat,
            title="{} vs {} plot using a kernel density estimation".format(names[0], names[1]))
        plot = sns.jointplot(
            x=x[idx],
            y=y[idx],
            kind="kde",
            clip=((0, np.Inf), (0, np.Inf)),
            xlim=(minvalx, maxvalx),
            ylim=(minvaly, maxvaly),
            space=0,
            color=color,
            stat_func=stat,
            shade_lowest=False,
            height=10)
        plot.set_axis_labels(names[0], names[1])
        if log:
            kde_plot.title = kde_plot.title + " after log transformation of read lengths"
            ticks = [10**i for i in range(10) if not 10**i > 10 * (10**maxvalx)]
            plot.ax_joint.set_xticks(np.log10(ticks))
            plot.ax_marg_x.set_xticks(np.log10(ticks))
            plot.ax_joint.set_xticklabels(ticks)
        plt.subplots_adjust(top=0.90)
        plot.fig.suptitle(title or "{} vs {} plot".format(names[0], names[1]), fontsize=25)
        kde_plot.fig = plot
        kde_plot.save(format=figformat)
        plots_made.append(kde_plot)

    if plots["pauvre"] and names == ['Read lengths', 'Average read quality'] and log is False:
        pauvre_plot = Plot(
            path=path + "_pauvre." + figformat,
            title="{} vs {} plot using pauvre-style @conchoecia".format(names[0], names[1]))
        sns.set(style="white", **plot_settings)
        margin_plot(df=pd.DataFrame({"length": x, "meanQual": y}),
                    Y_AXES=False,
                    title=title or "Length vs Quality in Pauvre-style",
                    plot_maxlen=None,
                    plot_minlen=0,
                    plot_maxqual=None,
                    plot_minqual=0,
                    lengthbin=None,
                    qualbin=None,
                    BASENAME="whatever",
                    path=pauvre_plot.path,
                    fileform=[figformat],
                    dpi=600,
                    TRANSPARENT=True,
                    QUIET=True)
        plots_made.append(pauvre_plot)
    plt.close("all")
    return plots_made
python
def scatter(x, y, names, path, plots, color="#4CB391", figformat="png",
            stat=None, log=False, minvalx=0, minvaly=0, title=None,
            plot_settings=None):
    """Create bivariate plots.

    Create four types of bivariate plots of x vs y, containing marginal summaries
    -A scatter plot with histograms on axes
    -A hexagonal binned plot with histograms on axes
    -A kernel density plot with density curves on axes
    -A pauvre-style plot using code from https://github.com/conchoecia/pauvre
    """
    logging.info("Nanoplotter: Creating {} vs {} plots using statistics from {} reads.".format(
        names[0], names[1], x.size))
    if not contains_variance([x, y], names):
        return []
    sns.set(style="ticks", **plot_settings)
    maxvalx = np.amax(x)
    maxvaly = np.amax(y)
    plots_made = []

    if plots["hex"]:
        hex_plot = Plot(
            path=path + "_hex." + figformat,
            title="{} vs {} plot using hexagonal bins".format(names[0], names[1]))
        plot = sns.jointplot(
            x=x,
            y=y,
            kind="hex",
            color=color,
            stat_func=stat,
            space=0,
            xlim=(minvalx, maxvalx),
            ylim=(minvaly, maxvaly),
            height=10)
        plot.set_axis_labels(names[0], names[1])
        if log:
            hex_plot.title = hex_plot.title + " after log transformation of read lengths"
            ticks = [10**i for i in range(10) if not 10**i > 10 * (10**maxvalx)]
            plot.ax_joint.set_xticks(np.log10(ticks))
            plot.ax_marg_x.set_xticks(np.log10(ticks))
            plot.ax_joint.set_xticklabels(ticks)
        plt.subplots_adjust(top=0.90)
        plot.fig.suptitle(title or "{} vs {} plot".format(names[0], names[1]), fontsize=25)
        hex_plot.fig = plot
        hex_plot.save(format=figformat)
        plots_made.append(hex_plot)

    sns.set(style="darkgrid", **plot_settings)
    if plots["dot"]:
        dot_plot = Plot(
            path=path + "_dot." + figformat,
            title="{} vs {} plot using dots".format(names[0], names[1]))
        plot = sns.jointplot(
            x=x,
            y=y,
            kind="scatter",
            color=color,
            stat_func=stat,
            xlim=(minvalx, maxvalx),
            ylim=(minvaly, maxvaly),
            space=0,
            height=10,
            joint_kws={"s": 1})
        plot.set_axis_labels(names[0], names[1])
        if log:
            dot_plot.title = dot_plot.title + " after log transformation of read lengths"
            ticks = [10**i for i in range(10) if not 10**i > 10 * (10**maxvalx)]
            plot.ax_joint.set_xticks(np.log10(ticks))
            plot.ax_marg_x.set_xticks(np.log10(ticks))
            plot.ax_joint.set_xticklabels(ticks)
        plt.subplots_adjust(top=0.90)
        plot.fig.suptitle(title or "{} vs {} plot".format(names[0], names[1]), fontsize=25)
        dot_plot.fig = plot
        dot_plot.save(format=figformat)
        plots_made.append(dot_plot)

    if plots["kde"]:
        idx = np.random.choice(x.index, min(2000, len(x)), replace=False)
        kde_plot = Plot(
            path=path + "_kde." + figformat,
            title="{} vs {} plot using a kernel density estimation".format(names[0], names[1]))
        plot = sns.jointplot(
            x=x[idx],
            y=y[idx],
            kind="kde",
            clip=((0, np.Inf), (0, np.Inf)),
            xlim=(minvalx, maxvalx),
            ylim=(minvaly, maxvaly),
            space=0,
            color=color,
            stat_func=stat,
            shade_lowest=False,
            height=10)
        plot.set_axis_labels(names[0], names[1])
        if log:
            kde_plot.title = kde_plot.title + " after log transformation of read lengths"
            ticks = [10**i for i in range(10) if not 10**i > 10 * (10**maxvalx)]
            plot.ax_joint.set_xticks(np.log10(ticks))
            plot.ax_marg_x.set_xticks(np.log10(ticks))
            plot.ax_joint.set_xticklabels(ticks)
        plt.subplots_adjust(top=0.90)
        plot.fig.suptitle(title or "{} vs {} plot".format(names[0], names[1]), fontsize=25)
        kde_plot.fig = plot
        kde_plot.save(format=figformat)
        plots_made.append(kde_plot)

    if plots["pauvre"] and names == ['Read lengths', 'Average read quality'] and log is False:
        pauvre_plot = Plot(
            path=path + "_pauvre." + figformat,
            title="{} vs {} plot using pauvre-style @conchoecia".format(names[0], names[1]))
        sns.set(style="white", **plot_settings)
        margin_plot(df=pd.DataFrame({"length": x, "meanQual": y}),
                    Y_AXES=False,
                    title=title or "Length vs Quality in Pauvre-style",
                    plot_maxlen=None,
                    plot_minlen=0,
                    plot_maxqual=None,
                    plot_minqual=0,
                    lengthbin=None,
                    qualbin=None,
                    BASENAME="whatever",
                    path=pauvre_plot.path,
                    fileform=[figformat],
                    dpi=600,
                    TRANSPARENT=True,
                    QUIET=True)
        plots_made.append(pauvre_plot)
    plt.close("all")
    return plots_made
[ "def", "scatter", "(", "x", ",", "y", ",", "names", ",", "path", ",", "plots", ",", "color", "=", "\"#4CB391\"", ",", "figformat", "=", "\"png\"", ",", "stat", "=", "None", ",", "log", "=", "False", ",", "minvalx", "=", "0", ",", "minvaly", "=", "0", ",", "title", "=", "None", ",", "plot_settings", "=", "None", ")", ":", "logging", ".", "info", "(", "\"Nanoplotter: Creating {} vs {} plots using statistics from {} reads.\"", ".", "format", "(", "names", "[", "0", "]", ",", "names", "[", "1", "]", ",", "x", ".", "size", ")", ")", "if", "not", "contains_variance", "(", "[", "x", ",", "y", "]", ",", "names", ")", ":", "return", "[", "]", "sns", ".", "set", "(", "style", "=", "\"ticks\"", ",", "*", "*", "plot_settings", ")", "maxvalx", "=", "np", ".", "amax", "(", "x", ")", "maxvaly", "=", "np", ".", "amax", "(", "y", ")", "plots_made", "=", "[", "]", "if", "plots", "[", "\"hex\"", "]", ":", "hex_plot", "=", "Plot", "(", "path", "=", "path", "+", "\"_hex.\"", "+", "figformat", ",", "title", "=", "\"{} vs {} plot using hexagonal bins\"", ".", "format", "(", "names", "[", "0", "]", ",", "names", "[", "1", "]", ")", ")", "plot", "=", "sns", ".", "jointplot", "(", "x", "=", "x", ",", "y", "=", "y", ",", "kind", "=", "\"hex\"", ",", "color", "=", "color", ",", "stat_func", "=", "stat", ",", "space", "=", "0", ",", "xlim", "=", "(", "minvalx", ",", "maxvalx", ")", ",", "ylim", "=", "(", "minvaly", ",", "maxvaly", ")", ",", "height", "=", "10", ")", "plot", ".", "set_axis_labels", "(", "names", "[", "0", "]", ",", "names", "[", "1", "]", ")", "if", "log", ":", "hex_plot", ".", "title", "=", "hex_plot", ".", "title", "+", "\" after log transformation of read lengths\"", "ticks", "=", "[", "10", "**", "i", "for", "i", "in", "range", "(", "10", ")", "if", "not", "10", "**", "i", ">", "10", "*", "(", "10", "**", "maxvalx", ")", "]", "plot", ".", "ax_joint", ".", "set_xticks", "(", "np", ".", "log10", "(", "ticks", ")", ")", "plot", ".", "ax_marg_x", ".", "set_xticks", "(", "np", ".", "log10", "(", "ticks", ")", ")", "plot", ".", "ax_joint", ".", "set_xticklabels", "(", "ticks", ")", "plt", ".", "subplots_adjust", "(", "top", "=", "0.90", ")", "plot", ".", "fig", ".", "suptitle", "(", "title", "or", "\"{} vs {} plot\"", ".", "format", "(", "names", "[", "0", "]", ",", "names", "[", "1", "]", ")", ",", "fontsize", "=", "25", ")", "hex_plot", ".", "fig", "=", "plot", "hex_plot", ".", "save", "(", "format", "=", "figformat", ")", "plots_made", ".", "append", "(", "hex_plot", ")", "sns", ".", "set", "(", "style", "=", "\"darkgrid\"", ",", "*", "*", "plot_settings", ")", "if", "plots", "[", "\"dot\"", "]", ":", "dot_plot", "=", "Plot", "(", "path", "=", "path", "+", "\"_dot.\"", "+", "figformat", ",", "title", "=", "\"{} vs {} plot using dots\"", ".", "format", "(", "names", "[", "0", "]", ",", "names", "[", "1", "]", ")", ")", "plot", "=", "sns", ".", "jointplot", "(", "x", "=", "x", ",", "y", "=", "y", ",", "kind", "=", "\"scatter\"", ",", "color", "=", "color", ",", "stat_func", "=", "stat", ",", "xlim", "=", "(", "minvalx", ",", "maxvalx", ")", ",", "ylim", "=", "(", "minvaly", ",", "maxvaly", ")", ",", "space", "=", "0", ",", "height", "=", "10", ",", "joint_kws", "=", "{", "\"s\"", ":", "1", "}", ")", "plot", ".", "set_axis_labels", "(", "names", "[", "0", "]", ",", "names", "[", "1", "]", ")", "if", "log", ":", "dot_plot", ".", "title", "=", "dot_plot", ".", "title", "+", "\" after log transformation of read lengths\"", "ticks", "=", "[", "10", "**", "i", "for", "i", "in", "range", "(", "10", ")", 
"if", "not", "10", "**", "i", ">", "10", "*", "(", "10", "**", "maxvalx", ")", "]", "plot", ".", "ax_joint", ".", "set_xticks", "(", "np", ".", "log10", "(", "ticks", ")", ")", "plot", ".", "ax_marg_x", ".", "set_xticks", "(", "np", ".", "log10", "(", "ticks", ")", ")", "plot", ".", "ax_joint", ".", "set_xticklabels", "(", "ticks", ")", "plt", ".", "subplots_adjust", "(", "top", "=", "0.90", ")", "plot", ".", "fig", ".", "suptitle", "(", "title", "or", "\"{} vs {} plot\"", ".", "format", "(", "names", "[", "0", "]", ",", "names", "[", "1", "]", ")", ",", "fontsize", "=", "25", ")", "dot_plot", ".", "fig", "=", "plot", "dot_plot", ".", "save", "(", "format", "=", "figformat", ")", "plots_made", ".", "append", "(", "dot_plot", ")", "if", "plots", "[", "\"kde\"", "]", ":", "idx", "=", "np", ".", "random", ".", "choice", "(", "x", ".", "index", ",", "min", "(", "2000", ",", "len", "(", "x", ")", ")", ",", "replace", "=", "False", ")", "kde_plot", "=", "Plot", "(", "path", "=", "path", "+", "\"_kde.\"", "+", "figformat", ",", "title", "=", "\"{} vs {} plot using a kernel density estimation\"", ".", "format", "(", "names", "[", "0", "]", ",", "names", "[", "1", "]", ")", ")", "plot", "=", "sns", ".", "jointplot", "(", "x", "=", "x", "[", "idx", "]", ",", "y", "=", "y", "[", "idx", "]", ",", "kind", "=", "\"kde\"", ",", "clip", "=", "(", "(", "0", ",", "np", ".", "Inf", ")", ",", "(", "0", ",", "np", ".", "Inf", ")", ")", ",", "xlim", "=", "(", "minvalx", ",", "maxvalx", ")", ",", "ylim", "=", "(", "minvaly", ",", "maxvaly", ")", ",", "space", "=", "0", ",", "color", "=", "color", ",", "stat_func", "=", "stat", ",", "shade_lowest", "=", "False", ",", "height", "=", "10", ")", "plot", ".", "set_axis_labels", "(", "names", "[", "0", "]", ",", "names", "[", "1", "]", ")", "if", "log", ":", "kde_plot", ".", "title", "=", "kde_plot", ".", "title", "+", "\" after log transformation of read lengths\"", "ticks", "=", "[", "10", "**", "i", "for", "i", "in", "range", "(", "10", ")", "if", "not", "10", "**", "i", ">", "10", "*", "(", "10", "**", "maxvalx", ")", "]", "plot", ".", "ax_joint", ".", "set_xticks", "(", "np", ".", "log10", "(", "ticks", ")", ")", "plot", ".", "ax_marg_x", ".", "set_xticks", "(", "np", ".", "log10", "(", "ticks", ")", ")", "plot", ".", "ax_joint", ".", "set_xticklabels", "(", "ticks", ")", "plt", ".", "subplots_adjust", "(", "top", "=", "0.90", ")", "plot", ".", "fig", ".", "suptitle", "(", "title", "or", "\"{} vs {} plot\"", ".", "format", "(", "names", "[", "0", "]", ",", "names", "[", "1", "]", ")", ",", "fontsize", "=", "25", ")", "kde_plot", ".", "fig", "=", "plot", "kde_plot", ".", "save", "(", "format", "=", "figformat", ")", "plots_made", ".", "append", "(", "kde_plot", ")", "if", "plots", "[", "\"pauvre\"", "]", "and", "names", "==", "[", "'Read lengths'", ",", "'Average read quality'", "]", "and", "log", "is", "False", ":", "pauvre_plot", "=", "Plot", "(", "path", "=", "path", "+", "\"_pauvre.\"", "+", "figformat", ",", "title", "=", "\"{} vs {} plot using pauvre-style @conchoecia\"", ".", "format", "(", "names", "[", "0", "]", ",", "names", "[", "1", "]", ")", ")", "sns", ".", "set", "(", "style", "=", "\"white\"", ",", "*", "*", "plot_settings", ")", "margin_plot", "(", "df", "=", "pd", ".", "DataFrame", "(", "{", "\"length\"", ":", "x", ",", "\"meanQual\"", ":", "y", "}", ")", ",", "Y_AXES", "=", "False", ",", "title", "=", "title", "or", "\"Length vs Quality in Pauvre-style\"", ",", "plot_maxlen", "=", "None", ",", "plot_minlen", "=", "0", ",", "plot_maxqual", "=", 
"None", ",", "plot_minqual", "=", "0", ",", "lengthbin", "=", "None", ",", "qualbin", "=", "None", ",", "BASENAME", "=", "\"whatever\"", ",", "path", "=", "pauvre_plot", ".", "path", ",", "fileform", "=", "[", "figformat", "]", ",", "dpi", "=", "600", ",", "TRANSPARENT", "=", "True", ",", "QUIET", "=", "True", ")", "plots_made", ".", "append", "(", "pauvre_plot", ")", "plt", ".", "close", "(", "\"all\"", ")", "return", "plots_made" ]
Create bivariate plots.

Create four types of bivariate plots of x vs y, containing marginal summaries
-A scatter plot with histograms on axes
-A hexagonal binned plot with histograms on axes
-A kernel density plot with density curves on axes
-A pauvre-style plot using code from https://github.com/conchoecia/pauvre
[ "Create", "bivariate", "plots", "." ]
train
https://github.com/wdecoster/nanoplotter/blob/80908dd1be585f450da5a66989de9de4d544ec85/nanoplotter/nanoplotter_main.py#L78-L206
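All three log branches above share one tick-building idiom: the data arrive already log10-transformed, ticks go at log10 of every power of ten that fits the data range, and the labels show the raw powers. Isolated, with an arbitrary example value:

import numpy as np

maxvalx = np.log10(35000)   # upstream, read lengths were log10-transformed
ticks = [10**i for i in range(10) if not 10**i > 10 * (10**maxvalx)]
print(ticks)                # [1, 10, 100, 1000, 10000, 100000]
print(np.log10(ticks))      # [0. 1. 2. 3. 4. 5.] -> tick positions on the log axis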
wdecoster/nanoplotter
nanoplotter/nanoplotter_main.py
contains_variance
def contains_variance(arrays, names):
    """
    Make sure both arrays for bivariate ("scatter") plot have a stddev > 0
    """
    for ar, name in zip(arrays, names):
        if np.std(ar) == 0:
            sys.stderr.write(
                "No variation in '{}', skipping bivariate plots.\n".format(name.lower()))
            logging.info("Nanoplotter: No variation in {}, skipping bivariate plot".format(name))
            return False
    else:
        return True
python
def contains_variance(arrays, names):
    """
    Make sure both arrays for bivariate ("scatter") plot have a stddev > 0
    """
    for ar, name in zip(arrays, names):
        if np.std(ar) == 0:
            sys.stderr.write(
                "No variation in '{}', skipping bivariate plots.\n".format(name.lower()))
            logging.info("Nanoplotter: No variation in {}, skipping bivariate plot".format(name))
            return False
    else:
        return True
[ "def", "contains_variance", "(", "arrays", ",", "names", ")", ":", "for", "ar", ",", "name", "in", "zip", "(", "arrays", ",", "names", ")", ":", "if", "np", ".", "std", "(", "ar", ")", "==", "0", ":", "sys", ".", "stderr", ".", "write", "(", "\"No variation in '{}', skipping bivariate plots.\\n\"", ".", "format", "(", "name", ".", "lower", "(", ")", ")", ")", "logging", ".", "info", "(", "\"Nanoplotter: No variation in {}, skipping bivariate plot\"", ".", "format", "(", "name", ")", ")", "return", "False", "else", ":", "return", "True" ]
Make sure both arrays for bivariate ("scatter") plot have a stddev > 0
[ "Make", "sure", "both", "arrays", "for", "bivariate", "(", "scatter", ")", "plot", "have", "a", "stddev", ">", "0" ]
train
https://github.com/wdecoster/nanoplotter/blob/80908dd1be585f450da5a66989de9de4d544ec85/nanoplotter/nanoplotter_main.py#L209-L220
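A sketch of the guard in action (import path per the record above); the second array is constant, so the check fails and a message lands on stderr:

import numpy as np
from nanoplotter.nanoplotter_main import contains_variance

ok = contains_variance([np.array([1.0, 2.0, 3.0]), np.array([5.0, 5.0, 5.0])],
                       ["Read lengths", "Average read quality"])
print(ok)  # False, after "No variation in 'average read quality'..." on stderr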
wdecoster/nanoplotter
nanoplotter/nanoplotter_main.py
length_plots
def length_plots(array, name, path, title=None, n50=None, color="#4CB391", figformat="png"):
    """Create histogram of normal and log transformed read lengths."""
    logging.info("Nanoplotter: Creating length plots for {}.".format(name))
    maxvalx = np.amax(array)
    if n50:
        logging.info("Nanoplotter: Using {} reads with read length N50 of {}bp and maximum of {}bp."
                     .format(array.size, n50, maxvalx))
    else:
        logging.info("Nanoplotter: Using {} reads maximum of {}bp.".format(array.size, maxvalx))
    plots = []
    HistType = namedtuple('HistType', 'weight name ylabel')
    for h_type in [HistType(None, "", "Number of reads"),
                   HistType(array, "Weighted ", "Number of bases")]:
        histogram = Plot(
            path=path + h_type.name.replace(" ", "_") + "Histogram"
            + name.replace(' ', '') + "." + figformat,
            title=h_type.name + "Histogram of read lengths")
        ax = sns.distplot(
            a=array,
            kde=False,
            hist=True,
            bins=max(round(int(maxvalx) / 500), 10),
            color=color,
            hist_kws=dict(weights=h_type.weight,
                          edgecolor=color,
                          linewidth=0.2,
                          alpha=0.8))
        if n50:
            plt.axvline(n50)
            plt.annotate('N50', xy=(n50, np.amax([h.get_height() for h in ax.patches])), size=8)
        ax.set(
            xlabel='Read length',
            ylabel=h_type.ylabel,
            title=title or histogram.title)
        plt.ticklabel_format(style='plain', axis='y')
        histogram.fig = ax.get_figure()
        histogram.save(format=figformat)
        plt.close("all")

        log_histogram = Plot(
            path=path + h_type.name.replace(" ", "_") + "LogTransformed_Histogram"
            + name.replace(' ', '') + "." + figformat,
            title=h_type.name + "Histogram of read lengths after log transformation")
        ax = sns.distplot(
            a=np.log10(array),
            kde=False,
            hist=True,
            color=color,
            hist_kws=dict(weights=h_type.weight,
                          edgecolor=color,
                          linewidth=0.2,
                          alpha=0.8))
        ticks = [10**i for i in range(10) if not 10**i > 10 * maxvalx]
        ax.set(
            xticks=np.log10(ticks),
            xticklabels=ticks,
            xlabel='Read length',
            ylabel=h_type.ylabel,
            title=title or log_histogram.title)
        if n50:
            plt.axvline(np.log10(n50))
            plt.annotate('N50', xy=(np.log10(n50), np.amax(
                [h.get_height() for h in ax.patches])), size=8)
        plt.ticklabel_format(style='plain', axis='y')
        log_histogram.fig = ax.get_figure()
        log_histogram.save(format=figformat)
        plt.close("all")
        plots.extend([histogram, log_histogram])
    plots.append(yield_by_minimal_length_plot(array=array,
                                              name=name,
                                              path=path,
                                              title=title,
                                              color=color,
                                              figformat=figformat))
    return plots
python
def length_plots(array, name, path, title=None, n50=None, color="#4CB391", figformat="png"):
    """Create histogram of normal and log transformed read lengths."""
    logging.info("Nanoplotter: Creating length plots for {}.".format(name))
    maxvalx = np.amax(array)
    if n50:
        logging.info("Nanoplotter: Using {} reads with read length N50 of {}bp and maximum of {}bp."
                     .format(array.size, n50, maxvalx))
    else:
        logging.info("Nanoplotter: Using {} reads maximum of {}bp.".format(array.size, maxvalx))
    plots = []
    HistType = namedtuple('HistType', 'weight name ylabel')
    for h_type in [HistType(None, "", "Number of reads"),
                   HistType(array, "Weighted ", "Number of bases")]:
        histogram = Plot(
            path=path + h_type.name.replace(" ", "_") + "Histogram"
            + name.replace(' ', '') + "." + figformat,
            title=h_type.name + "Histogram of read lengths")
        ax = sns.distplot(
            a=array,
            kde=False,
            hist=True,
            bins=max(round(int(maxvalx) / 500), 10),
            color=color,
            hist_kws=dict(weights=h_type.weight,
                          edgecolor=color,
                          linewidth=0.2,
                          alpha=0.8))
        if n50:
            plt.axvline(n50)
            plt.annotate('N50', xy=(n50, np.amax([h.get_height() for h in ax.patches])), size=8)
        ax.set(
            xlabel='Read length',
            ylabel=h_type.ylabel,
            title=title or histogram.title)
        plt.ticklabel_format(style='plain', axis='y')
        histogram.fig = ax.get_figure()
        histogram.save(format=figformat)
        plt.close("all")

        log_histogram = Plot(
            path=path + h_type.name.replace(" ", "_") + "LogTransformed_Histogram"
            + name.replace(' ', '') + "." + figformat,
            title=h_type.name + "Histogram of read lengths after log transformation")
        ax = sns.distplot(
            a=np.log10(array),
            kde=False,
            hist=True,
            color=color,
            hist_kws=dict(weights=h_type.weight,
                          edgecolor=color,
                          linewidth=0.2,
                          alpha=0.8))
        ticks = [10**i for i in range(10) if not 10**i > 10 * maxvalx]
        ax.set(
            xticks=np.log10(ticks),
            xticklabels=ticks,
            xlabel='Read length',
            ylabel=h_type.ylabel,
            title=title or log_histogram.title)
        if n50:
            plt.axvline(np.log10(n50))
            plt.annotate('N50', xy=(np.log10(n50), np.amax(
                [h.get_height() for h in ax.patches])), size=8)
        plt.ticklabel_format(style='plain', axis='y')
        log_histogram.fig = ax.get_figure()
        log_histogram.save(format=figformat)
        plt.close("all")
        plots.extend([histogram, log_histogram])
    plots.append(yield_by_minimal_length_plot(array=array,
                                              name=name,
                                              path=path,
                                              title=title,
                                              color=color,
                                              figformat=figformat))
    return plots
[ "def", "length_plots", "(", "array", ",", "name", ",", "path", ",", "title", "=", "None", ",", "n50", "=", "None", ",", "color", "=", "\"#4CB391\"", ",", "figformat", "=", "\"png\"", ")", ":", "logging", ".", "info", "(", "\"Nanoplotter: Creating length plots for {}.\"", ".", "format", "(", "name", ")", ")", "maxvalx", "=", "np", ".", "amax", "(", "array", ")", "if", "n50", ":", "logging", ".", "info", "(", "\"Nanoplotter: Using {} reads with read length N50 of {}bp and maximum of {}bp.\"", ".", "format", "(", "array", ".", "size", ",", "n50", ",", "maxvalx", ")", ")", "else", ":", "logging", ".", "info", "(", "\"Nanoplotter: Using {} reads maximum of {}bp.\"", ".", "format", "(", "array", ".", "size", ",", "maxvalx", ")", ")", "plots", "=", "[", "]", "HistType", "=", "namedtuple", "(", "'HistType'", ",", "'weight name ylabel'", ")", "for", "h_type", "in", "[", "HistType", "(", "None", ",", "\"\"", ",", "\"Number of reads\"", ")", ",", "HistType", "(", "array", ",", "\"Weighted \"", ",", "\"Number of bases\"", ")", "]", ":", "histogram", "=", "Plot", "(", "path", "=", "path", "+", "h_type", ".", "name", ".", "replace", "(", "\" \"", ",", "\"_\"", ")", "+", "\"Histogram\"", "+", "name", ".", "replace", "(", "' '", ",", "''", ")", "+", "\".\"", "+", "figformat", ",", "title", "=", "h_type", ".", "name", "+", "\"Histogram of read lengths\"", ")", "ax", "=", "sns", ".", "distplot", "(", "a", "=", "array", ",", "kde", "=", "False", ",", "hist", "=", "True", ",", "bins", "=", "max", "(", "round", "(", "int", "(", "maxvalx", ")", "/", "500", ")", ",", "10", ")", ",", "color", "=", "color", ",", "hist_kws", "=", "dict", "(", "weights", "=", "h_type", ".", "weight", ",", "edgecolor", "=", "color", ",", "linewidth", "=", "0.2", ",", "alpha", "=", "0.8", ")", ")", "if", "n50", ":", "plt", ".", "axvline", "(", "n50", ")", "plt", ".", "annotate", "(", "'N50'", ",", "xy", "=", "(", "n50", ",", "np", ".", "amax", "(", "[", "h", ".", "get_height", "(", ")", "for", "h", "in", "ax", ".", "patches", "]", ")", ")", ",", "size", "=", "8", ")", "ax", ".", "set", "(", "xlabel", "=", "'Read length'", ",", "ylabel", "=", "h_type", ".", "ylabel", ",", "title", "=", "title", "or", "histogram", ".", "title", ")", "plt", ".", "ticklabel_format", "(", "style", "=", "'plain'", ",", "axis", "=", "'y'", ")", "histogram", ".", "fig", "=", "ax", ".", "get_figure", "(", ")", "histogram", ".", "save", "(", "format", "=", "figformat", ")", "plt", ".", "close", "(", "\"all\"", ")", "log_histogram", "=", "Plot", "(", "path", "=", "path", "+", "h_type", ".", "name", ".", "replace", "(", "\" \"", ",", "\"_\"", ")", "+", "\"LogTransformed_Histogram\"", "+", "name", ".", "replace", "(", "' '", ",", "''", ")", "+", "\".\"", "+", "figformat", ",", "title", "=", "h_type", ".", "name", "+", "\"Histogram of read lengths after log transformation\"", ")", "ax", "=", "sns", ".", "distplot", "(", "a", "=", "np", ".", "log10", "(", "array", ")", ",", "kde", "=", "False", ",", "hist", "=", "True", ",", "color", "=", "color", ",", "hist_kws", "=", "dict", "(", "weights", "=", "h_type", ".", "weight", ",", "edgecolor", "=", "color", ",", "linewidth", "=", "0.2", ",", "alpha", "=", "0.8", ")", ")", "ticks", "=", "[", "10", "**", "i", "for", "i", "in", "range", "(", "10", ")", "if", "not", "10", "**", "i", ">", "10", "*", "maxvalx", "]", "ax", ".", "set", "(", "xticks", "=", "np", ".", "log10", "(", "ticks", ")", ",", "xticklabels", "=", "ticks", ",", "xlabel", "=", "'Read length'", ",", "ylabel", "=", "h_type", ".", "ylabel", ",", "title", "=", 
"title", "or", "log_histogram", ".", "title", ")", "if", "n50", ":", "plt", ".", "axvline", "(", "np", ".", "log10", "(", "n50", ")", ")", "plt", ".", "annotate", "(", "'N50'", ",", "xy", "=", "(", "np", ".", "log10", "(", "n50", ")", ",", "np", ".", "amax", "(", "[", "h", ".", "get_height", "(", ")", "for", "h", "in", "ax", ".", "patches", "]", ")", ")", ",", "size", "=", "8", ")", "plt", ".", "ticklabel_format", "(", "style", "=", "'plain'", ",", "axis", "=", "'y'", ")", "log_histogram", ".", "fig", "=", "ax", ".", "get_figure", "(", ")", "log_histogram", ".", "save", "(", "format", "=", "figformat", ")", "plt", ".", "close", "(", "\"all\"", ")", "plots", ".", "extend", "(", "[", "histogram", ",", "log_histogram", "]", ")", "plots", ".", "append", "(", "yield_by_minimal_length_plot", "(", "array", "=", "array", ",", "name", "=", "name", ",", "path", "=", "path", ",", "title", "=", "title", ",", "color", "=", "color", ",", "figformat", "=", "figformat", ")", ")", "return", "plots" ]
Create histogram of normal and log transformed read lengths.
[ "Create", "histogram", "of", "normal", "and", "log", "transformed", "read", "lengths", "." ]
train
https://github.com/wdecoster/nanoplotter/blob/80908dd1be585f450da5a66989de9de4d544ec85/nanoplotter/nanoplotter_main.py#L223-L298
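The n50 argument marked on both histograms is computed by the caller; a common definition (an assumption here, not code from this module) is the length L such that reads of length >= L contain at least half of all sequenced bases:

import numpy as np

def n50(lengths):
    s = np.sort(lengths)[::-1]            # longest reads first
    return s[np.cumsum(s) >= s.sum() / 2][0]

print(n50(np.array([2, 2, 2, 3, 3, 4, 8, 8])))  # 8 (the two 8bp reads hold half the bases)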
wdecoster/nanoplotter
nanoplotter/spatial_heatmap.py
make_layout
def make_layout(maxval):
    """Make the physical layout of the MinION flowcell.

    based on https://bioinformatics.stackexchange.com/a/749/681

    returned as a numpy array
    """
    if maxval > 512:
        return Layout(
            structure=np.concatenate([np.array([list(range(10 * i + 1, i * 10 + 11))
                                                for i in range(25)]) + j
                                      for j in range(0, 3000, 250)],
                                     axis=1),
            template=np.zeros((25, 120)),
            xticks=range(1, 121),
            yticks=range(1, 26))
    else:
        layoutlist = []
        for i, j in zip(
                [33, 481, 417, 353, 289, 225, 161, 97],
                [8, 456, 392, 328, 264, 200, 136, 72]):
            for n in range(4):
                layoutlist.append(list(range(i + n * 8, (i + n * 8) + 8, 1)) +
                                  list(range(j + n * 8, (j + n * 8) - 8, -1)))
        return Layout(
            structure=np.array(layoutlist).transpose(),
            template=np.zeros((16, 32)),
            xticks=range(1, 33),
            yticks=range(1, 17))
python
def make_layout(maxval):
    """Make the physical layout of the MinION flowcell.

    based on https://bioinformatics.stackexchange.com/a/749/681

    returned as a numpy array
    """
    if maxval > 512:
        return Layout(
            structure=np.concatenate([np.array([list(range(10 * i + 1, i * 10 + 11))
                                                for i in range(25)]) + j
                                      for j in range(0, 3000, 250)],
                                     axis=1),
            template=np.zeros((25, 120)),
            xticks=range(1, 121),
            yticks=range(1, 26))
    else:
        layoutlist = []
        for i, j in zip(
                [33, 481, 417, 353, 289, 225, 161, 97],
                [8, 456, 392, 328, 264, 200, 136, 72]):
            for n in range(4):
                layoutlist.append(list(range(i + n * 8, (i + n * 8) + 8, 1)) +
                                  list(range(j + n * 8, (j + n * 8) - 8, -1)))
        return Layout(
            structure=np.array(layoutlist).transpose(),
            template=np.zeros((16, 32)),
            xticks=range(1, 33),
            yticks=range(1, 17))
[ "def", "make_layout", "(", "maxval", ")", ":", "if", "maxval", ">", "512", ":", "return", "Layout", "(", "structure", "=", "np", ".", "concatenate", "(", "[", "np", ".", "array", "(", "[", "list", "(", "range", "(", "10", "*", "i", "+", "1", ",", "i", "*", "10", "+", "11", ")", ")", "for", "i", "in", "range", "(", "25", ")", "]", ")", "+", "j", "for", "j", "in", "range", "(", "0", ",", "3000", ",", "250", ")", "]", ",", "axis", "=", "1", ")", ",", "template", "=", "np", ".", "zeros", "(", "(", "25", ",", "120", ")", ")", ",", "xticks", "=", "range", "(", "1", ",", "121", ")", ",", "yticks", "=", "range", "(", "1", ",", "26", ")", ")", "else", ":", "layoutlist", "=", "[", "]", "for", "i", ",", "j", "in", "zip", "(", "[", "33", ",", "481", ",", "417", ",", "353", ",", "289", ",", "225", ",", "161", ",", "97", "]", ",", "[", "8", ",", "456", ",", "392", ",", "328", ",", "264", ",", "200", ",", "136", ",", "72", "]", ")", ":", "for", "n", "in", "range", "(", "4", ")", ":", "layoutlist", ".", "append", "(", "list", "(", "range", "(", "i", "+", "n", "*", "8", ",", "(", "i", "+", "n", "*", "8", ")", "+", "8", ",", "1", ")", ")", "+", "list", "(", "range", "(", "j", "+", "n", "*", "8", ",", "(", "j", "+", "n", "*", "8", ")", "-", "8", ",", "-", "1", ")", ")", ")", "return", "Layout", "(", "structure", "=", "np", ".", "array", "(", "layoutlist", ")", ".", "transpose", "(", ")", ",", "template", "=", "np", ".", "zeros", "(", "(", "16", ",", "32", ")", ")", ",", "xticks", "=", "range", "(", "1", ",", "33", ")", ",", "yticks", "=", "range", "(", "1", ",", "17", ")", ")" ]
Make the physical layout of the MinION flowcell.

based on https://bioinformatics.stackexchange.com/a/749/681

returned as a numpy array
[ "Make", "the", "physical", "layout", "of", "the", "MinION", "flowcell", ".", "based", "on", "https", ":", "//", "bioinformatics", ".", "stackexchange", ".", "com", "/", "a", "/", "749", "/", "681", "returned", "as", "a", "numpy", "array" ]
train
https://github.com/wdecoster/nanoplotter/blob/80908dd1be585f450da5a66989de9de4d544ec85/nanoplotter/spatial_heatmap.py#L17-L43
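A quick sanity check of the two layout branches (editorial sketch, assuming nanoplotter is importable). MinION channel numbers stay at or below 512, so that value selects the 16x32 grid; larger channel numbers, as on 3000-channel flowcells, fall through to the 25x120 grid.

from nanoplotter.spatial_heatmap import make_layout

# Layout is the module's namedtuple; structure holds the channel-number grid.
assert make_layout(maxval=512).structure.shape == (16, 32)
assert make_layout(maxval=3000).structure.shape == (25, 120)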
wdecoster/nanoplotter
nanoplotter/spatial_heatmap.py
spatial_heatmap
def spatial_heatmap(array, path, title=None, color="Greens", figformat="png"):
    """Taking channel information and creating post run channel activity plots."""
    logging.info("Nanoplotter: Creating heatmap of reads per channel using {} reads."
                 .format(array.size))
    activity_map = Plot(
        path=path + "." + figformat,
        title="Number of reads generated per channel")
    layout = make_layout(maxval=np.amax(array))
    valueCounts = pd.value_counts(pd.Series(array))
    for entry in valueCounts.keys():
        layout.template[np.where(layout.structure == entry)] = valueCounts[entry]
    plt.figure()
    ax = sns.heatmap(
        data=pd.DataFrame(layout.template, index=layout.yticks, columns=layout.xticks),
        xticklabels="auto",
        yticklabels="auto",
        square=True,
        cbar_kws={"orientation": "horizontal"},
        cmap=color,
        linewidths=0.20)
    ax.set_title(title or activity_map.title)
    activity_map.fig = ax.get_figure()
    activity_map.save(format=figformat)
    plt.close("all")
    return [activity_map]
python
def spatial_heatmap(array, path, title=None, color="Greens", figformat="png"):
    """Taking channel information and creating post run channel activity plots."""
    logging.info("Nanoplotter: Creating heatmap of reads per channel using {} reads."
                 .format(array.size))
    activity_map = Plot(
        path=path + "." + figformat,
        title="Number of reads generated per channel")
    layout = make_layout(maxval=np.amax(array))
    valueCounts = pd.value_counts(pd.Series(array))
    for entry in valueCounts.keys():
        layout.template[np.where(layout.structure == entry)] = valueCounts[entry]
    plt.figure()
    ax = sns.heatmap(
        data=pd.DataFrame(layout.template, index=layout.yticks, columns=layout.xticks),
        xticklabels="auto",
        yticklabels="auto",
        square=True,
        cbar_kws={"orientation": "horizontal"},
        cmap=color,
        linewidths=0.20)
    ax.set_title(title or activity_map.title)
    activity_map.fig = ax.get_figure()
    activity_map.save(format=figformat)
    plt.close("all")
    return [activity_map]
[ "def", "spatial_heatmap", "(", "array", ",", "path", ",", "title", "=", "None", ",", "color", "=", "\"Greens\"", ",", "figformat", "=", "\"png\"", ")", ":", "logging", ".", "info", "(", "\"Nanoplotter: Creating heatmap of reads per channel using {} reads.\"", ".", "format", "(", "array", ".", "size", ")", ")", "activity_map", "=", "Plot", "(", "path", "=", "path", "+", "\".\"", "+", "figformat", ",", "title", "=", "\"Number of reads generated per channel\"", ")", "layout", "=", "make_layout", "(", "maxval", "=", "np", ".", "amax", "(", "array", ")", ")", "valueCounts", "=", "pd", ".", "value_counts", "(", "pd", ".", "Series", "(", "array", ")", ")", "for", "entry", "in", "valueCounts", ".", "keys", "(", ")", ":", "layout", ".", "template", "[", "np", ".", "where", "(", "layout", ".", "structure", "==", "entry", ")", "]", "=", "valueCounts", "[", "entry", "]", "plt", ".", "figure", "(", ")", "ax", "=", "sns", ".", "heatmap", "(", "data", "=", "pd", ".", "DataFrame", "(", "layout", ".", "template", ",", "index", "=", "layout", ".", "yticks", ",", "columns", "=", "layout", ".", "xticks", ")", ",", "xticklabels", "=", "\"auto\"", ",", "yticklabels", "=", "\"auto\"", ",", "square", "=", "True", ",", "cbar_kws", "=", "{", "\"orientation\"", ":", "\"horizontal\"", "}", ",", "cmap", "=", "color", ",", "linewidths", "=", "0.20", ")", "ax", ".", "set_title", "(", "title", "or", "activity_map", ".", "title", ")", "activity_map", ".", "fig", "=", "ax", ".", "get_figure", "(", ")", "activity_map", ".", "save", "(", "format", "=", "figformat", ")", "plt", ".", "close", "(", "\"all\"", ")", "return", "[", "activity_map", "]" ]
Taking channel information and creating post run channel activity plots.
[ "Taking", "channel", "information", "and", "creating", "post", "run", "channel", "activity", "plots", "." ]
train
https://github.com/wdecoster/nanoplotter/blob/80908dd1be585f450da5a66989de9de4d544ec85/nanoplotter/spatial_heatmap.py#L46-L70
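An editorial usage sketch for spatial_heatmap, assuming nanoplotter is installed. The input is one channel number per read; the "activity_map" path prefix is hypothetical.

import numpy as np
from nanoplotter.spatial_heatmap import spatial_heatmap

channels = np.random.randint(1, 513, size=10000)  # one entry per read
plots = spatial_heatmap(array=channels,
                        path="activity_map",      # writes activity_map.png
                        title="Reads per channel")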
occrp/cronosparser
cronos/cli.py
main
def main(database_dir, target_dir):
    """Generate CSV files from a CronosPro/CronosPlus database."""
    if not os.path.isdir(database_dir):
        raise click.ClickException("Database directory does not exist!")
    try:
        os.makedirs(target_dir)
    except:
        pass
    try:
        parse(database_dir, target_dir)
    except CronosException as ex:
        raise click.ClickException(ex.message)
python
def main(database_dir, target_dir):
    """Generate CSV files from a CronosPro/CronosPlus database."""
    if not os.path.isdir(database_dir):
        raise click.ClickException("Database directory does not exist!")
    try:
        os.makedirs(target_dir)
    except:
        pass
    try:
        parse(database_dir, target_dir)
    except CronosException as ex:
        raise click.ClickException(ex.message)
[ "def", "main", "(", "database_dir", ",", "target_dir", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "database_dir", ")", ":", "raise", "click", ".", "ClickException", "(", "\"Database directory does not exist!\"", ")", "try", ":", "os", ".", "makedirs", "(", "target_dir", ")", "except", ":", "pass", "try", ":", "parse", "(", "database_dir", ",", "target_dir", ")", "except", "CronosException", "as", "ex", ":", "raise", "click", ".", "ClickException", "(", "ex", ".", "message", ")" ]
Generate CSV files from a CronosPro/CronosPlus database.
[ "Generate", "CSV", "files", "from", "a", "CronosPro", "/", "CronosPlus", "database", "." ]
train
https://github.com/occrp/cronosparser/blob/e7748a1a98992b2cc2191f4ce3b1621db1365c3f/cronos/cli.py#L11-L22
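The extracted function string above loses any click decorators, but the ClickException usage suggests main is registered as a click command in the installed package. Under that assumption, a hedged sketch of driving it through click's test runner; the directory names are hypothetical.

from click.testing import CliRunner
from cronos.cli import main

runner = CliRunner()
result = runner.invoke(main, ["path/to/cronos_db", "out_csv"])
print(result.exit_code, result.output)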
wdecoster/nanoplotter
nanoplotter/timeplots.py
check_valid_time_and_sort
def check_valid_time_and_sort(df, timescol, days=5, warning=True):
    """Check if the data contains reads created within the same `days` timeframe.

    if not, print warning and only return part of the data which is within `days` days
    Resetting the index twice to get also an "index" column for plotting the cum_yield_reads plot
    """
    timediff = (df[timescol].max() - df[timescol].min()).days
    if timediff < days:
        return df.sort_values(timescol).reset_index(drop=True).reset_index()
    else:
        if warning:
            sys.stderr.write(
                "\nWarning: data generated is from more than {} days.\n".format(str(days)))
            sys.stderr.write("Likely this indicates you are combining multiple runs.\n")
            sys.stderr.write(
                "Plots based on time are invalid and therefore truncated to first {} days.\n\n"
                .format(str(days)))
            logging.warning("Time plots truncated to first {} days: invalid timespan: {} days"
                            .format(str(days), str(timediff)))
        return df[df[timescol] < timedelta(days=days)] \
            .sort_values(timescol) \
            .reset_index(drop=True) \
            .reset_index()
python
def check_valid_time_and_sort(df, timescol, days=5, warning=True):
    """Check if the data contains reads created within the same `days` timeframe.

    if not, print warning and only return part of the data which is within `days` days
    Resetting the index twice to get also an "index" column for plotting the cum_yield_reads plot
    """
    timediff = (df[timescol].max() - df[timescol].min()).days
    if timediff < days:
        return df.sort_values(timescol).reset_index(drop=True).reset_index()
    else:
        if warning:
            sys.stderr.write(
                "\nWarning: data generated is from more than {} days.\n".format(str(days)))
            sys.stderr.write("Likely this indicates you are combining multiple runs.\n")
            sys.stderr.write(
                "Plots based on time are invalid and therefore truncated to first {} days.\n\n"
                .format(str(days)))
            logging.warning("Time plots truncated to first {} days: invalid timespan: {} days"
                            .format(str(days), str(timediff)))
        return df[df[timescol] < timedelta(days=days)] \
            .sort_values(timescol) \
            .reset_index(drop=True) \
            .reset_index()
[ "def", "check_valid_time_and_sort", "(", "df", ",", "timescol", ",", "days", "=", "5", ",", "warning", "=", "True", ")", ":", "timediff", "=", "(", "df", "[", "timescol", "]", ".", "max", "(", ")", "-", "df", "[", "timescol", "]", ".", "min", "(", ")", ")", ".", "days", "if", "timediff", "<", "days", ":", "return", "df", ".", "sort_values", "(", "timescol", ")", ".", "reset_index", "(", "drop", "=", "True", ")", ".", "reset_index", "(", ")", "else", ":", "if", "warning", ":", "sys", ".", "stderr", ".", "write", "(", "\"\\nWarning: data generated is from more than {} days.\\n\"", ".", "format", "(", "str", "(", "days", ")", ")", ")", "sys", ".", "stderr", ".", "write", "(", "\"Likely this indicates you are combining multiple runs.\\n\"", ")", "sys", ".", "stderr", ".", "write", "(", "\"Plots based on time are invalid and therefore truncated to first {} days.\\n\\n\"", ".", "format", "(", "str", "(", "days", ")", ")", ")", "logging", ".", "warning", "(", "\"Time plots truncated to first {} days: invalid timespan: {} days\"", ".", "format", "(", "str", "(", "days", ")", ",", "str", "(", "timediff", ")", ")", ")", "return", "df", "[", "df", "[", "timescol", "]", "<", "timedelta", "(", "days", "=", "days", ")", "]", ".", "sort_values", "(", "timescol", ")", ".", "reset_index", "(", "drop", "=", "True", ")", ".", "reset_index", "(", ")" ]
Check if the data contains reads created within the same `days` timeframe.

if not, print warning and only return part of the data which is within `days` days
Resetting the index twice to get also an "index" column for plotting the cum_yield_reads plot
[ "Check", "if", "the", "data", "contains", "reads", "created", "within", "the", "same", "days", "timeframe", "." ]
train
https://github.com/wdecoster/nanoplotter/blob/80908dd1be585f450da5a66989de9de4d544ec85/nanoplotter/timeplots.py#L12-L34
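An editorial sketch of the truncation behaviour on synthetic timestamps, assuming nanoplotter is installed. Reads are timed as timedeltas since run start; the 6-day read exceeds the 5-day window, so only the first two rows survive, sorted and carrying the extra "index" column.

import pandas as pd
from nanoplotter.timeplots import check_valid_time_and_sort

df = pd.DataFrame({"start_time": pd.to_timedelta(["0 days", "1 days", "6 days"]),
                   "lengths": [500, 1500, 800]})
out = check_valid_time_and_sort(df, "start_time", days=5, warning=False)
assert list(out["lengths"]) == [500, 1500]
assert "index" in out.columns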
wdecoster/nanoplotter
nanoplotter/timeplots.py
time_plots
def time_plots(df, path, title=None, color="#4CB391", figformat="png",
               log_length=False, plot_settings=None):
    """Making plots of time vs read length, time vs quality and cumulative yield."""
    dfs = check_valid_time_and_sort(df, "start_time")
    logging.info("Nanoplotter: Creating timeplots using {} reads.".format(len(dfs)))
    cumyields = cumulative_yield(dfs=dfs.set_index("start_time"),
                                 path=path,
                                 figformat=figformat,
                                 title=title,
                                 color=color)
    reads_pores_over_time = plot_over_time(dfs=dfs.set_index("start_time"),
                                           path=path,
                                           figformat=figformat,
                                           title=title,
                                           color=color)
    violins = violin_plots_over_time(dfs=dfs,
                                     path=path,
                                     figformat=figformat,
                                     title=title,
                                     log_length=log_length,
                                     plot_settings=plot_settings)
    return cumyields + reads_pores_over_time + violins
python
def time_plots(df, path, title=None, color="#4CB391", figformat="png",
               log_length=False, plot_settings=None):
    """Making plots of time vs read length, time vs quality and cumulative yield."""
    dfs = check_valid_time_and_sort(df, "start_time")
    logging.info("Nanoplotter: Creating timeplots using {} reads.".format(len(dfs)))
    cumyields = cumulative_yield(dfs=dfs.set_index("start_time"),
                                 path=path,
                                 figformat=figformat,
                                 title=title,
                                 color=color)
    reads_pores_over_time = plot_over_time(dfs=dfs.set_index("start_time"),
                                           path=path,
                                           figformat=figformat,
                                           title=title,
                                           color=color)
    violins = violin_plots_over_time(dfs=dfs,
                                     path=path,
                                     figformat=figformat,
                                     title=title,
                                     log_length=log_length,
                                     plot_settings=plot_settings)
    return cumyields + reads_pores_over_time + violins
[ "def", "time_plots", "(", "df", ",", "path", ",", "title", "=", "None", ",", "color", "=", "\"#4CB391\"", ",", "figformat", "=", "\"png\"", ",", "log_length", "=", "False", ",", "plot_settings", "=", "None", ")", ":", "dfs", "=", "check_valid_time_and_sort", "(", "df", ",", "\"start_time\"", ")", "logging", ".", "info", "(", "\"Nanoplotter: Creating timeplots using {} reads.\"", ".", "format", "(", "len", "(", "dfs", ")", ")", ")", "cumyields", "=", "cumulative_yield", "(", "dfs", "=", "dfs", ".", "set_index", "(", "\"start_time\"", ")", ",", "path", "=", "path", ",", "figformat", "=", "figformat", ",", "title", "=", "title", ",", "color", "=", "color", ")", "reads_pores_over_time", "=", "plot_over_time", "(", "dfs", "=", "dfs", ".", "set_index", "(", "\"start_time\"", ")", ",", "path", "=", "path", ",", "figformat", "=", "figformat", ",", "title", "=", "title", ",", "color", "=", "color", ")", "violins", "=", "violin_plots_over_time", "(", "dfs", "=", "dfs", ",", "path", "=", "path", ",", "figformat", "=", "figformat", ",", "title", "=", "title", ",", "log_length", "=", "log_length", ",", "plot_settings", "=", "plot_settings", ")", "return", "cumyields", "+", "reads_pores_over_time", "+", "violins" ]
Making plots of time vs read length, time vs quality and cumulative yield.
[ "Making", "plots", "of", "time", "vs", "read", "length", "time", "vs", "quality", "and", "cumulative", "yield", "." ]
train
https://github.com/wdecoster/nanoplotter/blob/80908dd1be585f450da5a66989de9de4d544ec85/nanoplotter/timeplots.py#L37-L58
wdecoster/nanoplotter
nanoplotter/compplots.py
violin_or_box_plot
def violin_or_box_plot(df, y, figformat, path, y_name,
                       title=None, plot="violin", log=False, palette=None):
    """Create a violin or boxplot from the received DataFrame.

    The x-axis should be divided based on the 'dataset' column,
    the y-axis is specified in the arguments
    """
    comp = Plot(path=path + "NanoComp_" + y.replace(' ', '_') + '.' + figformat,
                title="Comparing {}".format(y))
    if y == "quals":
        comp.title = "Comparing base call quality scores"
    if plot == 'violin':
        logging.info("Nanoplotter: Creating violin plot for {}.".format(y))
        process_violin_and_box(ax=sns.violinplot(x="dataset",
                                                 y=y,
                                                 data=df,
                                                 inner=None,
                                                 cut=0,
                                                 palette=palette,
                                                 linewidth=0),
                               log=log,
                               plot_obj=comp,
                               title=title,
                               y_name=y_name,
                               figformat=figformat,
                               ymax=np.amax(df[y]))
    elif plot == 'box':
        logging.info("Nanoplotter: Creating box plot for {}.".format(y))
        process_violin_and_box(ax=sns.boxplot(x="dataset",
                                              y=y,
                                              data=df,
                                              palette=palette),
                               log=log,
                               plot_obj=comp,
                               title=title,
                               y_name=y_name,
                               figformat=figformat,
                               ymax=np.amax(df[y]))
    elif plot == 'ridge':
        logging.info("Nanoplotter: Creating ridges plot for {}.".format(y))
        comp.fig, axes = joypy.joyplot(df,
                                       by="dataset",
                                       column=y,
                                       title=title or comp.title,
                                       x_range=[-0.05, np.amax(df[y])])
        if log:
            xticks = [float(i.get_text()) for i in axes[-1].get_xticklabels()]
            axes[-1].set_xticklabels([10**i for i in xticks])
        axes[-1].set_xticklabels(axes[-1].get_xticklabels(), rotation=30, ha='center')
        comp.save(format=figformat)
    else:
        logging.error("Unknown comp plot type {}".format(plot))
        sys.exit("Unknown comp plot type {}".format(plot))
    plt.close("all")
    return [comp]
python
def violin_or_box_plot(df, y, figformat, path, y_name,
                       title=None, plot="violin", log=False, palette=None):
    """Create a violin or boxplot from the received DataFrame.

    The x-axis should be divided based on the 'dataset' column,
    the y-axis is specified in the arguments
    """
    comp = Plot(path=path + "NanoComp_" + y.replace(' ', '_') + '.' + figformat,
                title="Comparing {}".format(y))
    if y == "quals":
        comp.title = "Comparing base call quality scores"
    if plot == 'violin':
        logging.info("Nanoplotter: Creating violin plot for {}.".format(y))
        process_violin_and_box(ax=sns.violinplot(x="dataset",
                                                 y=y,
                                                 data=df,
                                                 inner=None,
                                                 cut=0,
                                                 palette=palette,
                                                 linewidth=0),
                               log=log,
                               plot_obj=comp,
                               title=title,
                               y_name=y_name,
                               figformat=figformat,
                               ymax=np.amax(df[y]))
    elif plot == 'box':
        logging.info("Nanoplotter: Creating box plot for {}.".format(y))
        process_violin_and_box(ax=sns.boxplot(x="dataset",
                                              y=y,
                                              data=df,
                                              palette=palette),
                               log=log,
                               plot_obj=comp,
                               title=title,
                               y_name=y_name,
                               figformat=figformat,
                               ymax=np.amax(df[y]))
    elif plot == 'ridge':
        logging.info("Nanoplotter: Creating ridges plot for {}.".format(y))
        comp.fig, axes = joypy.joyplot(df,
                                       by="dataset",
                                       column=y,
                                       title=title or comp.title,
                                       x_range=[-0.05, np.amax(df[y])])
        if log:
            xticks = [float(i.get_text()) for i in axes[-1].get_xticklabels()]
            axes[-1].set_xticklabels([10**i for i in xticks])
        axes[-1].set_xticklabels(axes[-1].get_xticklabels(), rotation=30, ha='center')
        comp.save(format=figformat)
    else:
        logging.error("Unknown comp plot type {}".format(plot))
        sys.exit("Unknown comp plot type {}".format(plot))
    plt.close("all")
    return [comp]
[ "def", "violin_or_box_plot", "(", "df", ",", "y", ",", "figformat", ",", "path", ",", "y_name", ",", "title", "=", "None", ",", "plot", "=", "\"violin\"", ",", "log", "=", "False", ",", "palette", "=", "None", ")", ":", "comp", "=", "Plot", "(", "path", "=", "path", "+", "\"NanoComp_\"", "+", "y", ".", "replace", "(", "' '", ",", "'_'", ")", "+", "'.'", "+", "figformat", ",", "title", "=", "\"Comparing {}\"", ".", "format", "(", "y", ")", ")", "if", "y", "==", "\"quals\"", ":", "comp", ".", "title", "=", "\"Comparing base call quality scores\"", "if", "plot", "==", "'violin'", ":", "logging", ".", "info", "(", "\"Nanoplotter: Creating violin plot for {}.\"", ".", "format", "(", "y", ")", ")", "process_violin_and_box", "(", "ax", "=", "sns", ".", "violinplot", "(", "x", "=", "\"dataset\"", ",", "y", "=", "y", ",", "data", "=", "df", ",", "inner", "=", "None", ",", "cut", "=", "0", ",", "palette", "=", "palette", ",", "linewidth", "=", "0", ")", ",", "log", "=", "log", ",", "plot_obj", "=", "comp", ",", "title", "=", "title", ",", "y_name", "=", "y_name", ",", "figformat", "=", "figformat", ",", "ymax", "=", "np", ".", "amax", "(", "df", "[", "y", "]", ")", ")", "elif", "plot", "==", "'box'", ":", "logging", ".", "info", "(", "\"Nanoplotter: Creating box plot for {}.\"", ".", "format", "(", "y", ")", ")", "process_violin_and_box", "(", "ax", "=", "sns", ".", "boxplot", "(", "x", "=", "\"dataset\"", ",", "y", "=", "y", ",", "data", "=", "df", ",", "palette", "=", "palette", ")", ",", "log", "=", "log", ",", "plot_obj", "=", "comp", ",", "title", "=", "title", ",", "y_name", "=", "y_name", ",", "figformat", "=", "figformat", ",", "ymax", "=", "np", ".", "amax", "(", "df", "[", "y", "]", ")", ")", "elif", "plot", "==", "'ridge'", ":", "logging", ".", "info", "(", "\"Nanoplotter: Creating ridges plot for {}.\"", ".", "format", "(", "y", ")", ")", "comp", ".", "fig", ",", "axes", "=", "joypy", ".", "joyplot", "(", "df", ",", "by", "=", "\"dataset\"", ",", "column", "=", "y", ",", "title", "=", "title", "or", "comp", ".", "title", ",", "x_range", "=", "[", "-", "0.05", ",", "np", ".", "amax", "(", "df", "[", "y", "]", ")", "]", ")", "if", "log", ":", "xticks", "=", "[", "float", "(", "i", ".", "get_text", "(", ")", ")", "for", "i", "in", "axes", "[", "-", "1", "]", ".", "get_xticklabels", "(", ")", "]", "axes", "[", "-", "1", "]", ".", "set_xticklabels", "(", "[", "10", "**", "i", "for", "i", "in", "xticks", "]", ")", "axes", "[", "-", "1", "]", ".", "set_xticklabels", "(", "axes", "[", "-", "1", "]", ".", "get_xticklabels", "(", ")", ",", "rotation", "=", "30", ",", "ha", "=", "'center'", ")", "comp", ".", "save", "(", "format", "=", "figformat", ")", "else", ":", "logging", ".", "error", "(", "\"Unknown comp plot type {}\"", ".", "format", "(", "plot", ")", ")", "sys", ".", "exit", "(", "\"Unknown comp plot type {}\"", ".", "format", "(", "plot", ")", ")", "plt", ".", "close", "(", "\"all\"", ")", "return", "[", "comp", "]" ]
Create a violin or boxplot from the received DataFrame.

The x-axis should be divided based on the 'dataset' column,
the y-axis is specified in the arguments
[ "Create", "a", "violin", "or", "boxplot", "from", "the", "received", "DataFrame", "." ]
train
https://github.com/wdecoster/nanoplotter/blob/80908dd1be585f450da5a66989de9de4d544ec85/nanoplotter/compplots.py#L13-L68
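An editorial usage sketch for the violin variant, assuming nanoplotter is installed. The "cmp_" prefix is hypothetical; the DataFrame only needs a 'dataset' column plus the numeric column being compared.

import pandas as pd
from nanoplotter.compplots import violin_or_box_plot

df = pd.DataFrame({"dataset": ["run1"] * 3 + ["run2"] * 3,
                   "lengths": [500, 1200, 900, 2000, 1800, 2500]})
violin_or_box_plot(df, y="lengths", figformat="png", path="cmp_",
                   y_name="Read length", plot="violin")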
wdecoster/nanoplotter
nanoplotter/compplots.py
output_barplot
def output_barplot(df, figformat, path, title=None, palette=None):
    """Create barplots based on number of reads and total sum of nucleotides sequenced."""
    logging.info("Nanoplotter: Creating barplots for number of reads and total throughput.")
    read_count = Plot(path=path + "NanoComp_number_of_reads." + figformat,
                      title="Comparing number of reads")
    ax = sns.countplot(x="dataset",
                       data=df,
                       palette=palette)
    ax.set(ylabel='Number of reads',
           title=title or read_count.title)
    plt.xticks(rotation=30, ha='center')
    read_count.fig = ax.get_figure()
    read_count.save(format=figformat)
    plt.close("all")

    throughput_bases = Plot(path=path + "NanoComp_total_throughput." + figformat,
                            title="Comparing throughput in gigabases")
    if "aligned_lengths" in df:
        throughput = df.groupby('dataset')['aligned_lengths'].sum()
        ylabel = 'Total gigabase aligned'
    else:
        throughput = df.groupby('dataset')['lengths'].sum()
        ylabel = 'Total gigabase sequenced'
    ax = sns.barplot(x=list(throughput.index),
                     y=throughput / 1e9,
                     palette=palette,
                     order=df["dataset"].unique())
    ax.set(ylabel=ylabel,
           title=title or throughput_bases.title)
    plt.xticks(rotation=30, ha='center')
    throughput_bases.fig = ax.get_figure()
    throughput_bases.save(format=figformat)
    plt.close("all")
    return read_count, throughput_bases
python
def output_barplot(df, figformat, path, title=None, palette=None):
    """Create barplots based on number of reads and total sum of nucleotides sequenced."""
    logging.info("Nanoplotter: Creating barplots for number of reads and total throughput.")
    read_count = Plot(path=path + "NanoComp_number_of_reads." + figformat,
                      title="Comparing number of reads")
    ax = sns.countplot(x="dataset",
                       data=df,
                       palette=palette)
    ax.set(ylabel='Number of reads',
           title=title or read_count.title)
    plt.xticks(rotation=30, ha='center')
    read_count.fig = ax.get_figure()
    read_count.save(format=figformat)
    plt.close("all")

    throughput_bases = Plot(path=path + "NanoComp_total_throughput." + figformat,
                            title="Comparing throughput in gigabases")
    if "aligned_lengths" in df:
        throughput = df.groupby('dataset')['aligned_lengths'].sum()
        ylabel = 'Total gigabase aligned'
    else:
        throughput = df.groupby('dataset')['lengths'].sum()
        ylabel = 'Total gigabase sequenced'
    ax = sns.barplot(x=list(throughput.index),
                     y=throughput / 1e9,
                     palette=palette,
                     order=df["dataset"].unique())
    ax.set(ylabel=ylabel,
           title=title or throughput_bases.title)
    plt.xticks(rotation=30, ha='center')
    throughput_bases.fig = ax.get_figure()
    throughput_bases.save(format=figformat)
    plt.close("all")
    return read_count, throughput_bases
[ "def", "output_barplot", "(", "df", ",", "figformat", ",", "path", ",", "title", "=", "None", ",", "palette", "=", "None", ")", ":", "logging", ".", "info", "(", "\"Nanoplotter: Creating barplots for number of reads and total throughput.\"", ")", "read_count", "=", "Plot", "(", "path", "=", "path", "+", "\"NanoComp_number_of_reads.\"", "+", "figformat", ",", "title", "=", "\"Comparing number of reads\"", ")", "ax", "=", "sns", ".", "countplot", "(", "x", "=", "\"dataset\"", ",", "data", "=", "df", ",", "palette", "=", "palette", ")", "ax", ".", "set", "(", "ylabel", "=", "'Number of reads'", ",", "title", "=", "title", "or", "read_count", ".", "title", ")", "plt", ".", "xticks", "(", "rotation", "=", "30", ",", "ha", "=", "'center'", ")", "read_count", ".", "fig", "=", "ax", ".", "get_figure", "(", ")", "read_count", ".", "save", "(", "format", "=", "figformat", ")", "plt", ".", "close", "(", "\"all\"", ")", "throughput_bases", "=", "Plot", "(", "path", "=", "path", "+", "\"NanoComp_total_throughput.\"", "+", "figformat", ",", "title", "=", "\"Comparing throughput in gigabases\"", ")", "if", "\"aligned_lengths\"", "in", "df", ":", "throughput", "=", "df", ".", "groupby", "(", "'dataset'", ")", "[", "'aligned_lengths'", "]", ".", "sum", "(", ")", "ylabel", "=", "'Total gigabase aligned'", "else", ":", "throughput", "=", "df", ".", "groupby", "(", "'dataset'", ")", "[", "'lengths'", "]", ".", "sum", "(", ")", "ylabel", "=", "'Total gigabase sequenced'", "ax", "=", "sns", ".", "barplot", "(", "x", "=", "list", "(", "throughput", ".", "index", ")", ",", "y", "=", "throughput", "/", "1e9", ",", "palette", "=", "palette", ",", "order", "=", "df", "[", "\"dataset\"", "]", ".", "unique", "(", ")", ")", "ax", ".", "set", "(", "ylabel", "=", "ylabel", ",", "title", "=", "title", "or", "throughput_bases", ".", "title", ")", "plt", ".", "xticks", "(", "rotation", "=", "30", ",", "ha", "=", "'center'", ")", "throughput_bases", ".", "fig", "=", "ax", ".", "get_figure", "(", ")", "throughput_bases", ".", "save", "(", "format", "=", "figformat", ")", "plt", ".", "close", "(", "\"all\"", ")", "return", "read_count", ",", "throughput_bases" ]
Create barplots based on number of reads and total sum of nucleotides sequenced.
[ "Create", "barplots", "based", "on", "number", "of", "reads", "and", "total", "sum", "of", "nucleotides", "sequenced", "." ]
train
https://github.com/wdecoster/nanoplotter/blob/80908dd1be585f450da5a66989de9de4d544ec85/nanoplotter/compplots.py#L83-L116
wdecoster/nanoplotter
nanoplotter/compplots.py
overlay_histogram
def overlay_histogram(df, path, palette=None):
    """
    Use plotly to create an overlay of length histograms
    Return html code, but also save as png

    Only has 10 colors, which get recycled up to 5 times.
    """
    if palette is None:
        palette = plotly.colors.DEFAULT_PLOTLY_COLORS * 5

    hist = Plot(path=path + "NanoComp_OverlayHistogram.html",
                title="Histogram of read lengths")
    hist.html, hist.fig = plot_overlay_histogram(df, palette, title=hist.title)
    hist.save()

    hist_norm = Plot(path=path + "NanoComp_OverlayHistogram_Normalized.html",
                     title="Normalized histogram of read lengths")
    hist_norm.html, hist_norm.fig = plot_overlay_histogram(
        df, palette, title=hist_norm.title, histnorm="probability")
    hist_norm.save()

    log_hist = Plot(path=path + "NanoComp_OverlayLogHistogram.html",
                    title="Histogram of log transformed read lengths")
    log_hist.html, log_hist.fig = plot_log_histogram(df, palette, title=log_hist.title)
    log_hist.save()

    log_hist_norm = Plot(path=path + "NanoComp_OverlayLogHistogram_Normalized.html",
                         title="Normalized histogram of log transformed read lengths")
    log_hist_norm.html, log_hist_norm.fig = plot_log_histogram(
        df, palette, title=log_hist_norm.title, histnorm="probability")
    log_hist_norm.save()

    return [hist, hist_norm, log_hist, log_hist_norm]
python
def overlay_histogram(df, path, palette=None):
    """
    Use plotly to create an overlay of length histograms
    Return html code, but also save as png

    Only has 10 colors, which get recycled up to 5 times.
    """
    if palette is None:
        palette = plotly.colors.DEFAULT_PLOTLY_COLORS * 5

    hist = Plot(path=path + "NanoComp_OverlayHistogram.html",
                title="Histogram of read lengths")
    hist.html, hist.fig = plot_overlay_histogram(df, palette, title=hist.title)
    hist.save()

    hist_norm = Plot(path=path + "NanoComp_OverlayHistogram_Normalized.html",
                     title="Normalized histogram of read lengths")
    hist_norm.html, hist_norm.fig = plot_overlay_histogram(
        df, palette, title=hist_norm.title, histnorm="probability")
    hist_norm.save()

    log_hist = Plot(path=path + "NanoComp_OverlayLogHistogram.html",
                    title="Histogram of log transformed read lengths")
    log_hist.html, log_hist.fig = plot_log_histogram(df, palette, title=log_hist.title)
    log_hist.save()

    log_hist_norm = Plot(path=path + "NanoComp_OverlayLogHistogram_Normalized.html",
                         title="Normalized histogram of log transformed read lengths")
    log_hist_norm.html, log_hist_norm.fig = plot_log_histogram(
        df, palette, title=log_hist_norm.title, histnorm="probability")
    log_hist_norm.save()

    return [hist, hist_norm, log_hist, log_hist_norm]
[ "def", "overlay_histogram", "(", "df", ",", "path", ",", "palette", "=", "None", ")", ":", "if", "palette", "is", "None", ":", "palette", "=", "plotly", ".", "colors", ".", "DEFAULT_PLOTLY_COLORS", "*", "5", "hist", "=", "Plot", "(", "path", "=", "path", "+", "\"NanoComp_OverlayHistogram.html\"", ",", "title", "=", "\"Histogram of read lengths\"", ")", "hist", ".", "html", ",", "hist", ".", "fig", "=", "plot_overlay_histogram", "(", "df", ",", "palette", ",", "title", "=", "hist", ".", "title", ")", "hist", ".", "save", "(", ")", "hist_norm", "=", "Plot", "(", "path", "=", "path", "+", "\"NanoComp_OverlayHistogram_Normalized.html\"", ",", "title", "=", "\"Normalized histogram of read lengths\"", ")", "hist_norm", ".", "html", ",", "hist_norm", ".", "fig", "=", "plot_overlay_histogram", "(", "df", ",", "palette", ",", "title", "=", "hist_norm", ".", "title", ",", "histnorm", "=", "\"probability\"", ")", "hist_norm", ".", "save", "(", ")", "log_hist", "=", "Plot", "(", "path", "=", "path", "+", "\"NanoComp_OverlayLogHistogram.html\"", ",", "title", "=", "\"Histogram of log transformed read lengths\"", ")", "log_hist", ".", "html", ",", "log_hist", ".", "fig", "=", "plot_log_histogram", "(", "df", ",", "palette", ",", "title", "=", "log_hist", ".", "title", ")", "log_hist", ".", "save", "(", ")", "log_hist_norm", "=", "Plot", "(", "path", "=", "path", "+", "\"NanoComp_OverlayLogHistogram_Normalized.html\"", ",", "title", "=", "\"Normalized histogram of log transformed read lengths\"", ")", "log_hist_norm", ".", "html", ",", "log_hist_norm", ".", "fig", "=", "plot_log_histogram", "(", "df", ",", "palette", ",", "title", "=", "log_hist_norm", ".", "title", ",", "histnorm", "=", "\"probability\"", ")", "log_hist_norm", ".", "save", "(", ")", "return", "[", "hist", ",", "hist_norm", ",", "log_hist", ",", "log_hist_norm", "]" ]
Use plotly to create an overlay of length histograms
Return html code, but also save as png

Only has 10 colors, which get recycled up to 5 times.
[ "Use", "plotly", "to", "create", "an", "overlay", "of", "length", "histograms", "Return", "html", "code", "but", "also", "save", "as", "png" ]
train
https://github.com/wdecoster/nanoplotter/blob/80908dd1be585f450da5a66989de9de4d544ec85/nanoplotter/compplots.py#L178-L210
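An editorial usage sketch, assuming nanoplotter and plotly are installed; "cmp_" is a hypothetical output prefix. Four Plot objects come back: plain, normalized, log-transformed, and normalized log-transformed histograms.

import pandas as pd
from nanoplotter.compplots import overlay_histogram

df = pd.DataFrame({"dataset": ["run1"] * 1000 + ["run2"] * 1000,
                   "lengths": list(range(100, 1100)) + list(range(500, 1500))})
plots = overlay_histogram(df, path="cmp_")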
wdecoster/nanoplotter
nanoplotter/compplots.py
plot_log_histogram
def plot_log_histogram(df, palette, title, histnorm=""):
    """
    Plot overlaying histograms with log transformation of length
    Return both html and fig for png
    """
    data = [go.Histogram(x=np.log10(df.loc[df["dataset"] == d, "lengths"]),
                         opacity=0.4,
                         name=d,
                         histnorm=histnorm,
                         marker=dict(color=c))
            for d, c in zip(df["dataset"].unique(), palette)]
    xtickvals = [10**i for i in range(10) if not 10**i > 10 * np.amax(df["lengths"])]
    html = plotly.offline.plot(
        {"data": data,
         "layout": go.Layout(barmode='overlay',
                             title=title,
                             xaxis=dict(tickvals=np.log10(xtickvals),
                                        ticktext=xtickvals))},
        output_type="div",
        show_link=False)
    fig = go.Figure(
        {"data": data,
         "layout": go.Layout(barmode='overlay',
                             title=title,
                             xaxis=dict(tickvals=np.log10(xtickvals),
                                        ticktext=xtickvals))})
    return html, fig
python
def plot_log_histogram(df, palette, title, histnorm=""):
    """
    Plot overlaying histograms with log transformation of length
    Return both html and fig for png
    """
    data = [go.Histogram(x=np.log10(df.loc[df["dataset"] == d, "lengths"]),
                         opacity=0.4,
                         name=d,
                         histnorm=histnorm,
                         marker=dict(color=c))
            for d, c in zip(df["dataset"].unique(), palette)]
    xtickvals = [10**i for i in range(10) if not 10**i > 10 * np.amax(df["lengths"])]
    html = plotly.offline.plot(
        {"data": data,
         "layout": go.Layout(barmode='overlay',
                             title=title,
                             xaxis=dict(tickvals=np.log10(xtickvals),
                                        ticktext=xtickvals))},
        output_type="div",
        show_link=False)
    fig = go.Figure(
        {"data": data,
         "layout": go.Layout(barmode='overlay',
                             title=title,
                             xaxis=dict(tickvals=np.log10(xtickvals),
                                        ticktext=xtickvals))})
    return html, fig
[ "def", "plot_log_histogram", "(", "df", ",", "palette", ",", "title", ",", "histnorm", "=", "\"\"", ")", ":", "data", "=", "[", "go", ".", "Histogram", "(", "x", "=", "np", ".", "log10", "(", "df", ".", "loc", "[", "df", "[", "\"dataset\"", "]", "==", "d", ",", "\"lengths\"", "]", ")", ",", "opacity", "=", "0.4", ",", "name", "=", "d", ",", "histnorm", "=", "histnorm", ",", "marker", "=", "dict", "(", "color", "=", "c", ")", ")", "for", "d", ",", "c", "in", "zip", "(", "df", "[", "\"dataset\"", "]", ".", "unique", "(", ")", ",", "palette", ")", "]", "xtickvals", "=", "[", "10", "**", "i", "for", "i", "in", "range", "(", "10", ")", "if", "not", "10", "**", "i", ">", "10", "*", "np", ".", "amax", "(", "df", "[", "\"lengths\"", "]", ")", "]", "html", "=", "plotly", ".", "offline", ".", "plot", "(", "{", "\"data\"", ":", "data", ",", "\"layout\"", ":", "go", ".", "Layout", "(", "barmode", "=", "'overlay'", ",", "title", "=", "title", ",", "xaxis", "=", "dict", "(", "tickvals", "=", "np", ".", "log10", "(", "xtickvals", ")", ",", "ticktext", "=", "xtickvals", ")", ")", "}", ",", "output_type", "=", "\"div\"", ",", "show_link", "=", "False", ")", "fig", "=", "go", ".", "Figure", "(", "{", "\"data\"", ":", "data", ",", "\"layout\"", ":", "go", ".", "Layout", "(", "barmode", "=", "'overlay'", ",", "title", "=", "title", ",", "xaxis", "=", "dict", "(", "tickvals", "=", "np", ".", "log10", "(", "xtickvals", ")", ",", "ticktext", "=", "xtickvals", ")", ")", "}", ")", "return", "html", ",", "fig" ]
Plot overlaying histograms with log transformation of length
Return both html and fig for png
[ "Plot", "overlaying", "histograms", "with", "log", "transformation", "of", "length", "Return", "both", "html", "and", "fig", "for", "png" ]
train
https://github.com/wdecoster/nanoplotter/blob/80908dd1be585f450da5a66989de9de4d544ec85/nanoplotter/compplots.py#L233-L259
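The axis trick in plot_log_histogram is worth isolating: tick positions are log10-spaced while the labels keep the original base-pair values. A self-contained editorial sketch with an assumed maximum read length:

import numpy as np

max_length = 48_000  # assumed example value
xtickvals = [10**i for i in range(10) if not 10**i > 10 * max_length]
print(xtickvals)            # [1, 10, 100, 1000, 10000, 100000]
print(np.log10(xtickvals))  # [0. 1. 2. 3. 4. 5.]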
occrp/cronosparser
cronos/parser.py
get_file
def get_file(db_folder, file_name):
    """Glob for the poor."""
    if not os.path.isdir(db_folder):
        return
    file_name = file_name.lower().strip()
    for cand_name in os.listdir(db_folder):
        if cand_name.lower().strip() == file_name:
            return os.path.join(db_folder, cand_name)
python
def get_file(db_folder, file_name):
    """Glob for the poor."""
    if not os.path.isdir(db_folder):
        return
    file_name = file_name.lower().strip()
    for cand_name in os.listdir(db_folder):
        if cand_name.lower().strip() == file_name:
            return os.path.join(db_folder, cand_name)
[ "def", "get_file", "(", "db_folder", ",", "file_name", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "db_folder", ")", ":", "return", "file_name", "=", "file_name", ".", "lower", "(", ")", ".", "strip", "(", ")", "for", "cand_name", "in", "os", ".", "listdir", "(", "db_folder", ")", ":", "if", "cand_name", ".", "lower", "(", ")", ".", "strip", "(", ")", "==", "file_name", ":", "return", "os", ".", "path", ".", "join", "(", "db_folder", ",", "cand_name", ")" ]
Glob for the poor.
[ "Glob", "for", "the", "poor", "." ]
train
https://github.com/occrp/cronosparser/blob/e7748a1a98992b2cc2191f4ce3b1621db1365c3f/cronos/parser.py#L270-L277
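A self-contained editorial demonstration of the case-insensitive lookup, restating the function locally so the sketch runs without the cronosparser package:

import os
import tempfile

def get_file(db_folder, file_name):
    # Return the real path of file_name, matched case-insensitively.
    if not os.path.isdir(db_folder):
        return
    file_name = file_name.lower().strip()
    for cand_name in os.listdir(db_folder):
        if cand_name.lower().strip() == file_name:
            return os.path.join(db_folder, cand_name)

db = tempfile.mkdtemp()
open(os.path.join(db, "CROSTRU.DAT"), "w").close()
print(get_file(db, "CroStru.dat"))  # finds CROSTRU.DAT despite the case mismatch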
occrp/cronosparser
cronos/parser.py
parse
def parse(db_folder, out_folder):
    """
    Parse a cronos database.

    Convert the database located in ``db_folder`` into CSV files in the
    directory ``out_folder``.
    """
    # The database structure, containing table and column definitions as
    # well as other data.
    stru_dat = get_file(db_folder, 'CroStru.dat')
    # Index file for the database, which contains offsets for each record.
    data_tad = get_file(db_folder, 'CroBank.tad')
    # Actual data records, can only be decoded using CroBank.tad.
    data_dat = get_file(db_folder, 'CroBank.dat')
    if None in [stru_dat, data_tad, data_dat]:
        raise CronosException("Not all database files are present.")

    meta, tables = parse_structure(stru_dat)

    for table in tables:
        # TODO: do we want to export the "FL" table?
        if table['abbr'] == 'FL' and table['name'] == 'Files':
            continue
        fh = open(make_csv_file_name(meta, table, out_folder), 'w')
        columns = table.get('columns')
        writer = csv.writer(fh)
        writer.writerow([encode_cell(c['name']) for c in columns])
        for row in parse_data(data_tad, data_dat, table.get('id'), columns):
            writer.writerow([encode_cell(c) for c in row])
        fh.close()
python
def parse(db_folder, out_folder):
    """
    Parse a cronos database.

    Convert the database located in ``db_folder`` into CSV files in the
    directory ``out_folder``.
    """
    # The database structure, containing table and column definitions as
    # well as other data.
    stru_dat = get_file(db_folder, 'CroStru.dat')
    # Index file for the database, which contains offsets for each record.
    data_tad = get_file(db_folder, 'CroBank.tad')
    # Actual data records, can only be decoded using CroBank.tad.
    data_dat = get_file(db_folder, 'CroBank.dat')
    if None in [stru_dat, data_tad, data_dat]:
        raise CronosException("Not all database files are present.")

    meta, tables = parse_structure(stru_dat)

    for table in tables:
        # TODO: do we want to export the "FL" table?
        if table['abbr'] == 'FL' and table['name'] == 'Files':
            continue
        fh = open(make_csv_file_name(meta, table, out_folder), 'w')
        columns = table.get('columns')
        writer = csv.writer(fh)
        writer.writerow([encode_cell(c['name']) for c in columns])
        for row in parse_data(data_tad, data_dat, table.get('id'), columns):
            writer.writerow([encode_cell(c) for c in row])
        fh.close()
[ "def", "parse", "(", "db_folder", ",", "out_folder", ")", ":", "# The database structure, containing table and column definitions as", "# well as other data.", "stru_dat", "=", "get_file", "(", "db_folder", ",", "'CroStru.dat'", ")", "# Index file for the database, which contains offsets for each record.", "data_tad", "=", "get_file", "(", "db_folder", ",", "'CroBank.tad'", ")", "# Actual data records, can only be decoded using CroBank.tad.", "data_dat", "=", "get_file", "(", "db_folder", ",", "'CroBank.dat'", ")", "if", "None", "in", "[", "stru_dat", ",", "data_tad", ",", "data_dat", "]", ":", "raise", "CronosException", "(", "\"Not all database files are present.\"", ")", "meta", ",", "tables", "=", "parse_structure", "(", "stru_dat", ")", "for", "table", "in", "tables", ":", "# TODO: do we want to export the \"FL\" table?", "if", "table", "[", "'abbr'", "]", "==", "'FL'", "and", "table", "[", "'name'", "]", "==", "'Files'", ":", "continue", "fh", "=", "open", "(", "make_csv_file_name", "(", "meta", ",", "table", ",", "out_folder", ")", ",", "'w'", ")", "columns", "=", "table", ".", "get", "(", "'columns'", ")", "writer", "=", "csv", ".", "writer", "(", "fh", ")", "writer", ".", "writerow", "(", "[", "encode_cell", "(", "c", "[", "'name'", "]", ")", "for", "c", "in", "columns", "]", ")", "for", "row", "in", "parse_data", "(", "data_tad", ",", "data_dat", ",", "table", ".", "get", "(", "'id'", ")", ",", "columns", ")", ":", "writer", ".", "writerow", "(", "[", "encode_cell", "(", "c", ")", "for", "c", "in", "row", "]", ")", "fh", ".", "close", "(", ")" ]
Parse a cronos database.

Convert the database located in ``db_folder`` into CSV files in the
directory ``out_folder``.
[ "Parse", "a", "cronos", "database", "." ]
train
https://github.com/occrp/cronosparser/blob/e7748a1a98992b2cc2191f4ce3b1621db1365c3f/cronos/parser.py#L280-L309
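An editorial sketch of the end-to-end conversion, assuming cronosparser is installed and the input directory holds the three required files (CroStru.dat, CroBank.tad, CroBank.dat); both paths are hypothetical.

from cronos.parser import parse

parse("path/to/cronos_db", "out_csv")
# out_csv/ now contains one CSV per exported table, minus the internal "FL" Files table.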
wdecoster/nanoplotter
nanoplotter/plot.py
Plot.encode1
def encode1(self):
    """Return the base64 encoding of the figure file and insert in html image tag."""
    data_uri = b64encode(open(self.path, 'rb').read()).decode('utf-8').replace('\n', '')
    return '<img src="data:image/png;base64,{0}">'.format(data_uri)
python
def encode1(self):
    """Return the base64 encoding of the figure file and insert in html image tag."""
    data_uri = b64encode(open(self.path, 'rb').read()).decode('utf-8').replace('\n', '')
    return '<img src="data:image/png;base64,{0}">'.format(data_uri)
[ "def", "encode1", "(", "self", ")", ":", "data_uri", "=", "b64encode", "(", "open", "(", "self", ".", "path", ",", "'rb'", ")", ".", "read", "(", ")", ")", ".", "decode", "(", "'utf-8'", ")", ".", "replace", "(", "'\\n'", ",", "''", ")", "return", "'<img src=\"data:image/png;base64,{0}\">'", ".", "format", "(", "data_uri", ")" ]
Return the base64 encoding of the figure file and insert in html image tag.
[ "Return", "the", "base64", "encoding", "of", "the", "figure", "file", "and", "insert", "in", "html", "image", "tag", "." ]
train
https://github.com/wdecoster/nanoplotter/blob/80908dd1be585f450da5a66989de9de4d544ec85/nanoplotter/plot.py#L26-L29
wdecoster/nanoplotter
nanoplotter/plot.py
Plot.encode2
def encode2(self):
    """Return the base64 encoding of the fig attribute and insert in html image tag."""
    buf = BytesIO()
    self.fig.savefig(buf, format='png', bbox_inches='tight', dpi=100)
    buf.seek(0)
    string = b64encode(buf.read())
    return '<img src="data:image/png;base64,{0}">'.format(urlquote(string))
python
def encode2(self):
    """Return the base64 encoding of the fig attribute and insert in html image tag."""
    buf = BytesIO()
    self.fig.savefig(buf, format='png', bbox_inches='tight', dpi=100)
    buf.seek(0)
    string = b64encode(buf.read())
    return '<img src="data:image/png;base64,{0}">'.format(urlquote(string))
[ "def", "encode2", "(", "self", ")", ":", "buf", "=", "BytesIO", "(", ")", "self", ".", "fig", ".", "savefig", "(", "buf", ",", "format", "=", "'png'", ",", "bbox_inches", "=", "'tight'", ",", "dpi", "=", "100", ")", "buf", ".", "seek", "(", "0", ")", "string", "=", "b64encode", "(", "buf", ".", "read", "(", ")", ")", "return", "'<img src=\"data:image/png;base64,{0}\">'", ".", "format", "(", "urlquote", "(", "string", ")", ")" ]
Return the base64 encoding of the fig attribute and insert in html image tag.
[ "Return", "the", "base64", "encoding", "of", "the", "fig", "attribute", "and", "insert", "in", "html", "image", "tag", "." ]
train
https://github.com/wdecoster/nanoplotter/blob/80908dd1be585f450da5a66989de9de4d544ec85/nanoplotter/plot.py#L31-L37
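A self-contained editorial sketch of the in-memory pattern used by encode2: render a matplotlib figure straight into a base64 data URI without touching disk. (encode2 additionally URL-quotes the base64 payload; plain decoding, as in encode1, is the more common form.)

from base64 import b64encode
from io import BytesIO
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])
buf = BytesIO()
fig.savefig(buf, format="png", bbox_inches="tight", dpi=100)
buf.seek(0)
tag = '<img src="data:image/png;base64,{0}">'.format(b64encode(buf.read()).decode())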