Dataset schema (column · dtype · value stats):

  id                 int32          0 .. 252k
  repo               stringlengths  7 .. 55
  path               stringlengths  4 .. 127
  func_name          stringlengths  1 .. 88
  original_string    stringlengths  75 .. 19.8k
  language           stringclasses  1 value
  code               stringlengths  75 .. 19.8k
  code_tokens        list
  docstring          stringlengths  3 .. 17.3k
  docstring_tokens   list
  sha                stringlengths  40 .. 40
  url                stringlengths  87 .. 242
242,100
lsst-sqre/sqre-apikit
apikit/convenience.py
raise_from_response
def raise_from_response(resp):
    """Turn a failed request response into a BackendError.

    Handy for reflecting HTTP errors from farther back in the call
    chain.

    Parameters
    ----------
    resp: :class:`requests.Response`

    Raises
    ------
    :class:`apikit.BackendError`
        If `resp.status_code` is equal to or greater than 400.
    """
    if resp.status_code < 400:
        # Request was successful.  Or at least, not a failure.
        return
    raise BackendError(status_code=resp.status_code,
                       reason=resp.reason,
                       content=resp.text)
python
def raise_from_response(resp):
    """Turn a failed request response into a BackendError.

    Handy for reflecting HTTP errors from farther back in the call
    chain.

    Parameters
    ----------
    resp: :class:`requests.Response`

    Raises
    ------
    :class:`apikit.BackendError`
        If `resp.status_code` is equal to or greater than 400.
    """
    if resp.status_code < 400:
        # Request was successful.  Or at least, not a failure.
        return
    raise BackendError(status_code=resp.status_code,
                       reason=resp.reason,
                       content=resp.text)
[ "def", "raise_from_response", "(", "resp", ")", ":", "if", "resp", ".", "status_code", "<", "400", ":", "# Request was successful. Or at least, not a failure.", "return", "raise", "BackendError", "(", "status_code", "=", "resp", ".", "status_code", ",", "reason", "=", "resp", ".", "reason", ",", "content", "=", "resp", ".", "text", ")" ]
Turn a failed request response into a BackendError.

Handy for reflecting HTTP errors from farther back in the call
chain.

Parameters
----------
resp: :class:`requests.Response`

Raises
------
:class:`apikit.BackendError`
    If `resp.status_code` is equal to or greater than 400.
[ "Turn", "a", "failed", "request", "response", "into", "a", "BackendError", ".", "Handy", "for", "reflecting", "HTTP", "errors", "from", "farther", "back", "in", "the", "call", "chain", "." ]
ff505b63d2e29303ff7f05f2bd5eabd0f6d7026e
https://github.com/lsst-sqre/sqre-apikit/blob/ff505b63d2e29303ff7f05f2bd5eabd0f6d7026e/apikit/convenience.py#L262-L280
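A minimal usage sketch for this record's function (not part of the dataset; it assumes `raise_from_response` and `BackendError` are importable from the `apikit` package as the docstring's `:class:` references suggest, and the endpoint URL is illustrative):

    import requests
    from apikit import BackendError
    from apikit.convenience import raise_from_response

    resp = requests.get('https://example.com/api/thing')  # hypothetical endpoint
    try:
        raise_from_response(resp)  # no-op for any status below 400
    except BackendError as exc:
        # status_code/reason attributes assumed to mirror the constructor kwargs
        print(exc.status_code, exc.reason)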
242,101
lsst-sqre/sqre-apikit
apikit/convenience.py
get_logger
def get_logger(file=None, syslog=False, loghost=None, level=None):
    """Creates a logging object compatible with Python standard logging,
    but which, as a `structlog` instance, emits JSON.

    Parameters
    ----------
    file: `None` or `str` (default `None`)
        If given, send log output to file; otherwise, to `stdout`.
    syslog: `bool` (default `False`)
        If `True`, log to syslog.
    loghost: `None` or `str` (default `None`)
        If given, send syslog output to specified host, UDP port 514.
    level: `None` or `str` (default `None`)
        If given, and if one of (case-insensitive) `DEBUG`, `INFO`,
        `WARNING`, `ERROR`, or `CRITICAL`, log events of that level
        or higher.  Defaults to `WARNING`.

    Returns
    -------
    :class:`structlog.Logger`
        A logging object
    """
    if not syslog:
        if not file:
            handler = logging.StreamHandler(sys.stdout)
        else:
            handler = logging.FileHandler(file)
    else:
        if loghost:
            handler = logging.handlers.SysLogHandler(loghost, 514)
        else:
            handler = logging.handlers.SysLogHandler()
    root_logger = logging.getLogger()
    if level:
        level = level.upper()
        lldict = {
            'DEBUG': logging.DEBUG,
            'INFO': logging.INFO,
            'WARNING': logging.WARNING,
            'ERROR': logging.ERROR,
            'CRITICAL': logging.CRITICAL
        }
        if level in lldict:
            root_logger.setLevel(lldict[level])
    root_logger.addHandler(handler)
    structlog.configure(
        processors=[
            structlog.stdlib.filter_by_level,
            structlog.stdlib.add_logger_name,
            structlog.stdlib.add_log_level,
            structlog.stdlib.PositionalArgumentsFormatter(),
            structlog.processors.TimeStamper(fmt="iso"),
            structlog.processors.StackInfoRenderer(),
            structlog.processors.format_exc_info,
            structlog.processors.JSONRenderer()
        ],
        context_class=structlog.threadlocal.wrap_dict(dict),
        logger_factory=structlog.stdlib.LoggerFactory(),
        wrapper_class=structlog.stdlib.BoundLogger,
        cache_logger_on_first_use=True,
    )
    log = structlog.get_logger()
    return log
python
def get_logger(file=None, syslog=False, loghost=None, level=None):
    """Creates a logging object compatible with Python standard logging,
    but which, as a `structlog` instance, emits JSON.

    Parameters
    ----------
    file: `None` or `str` (default `None`)
        If given, send log output to file; otherwise, to `stdout`.
    syslog: `bool` (default `False`)
        If `True`, log to syslog.
    loghost: `None` or `str` (default `None`)
        If given, send syslog output to specified host, UDP port 514.
    level: `None` or `str` (default `None`)
        If given, and if one of (case-insensitive) `DEBUG`, `INFO`,
        `WARNING`, `ERROR`, or `CRITICAL`, log events of that level
        or higher.  Defaults to `WARNING`.

    Returns
    -------
    :class:`structlog.Logger`
        A logging object
    """
    if not syslog:
        if not file:
            handler = logging.StreamHandler(sys.stdout)
        else:
            handler = logging.FileHandler(file)
    else:
        if loghost:
            handler = logging.handlers.SysLogHandler(loghost, 514)
        else:
            handler = logging.handlers.SysLogHandler()
    root_logger = logging.getLogger()
    if level:
        level = level.upper()
        lldict = {
            'DEBUG': logging.DEBUG,
            'INFO': logging.INFO,
            'WARNING': logging.WARNING,
            'ERROR': logging.ERROR,
            'CRITICAL': logging.CRITICAL
        }
        if level in lldict:
            root_logger.setLevel(lldict[level])
    root_logger.addHandler(handler)
    structlog.configure(
        processors=[
            structlog.stdlib.filter_by_level,
            structlog.stdlib.add_logger_name,
            structlog.stdlib.add_log_level,
            structlog.stdlib.PositionalArgumentsFormatter(),
            structlog.processors.TimeStamper(fmt="iso"),
            structlog.processors.StackInfoRenderer(),
            structlog.processors.format_exc_info,
            structlog.processors.JSONRenderer()
        ],
        context_class=structlog.threadlocal.wrap_dict(dict),
        logger_factory=structlog.stdlib.LoggerFactory(),
        wrapper_class=structlog.stdlib.BoundLogger,
        cache_logger_on_first_use=True,
    )
    log = structlog.get_logger()
    return log
[ "def", "get_logger", "(", "file", "=", "None", ",", "syslog", "=", "False", ",", "loghost", "=", "None", ",", "level", "=", "None", ")", ":", "if", "not", "syslog", ":", "if", "not", "file", ":", "handler", "=", "logging", ".", "StreamHandler", "(", "sys", ".", "stdout", ")", "else", ":", "handler", "=", "logging", ".", "FileHandler", "(", "file", ")", "else", ":", "if", "loghost", ":", "handler", "=", "logging", ".", "handlers", ".", "SysLogHandler", "(", "loghost", ",", "514", ")", "else", ":", "handler", "=", "logging", ".", "handlers", ".", "SysLogHandler", "(", ")", "root_logger", "=", "logging", ".", "getLogger", "(", ")", "if", "level", ":", "level", "=", "level", ".", "upper", "(", ")", "lldict", "=", "{", "'DEBUG'", ":", "logging", ".", "DEBUG", ",", "'INFO'", ":", "logging", ".", "INFO", ",", "'WARNING'", ":", "logging", ".", "WARNING", ",", "'ERROR'", ":", "logging", ".", "ERROR", ",", "'CRITICAL'", ":", "logging", ".", "CRITICAL", "}", "if", "level", "in", "lldict", ":", "root_logger", ".", "setLevel", "(", "lldict", "[", "level", "]", ")", "root_logger", ".", "addHandler", "(", "handler", ")", "structlog", ".", "configure", "(", "processors", "=", "[", "structlog", ".", "stdlib", ".", "filter_by_level", ",", "structlog", ".", "stdlib", ".", "add_logger_name", ",", "structlog", ".", "stdlib", ".", "add_log_level", ",", "structlog", ".", "stdlib", ".", "PositionalArgumentsFormatter", "(", ")", ",", "structlog", ".", "processors", ".", "TimeStamper", "(", "fmt", "=", "\"iso\"", ")", ",", "structlog", ".", "processors", ".", "StackInfoRenderer", "(", ")", ",", "structlog", ".", "processors", ".", "format_exc_info", ",", "structlog", ".", "processors", ".", "JSONRenderer", "(", ")", "]", ",", "context_class", "=", "structlog", ".", "threadlocal", ".", "wrap_dict", "(", "dict", ")", ",", "logger_factory", "=", "structlog", ".", "stdlib", ".", "LoggerFactory", "(", ")", ",", "wrapper_class", "=", "structlog", ".", "stdlib", ".", "BoundLogger", ",", "cache_logger_on_first_use", "=", "True", ",", ")", "log", "=", "structlog", ".", "get_logger", "(", ")", "return", "log" ]
Creates a logging object compatible with Python standard logging,
but which, as a `structlog` instance, emits JSON.

Parameters
----------
file: `None` or `str` (default `None`)
    If given, send log output to file; otherwise, to `stdout`.
syslog: `bool` (default `False`)
    If `True`, log to syslog.
loghost: `None` or `str` (default `None`)
    If given, send syslog output to specified host, UDP port 514.
level: `None` or `str` (default `None`)
    If given, and if one of (case-insensitive) `DEBUG`, `INFO`,
    `WARNING`, `ERROR`, or `CRITICAL`, log events of that level
    or higher.  Defaults to `WARNING`.

Returns
-------
:class:`structlog.Logger`
    A logging object
[ "Creates", "a", "logging", "object", "compatible", "with", "Python", "standard", "logging", "but", "which", "as", "a", "structlog", "instance", "emits", "JSON", "." ]
ff505b63d2e29303ff7f05f2bd5eabd0f6d7026e
https://github.com/lsst-sqre/sqre-apikit/blob/ff505b63d2e29303ff7f05f2bd5eabd0f6d7026e/apikit/convenience.py#L283-L348
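A usage sketch (module path taken from the record); passing `level='debug'` sets the root logger to DEBUG so that `filter_by_level` lets informational events through:

    from apikit.convenience import get_logger

    log = get_logger(level='debug')         # JSON log lines to stdout
    log.info('service started', port=8080)  # structlog-style key/value event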
242,102
Deisss/python-sockjsroom
sockjsroom/httpJsonHandler.py
JsonDefaultHandler.getBody
def getBody(self):
    """ Extract body json """
    data = None
    try:
        data = json.loads(self.request.body)
    except:
        data = json.loads(urllib.unquote_plus(self.request.body))
    return data
python
def getBody(self):
    """ Extract body json """
    data = None
    try:
        data = json.loads(self.request.body)
    except:
        data = json.loads(urllib.unquote_plus(self.request.body))
    return data
[ "def", "getBody", "(", "self", ")", ":", "data", "=", "None", "try", ":", "data", "=", "json", ".", "loads", "(", "self", ".", "request", ".", "body", ")", "except", ":", "data", "=", "json", ".", "loads", "(", "urllib", ".", "unquote_plus", "(", "self", ".", "request", ".", "body", ")", ")", "return", "data" ]
Extract body json
[ "Extract", "body", "json" ]
7c20187571d39e7fede848dc98f954235ca77241
https://github.com/Deisss/python-sockjsroom/blob/7c20187571d39e7fede848dc98f954235ca77241/sockjsroom/httpJsonHandler.py#L25-L32
242,103
Deisss/python-sockjsroom
sockjsroom/httpJsonHandler.py
JsonDefaultHandler.write
def write(self, obj):
    """ Print object on output """
    accept = self.request.headers.get("Accept")
    if "json" in accept:
        if JsonDefaultHandler.__parser is None:
            JsonDefaultHandler.__parser = Parser()
        super(JsonDefaultHandler, self).write(JsonDefaultHandler.__parser.encode(obj))
        return
    # If we are not in json mode
    super(JsonDefaultHandler, self).write(obj)
python
def write(self, obj):
    """ Print object on output """
    accept = self.request.headers.get("Accept")
    if "json" in accept:
        if JsonDefaultHandler.__parser is None:
            JsonDefaultHandler.__parser = Parser()
        super(JsonDefaultHandler, self).write(JsonDefaultHandler.__parser.encode(obj))
        return
    # If we are not in json mode
    super(JsonDefaultHandler, self).write(obj)
[ "def", "write", "(", "self", ",", "obj", ")", ":", "accept", "=", "self", ".", "request", ".", "headers", ".", "get", "(", "\"Accept\"", ")", "if", "\"json\"", "in", "accept", ":", "if", "JsonDefaultHandler", ".", "__parser", "is", "None", ":", "JsonDefaultHandler", ".", "__parser", "=", "Parser", "(", ")", "super", "(", "JsonDefaultHandler", ",", "self", ")", ".", "write", "(", "JsonDefaultHandler", ".", "__parser", ".", "encode", "(", "obj", ")", ")", "return", "# If we are not in json mode", "super", "(", "JsonDefaultHandler", ",", "self", ")", ".", "write", "(", "obj", ")" ]
Print object on output
[ "Print", "object", "on", "output" ]
7c20187571d39e7fede848dc98f954235ca77241
https://github.com/Deisss/python-sockjsroom/blob/7c20187571d39e7fede848dc98f954235ca77241/sockjsroom/httpJsonHandler.py#L34-L43
242,104
funkybob/antfarm
antfarm/request.py
Request.raw_cookies
def raw_cookies(self):
    '''Raw access to cookies'''
    cookie_data = self.environ.get('HTTP_COOKIE', '')
    cookies = SimpleCookie()
    if not cookie_data:
        return cookies
    cookies.load(cookie_data)
    return cookies
python
def raw_cookies(self):
    '''Raw access to cookies'''
    cookie_data = self.environ.get('HTTP_COOKIE', '')
    cookies = SimpleCookie()
    if not cookie_data:
        return cookies
    cookies.load(cookie_data)
    return cookies
[ "def", "raw_cookies", "(", "self", ")", ":", "cookie_data", "=", "self", ".", "environ", ".", "get", "(", "'HTTP_COOKIE'", ",", "''", ")", "cookies", "=", "SimpleCookie", "(", ")", "if", "not", "cookie_data", ":", "return", "cookies", "cookies", ".", "load", "(", "cookie_data", ")", "return", "cookies" ]
Raw access to cookies
[ "Raw", "access", "to", "cookies" ]
40a7cc450eba09a280b7bc8f7c68a807b0177c62
https://github.com/funkybob/antfarm/blob/40a7cc450eba09a280b7bc8f7c68a807b0177c62/antfarm/request.py#L22-L29
242,105
funkybob/antfarm
antfarm/request.py
Request.cookies
def cookies(self):
    '''Simplified Cookie access'''
    return {
        key: self.raw_cookies[key].value
        for key in self.raw_cookies.keys()
    }
python
def cookies(self):
    '''Simplified Cookie access'''
    return {
        key: self.raw_cookies[key].value
        for key in self.raw_cookies.keys()
    }
[ "def", "cookies", "(", "self", ")", ":", "return", "{", "key", ":", "self", ".", "raw_cookies", "[", "key", "]", ".", "value", "for", "key", "in", "self", ".", "raw_cookies", ".", "keys", "(", ")", "}" ]
Simplified Cookie access
[ "Simplified", "Cookie", "access" ]
40a7cc450eba09a280b7bc8f7c68a807b0177c62
https://github.com/funkybob/antfarm/blob/40a7cc450eba09a280b7bc8f7c68a807b0177c62/antfarm/request.py#L32-L37
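These two `Request` properties wrap the stdlib cookie parser; a standalone sketch of the same parsing, with no `Request` object needed (`SimpleCookie` here is the stdlib class the records rely on, and the header value is illustrative):

    from http.cookies import SimpleCookie

    cookies = SimpleCookie()
    cookies.load('sessionid=abc123; theme=dark')   # raw_cookies does this with HTTP_COOKIE
    print({k: cookies[k].value for k in cookies})  # cookies() flattens morsels to values
    # -> {'sessionid': 'abc123', 'theme': 'dark'}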
242,106
honzamach/pynspect
pynspect/benchmark/bench_jpath.py
random_jpath
def random_jpath(depth = 3):
    """
    Generate random JPath with given node depth.
    """
    chunks = []
    while depth > 0:
        length = random.randint(5, 15)
        ident = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase) for _ in range(length))
        if random.choice((True, False)):
            index = random.randint(0, 10)
            ident = "{:s}[{:d}]".format(ident, index)
        chunks.append(ident)
        depth -= 1
    return ".".join(chunks)
python
def random_jpath(depth = 3):
    """
    Generate random JPath with given node depth.
    """
    chunks = []
    while depth > 0:
        length = random.randint(5, 15)
        ident = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase) for _ in range(length))
        if random.choice((True, False)):
            index = random.randint(0, 10)
            ident = "{:s}[{:d}]".format(ident, index)
        chunks.append(ident)
        depth -= 1
    return ".".join(chunks)
[ "def", "random_jpath", "(", "depth", "=", "3", ")", ":", "chunks", "=", "[", "]", "while", "depth", ">", "0", ":", "length", "=", "random", ".", "randint", "(", "5", ",", "15", ")", "ident", "=", "''", ".", "join", "(", "random", ".", "choice", "(", "string", ".", "ascii_uppercase", "+", "string", ".", "ascii_lowercase", ")", "for", "_", "in", "range", "(", "length", ")", ")", "if", "random", ".", "choice", "(", "(", "True", ",", "False", ")", ")", ":", "index", "=", "random", ".", "randint", "(", "0", ",", "10", ")", "ident", "=", "\"{:s}[{:d}]\"", ".", "format", "(", "ident", ",", "index", ")", "chunks", ".", "append", "(", "ident", ")", "depth", "-=", "1", "return", "\".\"", ".", "join", "(", "chunks", ")" ]
Generate random JPath with given node depth.
[ "Generate", "random", "JPath", "with", "given", "node", "depth", "." ]
0582dcc1f7aafe50e25a21c792ea1b3367ea5881
https://github.com/honzamach/pynspect/blob/0582dcc1f7aafe50e25a21c792ea1b3367ea5881/pynspect/benchmark/bench_jpath.py#L36-L49
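A sketch of calling it (import path taken from the record; output is random, so the commented values are illustrative only):

    from pynspect.benchmark.bench_jpath import random_jpath

    print(random_jpath())         # e.g. 'AbcDeFgh[2].HijKlmNop.QrsTuvWx'
    print(random_jpath(depth=5))  # five dot-separated chunks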
242,107
praekelt/jmbo-twitter
jmbo_twitter/admin_views.py
feed_fetch_force
def feed_fetch_force(request, id, redirect_to):
    """Forcibly fetch tweets for the feed"""
    feed = Feed.objects.get(id=id)
    feed.fetch(force=True)
    msg = _("Fetched tweets for %s" % feed.name)
    messages.success(request, msg, fail_silently=True)
    return HttpResponseRedirect(redirect_to)
python
def feed_fetch_force(request, id, redirect_to):
    """Forcibly fetch tweets for the feed"""
    feed = Feed.objects.get(id=id)
    feed.fetch(force=True)
    msg = _("Fetched tweets for %s" % feed.name)
    messages.success(request, msg, fail_silently=True)
    return HttpResponseRedirect(redirect_to)
[ "def", "feed_fetch_force", "(", "request", ",", "id", ",", "redirect_to", ")", ":", "feed", "=", "Feed", ".", "objects", ".", "get", "(", "id", "=", "id", ")", "feed", ".", "fetch", "(", "force", "=", "True", ")", "msg", "=", "_", "(", "\"Fetched tweets for %s\"", "%", "feed", ".", "name", ")", "messages", ".", "success", "(", "request", ",", "msg", ",", "fail_silently", "=", "True", ")", "return", "HttpResponseRedirect", "(", "redirect_to", ")" ]
Forcibly fetch tweets for the feed
[ "Forcibly", "fetch", "tweets", "for", "the", "feed" ]
12e8eb08efcbc9d1423fc213b2f2d9d7fed8a775
https://github.com/praekelt/jmbo-twitter/blob/12e8eb08efcbc9d1423fc213b2f2d9d7fed8a775/jmbo_twitter/admin_views.py#L12-L18
242,108
praekelt/jmbo-twitter
jmbo_twitter/admin_views.py
search_fetch_force
def search_fetch_force(request, id, redirect_to):
    """Forcibly fetch tweets for the search"""
    search = Search.objects.get(id=id)
    search.fetch(force=True)
    msg = _("Fetched tweets for %s" % search.criteria)
    messages.success(request, msg, fail_silently=True)
    return HttpResponseRedirect(redirect_to)
python
def search_fetch_force(request, id, redirect_to):
    """Forcibly fetch tweets for the search"""
    search = Search.objects.get(id=id)
    search.fetch(force=True)
    msg = _("Fetched tweets for %s" % search.criteria)
    messages.success(request, msg, fail_silently=True)
    return HttpResponseRedirect(redirect_to)
[ "def", "search_fetch_force", "(", "request", ",", "id", ",", "redirect_to", ")", ":", "search", "=", "Search", ".", "objects", ".", "get", "(", "id", "=", "id", ")", "search", ".", "fetch", "(", "force", "=", "True", ")", "msg", "=", "_", "(", "\"Fetched tweets for %s\"", "%", "search", ".", "criteria", ")", "messages", ".", "success", "(", "request", ",", "msg", ",", "fail_silently", "=", "True", ")", "return", "HttpResponseRedirect", "(", "redirect_to", ")" ]
Forcibly fetch tweets for the search
[ "Forcibly", "fetch", "tweets", "for", "the", "search" ]
12e8eb08efcbc9d1423fc213b2f2d9d7fed8a775
https://github.com/praekelt/jmbo-twitter/blob/12e8eb08efcbc9d1423fc213b2f2d9d7fed8a775/jmbo_twitter/admin_views.py#L32-L38
242,109
Rafiot/PubSubLogger
pubsublogger/publisher.py
__connect
def __connect():
    """
    Connect to a redis instance.
    """
    global redis_instance
    if use_tcp_socket:
        redis_instance = redis.StrictRedis(host=hostname, port=port)
    else:
        redis_instance = redis.StrictRedis(unix_socket_path=unix_socket)
python
def __connect():
    """
    Connect to a redis instance.
    """
    global redis_instance
    if use_tcp_socket:
        redis_instance = redis.StrictRedis(host=hostname, port=port)
    else:
        redis_instance = redis.StrictRedis(unix_socket_path=unix_socket)
[ "def", "__connect", "(", ")", ":", "global", "redis_instance", "if", "use_tcp_socket", ":", "redis_instance", "=", "redis", ".", "StrictRedis", "(", "host", "=", "hostname", ",", "port", "=", "port", ")", "else", ":", "redis_instance", "=", "redis", ".", "StrictRedis", "(", "unix_socket_path", "=", "unix_socket", ")" ]
Connect to a redis instance.
[ "Connect", "to", "a", "redis", "instance", "." ]
4f28ad673f42ee2ec7792d414d325aef9a56da53
https://github.com/Rafiot/PubSubLogger/blob/4f28ad673f42ee2ec7792d414d325aef9a56da53/pubsublogger/publisher.py#L35-L43
242,110
Rafiot/PubSubLogger
pubsublogger/publisher.py
log
def log(level, message):
    """
    Publish `message` with the `level` the redis `channel`.

    :param level: the level of the message
    :param message: the message you want to log
    """
    if redis_instance is None:
        __connect()

    if level not in __error_levels:
        raise InvalidErrorLevel('You have used an invalid error level. \
                Please choose in: ' + ', '.join(__error_levels))
    if channel is None:
        raise NoChannelError('Please set a channel.')
    c = '{channel}.{level}'.format(channel=channel, level=level)
    redis_instance.publish(c, message)
python
def log(level, message):
    """
    Publish `message` with the `level` the redis `channel`.

    :param level: the level of the message
    :param message: the message you want to log
    """
    if redis_instance is None:
        __connect()

    if level not in __error_levels:
        raise InvalidErrorLevel('You have used an invalid error level. \
                Please choose in: ' + ', '.join(__error_levels))
    if channel is None:
        raise NoChannelError('Please set a channel.')
    c = '{channel}.{level}'.format(channel=channel, level=level)
    redis_instance.publish(c, message)
[ "def", "log", "(", "level", ",", "message", ")", ":", "if", "redis_instance", "is", "None", ":", "__connect", "(", ")", "if", "level", "not", "in", "__error_levels", ":", "raise", "InvalidErrorLevel", "(", "'You have used an invalid error level. \\\n Please choose in: '", "+", "', '", ".", "join", "(", "__error_levels", ")", ")", "if", "channel", "is", "None", ":", "raise", "NoChannelError", "(", "'Please set a channel.'", ")", "c", "=", "'{channel}.{level}'", ".", "format", "(", "channel", "=", "channel", ",", "level", "=", "level", ")", "redis_instance", ".", "publish", "(", "c", ",", "message", ")" ]
Publish `message` with the `level` the redis `channel`.

:param level: the level of the message
:param message: the message you want to log
[ "Publish", "message", "with", "the", "level", "the", "redis", "channel", "." ]
4f28ad673f42ee2ec7792d414d325aef9a56da53
https://github.com/Rafiot/PubSubLogger/blob/4f28ad673f42ee2ec7792d414d325aef9a56da53/pubsublogger/publisher.py#L46-L62
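A usage sketch for the two `publisher` records above. Setting the module-level `channel` global from outside is an assumption drawn from the code shown, and 'INFO' is assumed to be among the accepted `__error_levels`:

    from pubsublogger import publisher

    publisher.channel = 'myapp'           # log() raises NoChannelError if unset
    publisher.log('INFO', 'hello world')  # publishes to the 'myapp.INFO' redis channel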
242,111
JNRowe/jnrbase
jnrbase/httplib2_certs.py
find_certs
def find_certs() -> str:
    """Find suitable certificates for ``httplib2``.

    Warning:
        The default behaviour is to fall back to the bundled certificates
        when no system certificates can be found.  If you're packaging
        ``jnrbase`` *please* set ``ALLOW_FALLBACK`` to ``False`` to
        disable this very much unwanted behaviour, but please maintain
        the option so that downstream users can inspect the configuration
        easily.

    See also:
        :pypi:`httplib2`

    Returns:
        Path to SSL certificates

    Raises:
        RuntimeError: When no suitable certificates are found
    """
    bundle = path.realpath(path.dirname(httplib2.CA_CERTS))
    # Some distros symlink the bundled path location to the system certs
    if not bundle.startswith(path.dirname(httplib2.__file__)):
        return bundle
    for platform, files in PLATFORM_FILES.items():
        if sys.platform.startswith(platform):
            for cert_file in files:
                if path.exists(cert_file):
                    return cert_file
    # An apparently common environment setting for macOS users to workaround
    # the lack of “standard” certs installation
    if path.exists(getenv('CURL_CA_BUNDLE', '')):
        return getenv('CURL_CA_BUNDLE')
    if ALLOW_FALLBACK:
        warnings.warn('No system certs detected, falling back to bundled',
                      RuntimeWarning)
        return httplib2.CA_CERTS
    else:
        raise RuntimeError('No system certs detected!')
python
def find_certs() -> str:
    """Find suitable certificates for ``httplib2``.

    Warning:
        The default behaviour is to fall back to the bundled certificates
        when no system certificates can be found.  If you're packaging
        ``jnrbase`` *please* set ``ALLOW_FALLBACK`` to ``False`` to
        disable this very much unwanted behaviour, but please maintain
        the option so that downstream users can inspect the configuration
        easily.

    See also:
        :pypi:`httplib2`

    Returns:
        Path to SSL certificates

    Raises:
        RuntimeError: When no suitable certificates are found
    """
    bundle = path.realpath(path.dirname(httplib2.CA_CERTS))
    # Some distros symlink the bundled path location to the system certs
    if not bundle.startswith(path.dirname(httplib2.__file__)):
        return bundle
    for platform, files in PLATFORM_FILES.items():
        if sys.platform.startswith(platform):
            for cert_file in files:
                if path.exists(cert_file):
                    return cert_file
    # An apparently common environment setting for macOS users to workaround
    # the lack of “standard” certs installation
    if path.exists(getenv('CURL_CA_BUNDLE', '')):
        return getenv('CURL_CA_BUNDLE')
    if ALLOW_FALLBACK:
        warnings.warn('No system certs detected, falling back to bundled',
                      RuntimeWarning)
        return httplib2.CA_CERTS
    else:
        raise RuntimeError('No system certs detected!')
[ "def", "find_certs", "(", ")", "->", "str", ":", "bundle", "=", "path", ".", "realpath", "(", "path", ".", "dirname", "(", "httplib2", ".", "CA_CERTS", ")", ")", "# Some distros symlink the bundled path location to the system certs", "if", "not", "bundle", ".", "startswith", "(", "path", ".", "dirname", "(", "httplib2", ".", "__file__", ")", ")", ":", "return", "bundle", "for", "platform", ",", "files", "in", "PLATFORM_FILES", ".", "items", "(", ")", ":", "if", "sys", ".", "platform", ".", "startswith", "(", "platform", ")", ":", "for", "cert_file", "in", "files", ":", "if", "path", ".", "exists", "(", "cert_file", ")", ":", "return", "cert_file", "# An apparently common environment setting for macOS users to workaround", "# the lack of “standard” certs installation", "if", "path", ".", "exists", "(", "getenv", "(", "'CURL_CA_BUNDLE'", ",", "''", ")", ")", ":", "return", "getenv", "(", "'CURL_CA_BUNDLE'", ")", "if", "ALLOW_FALLBACK", ":", "warnings", ".", "warn", "(", "'No system certs detected, falling back to bundled'", ",", "RuntimeWarning", ")", "return", "httplib2", ".", "CA_CERTS", "else", ":", "raise", "RuntimeError", "(", "'No system certs detected!'", ")" ]
Find suitable certificates for ``httplib2``.

Warning:
    The default behaviour is to fall back to the bundled certificates
    when no system certificates can be found.  If you're packaging
    ``jnrbase`` *please* set ``ALLOW_FALLBACK`` to ``False`` to disable
    this very much unwanted behaviour, but please maintain the option
    so that downstream users can inspect the configuration easily.

See also:
    :pypi:`httplib2`

Returns:
    Path to SSL certificates

Raises:
    RuntimeError: When no suitable certificates are found
[ "Find", "suitable", "certificates", "for", "httplib2", "." ]
ae505ef69a9feb739b5f4e62c5a8e6533104d3ea
https://github.com/JNRowe/jnrbase/blob/ae505ef69a9feb739b5f4e62c5a8e6533104d3ea/jnrbase/httplib2_certs.py#L41-L76
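A usage sketch: `httplib2.Http` accepts a `ca_certs` argument, so the discovered bundle can be passed straight in:

    import httplib2
    from jnrbase.httplib2_certs import find_certs

    http = httplib2.Http(ca_certs=find_certs())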
242,112
tBaxter/tango-articles
build/lib/articles/models.py
Article.get_absolute_url
def get_absolute_url(self):
    """
    If override_url was given, use that.
    Otherwise, if the content belongs to a blog, use a blog url.
    If not, use a regular article url.
    """
    if self.override_url:
        return self.override_url
    if self.destination.is_blog:
        return reverse('blog_entry_detail', args=[self.destination.slug, self.slug])
    return reverse('article_detail', args=[self.slug])
python
def get_absolute_url(self):
    """
    If override_url was given, use that.
    Otherwise, if the content belongs to a blog, use a blog url.
    If not, use a regular article url.
    """
    if self.override_url:
        return self.override_url
    if self.destination.is_blog:
        return reverse('blog_entry_detail', args=[self.destination.slug, self.slug])
    return reverse('article_detail', args=[self.slug])
[ "def", "get_absolute_url", "(", "self", ")", ":", "if", "self", ".", "override_url", ":", "return", "self", ".", "override_url", "if", "self", ".", "destination", ".", "is_blog", ":", "return", "reverse", "(", "'blog_entry_detail'", ",", "args", "=", "[", "self", ".", "destination", ".", "slug", ",", "self", ".", "slug", "]", ")", "return", "reverse", "(", "'article_detail'", ",", "args", "=", "[", "self", ".", "slug", "]", ")" ]
If override_url was given, use that. Otherwise, if the content belongs to a blog, use a blog url. If not, use a regular article url.
[ "If", "override_url", "was", "given", "use", "that", ".", "Otherwise", "if", "the", "content", "belongs", "to", "a", "blog", "use", "a", "blog", "url", ".", "If", "not", "use", "a", "regular", "article", "url", "." ]
93818dcca1b62042a4fc19af63474691b0fe931c
https://github.com/tBaxter/tango-articles/blob/93818dcca1b62042a4fc19af63474691b0fe931c/build/lib/articles/models.py#L183-L193
242,113
tBaxter/tango-articles
build/lib/articles/models.py
Article.save
def save(self, *args, **kwargs):
    """
    Store summary if none was given
    and created formatted version of body text.
    """
    if not self.summary:
        self.summary = truncatewords(self.body, 50)
    self.body_formatted = sanetize_text(self.body)
    super(Article, self).save()
python
def save(self, *args, **kwargs):
    """
    Store summary if none was given
    and created formatted version of body text.
    """
    if not self.summary:
        self.summary = truncatewords(self.body, 50)
    self.body_formatted = sanetize_text(self.body)
    super(Article, self).save()
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "summary", ":", "self", ".", "summary", "=", "truncatewords", "(", "self", ".", "body", ",", "50", ")", "self", ".", "body_formatted", "=", "sanetize_text", "(", "self", ".", "body", ")", "super", "(", "Article", ",", "self", ")", ".", "save", "(", ")" ]
Store summary if none was given and created formatted version of body text.
[ "Store", "summary", "if", "none", "was", "given", "and", "created", "formatted", "version", "of", "body", "text", "." ]
93818dcca1b62042a4fc19af63474691b0fe931c
https://github.com/tBaxter/tango-articles/blob/93818dcca1b62042a4fc19af63474691b0fe931c/build/lib/articles/models.py#L195-L203
242,114
bsvetchine/django-payzen
django_payzen/models.py
PaymentRequest.set_vads_payment_config
def set_vads_payment_config(self):
    """ vads_payment_config can be set only after object saving.

    A custom payment config can be set once PaymentRequest saved
    (adding elements to the m2m relationship). As a consequence
    we set vads_payment_config just before sending data elements
    to payzen."""
    self.vads_payment_config = tools.get_vads_payment_config(
        self.payment_config, self.custom_payment_config.all())
python
def set_vads_payment_config(self):
    """ vads_payment_config can be set only after object saving.

    A custom payment config can be set once PaymentRequest saved
    (adding elements to the m2m relationship). As a consequence
    we set vads_payment_config just before sending data elements
    to payzen."""
    self.vads_payment_config = tools.get_vads_payment_config(
        self.payment_config, self.custom_payment_config.all())
[ "def", "set_vads_payment_config", "(", "self", ")", ":", "self", ".", "vads_payment_config", "=", "tools", ".", "get_vads_payment_config", "(", "self", ".", "payment_config", ",", "self", ".", "custom_payment_config", ".", "all", "(", ")", ")" ]
vads_payment_config can be set only after object saving. A custom payment config can be set once PaymentRequest saved (adding elements to the m2m relationship). As a consequence we set vads_payment_config just before sending data elements to payzen.
[ "vads_payment_config", "can", "be", "set", "only", "after", "object", "saving", "." ]
944c3026120151495310cb1eb3c6370dc2db3db9
https://github.com/bsvetchine/django-payzen/blob/944c3026120151495310cb1eb3c6370dc2db3db9/django_payzen/models.py#L407-L416
242,115
bsvetchine/django-payzen
django_payzen/models.py
PaymentRequest.save
def save(self):
    """ We set up vads_trans_id and theme according to payzen format.

    If fields values are explicitely set by user, we do not
    override their values. """
    if not self.vads_trans_date:
        self.vads_trans_date = datetime.datetime.utcnow().replace(
            tzinfo=utc).strftime("%Y%m%d%H%M%S")
    if not self.vads_trans_id:
        self.vads_trans_id = tools.get_vads_trans_id(
            self.vads_site_id, self.vads_trans_date)
    if self.theme and not self.vads_theme_config:
        self.vads_theme_config = str(self.theme)
    if not self.pk:
        super(PaymentRequest, self).save()
    self.set_vads_payment_config()
    self.set_signature()
    super(PaymentRequest, self).save()
python
def save(self):
    """ We set up vads_trans_id and theme according to payzen format.

    If fields values are explicitely set by user, we do not
    override their values. """
    if not self.vads_trans_date:
        self.vads_trans_date = datetime.datetime.utcnow().replace(
            tzinfo=utc).strftime("%Y%m%d%H%M%S")
    if not self.vads_trans_id:
        self.vads_trans_id = tools.get_vads_trans_id(
            self.vads_site_id, self.vads_trans_date)
    if self.theme and not self.vads_theme_config:
        self.vads_theme_config = str(self.theme)
    if not self.pk:
        super(PaymentRequest, self).save()
    self.set_vads_payment_config()
    self.set_signature()
    super(PaymentRequest, self).save()
[ "def", "save", "(", "self", ")", ":", "if", "not", "self", ".", "vads_trans_date", ":", "self", ".", "vads_trans_date", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "replace", "(", "tzinfo", "=", "utc", ")", ".", "strftime", "(", "\"%Y%m%d%H%M%S\"", ")", "if", "not", "self", ".", "vads_trans_id", ":", "self", ".", "vads_trans_id", "=", "tools", ".", "get_vads_trans_id", "(", "self", ".", "vads_site_id", ",", "self", ".", "vads_trans_date", ")", "if", "self", ".", "theme", "and", "not", "self", ".", "vads_theme_config", ":", "self", ".", "vads_theme_config", "=", "str", "(", "self", ".", "theme", ")", "if", "not", "self", ".", "pk", ":", "super", "(", "PaymentRequest", ",", "self", ")", ".", "save", "(", ")", "self", ".", "set_vads_payment_config", "(", ")", "self", ".", "set_signature", "(", ")", "super", "(", "PaymentRequest", ",", "self", ")", ".", "save", "(", ")" ]
We set up vads_trans_id and theme according to payzen format. If fields values are explicitely set by user, we do not override their values.
[ "We", "set", "up", "vads_trans_id", "and", "theme", "according", "to", "payzen", "format", "." ]
944c3026120151495310cb1eb3c6370dc2db3db9
https://github.com/bsvetchine/django-payzen/blob/944c3026120151495310cb1eb3c6370dc2db3db9/django_payzen/models.py#L435-L454
242,116
casebeer/factual
factual/v2/responses.py
V2ReadResponse.records
def records(self):
    '''Return a list of dicts corresponding to the data returned by Factual.'''
    if self._records == None:
        self._records = self._get_records()
    return self._records
python
def records(self):
    '''Return a list of dicts corresponding to the data returned by Factual.'''
    if self._records == None:
        self._records = self._get_records()
    return self._records
[ "def", "records", "(", "self", ")", ":", "if", "self", ".", "_records", "==", "None", ":", "self", ".", "_records", "=", "self", ".", "_get_records", "(", ")", "return", "self", ".", "_records" ]
Return a list of dicts corresponding to the data returned by Factual.
[ "Return", "a", "list", "of", "dicts", "corresponding", "to", "the", "data", "returned", "by", "Factual", "." ]
f2795a8c9fd447c5d62887ae0f960481ce13be84
https://github.com/casebeer/factual/blob/f2795a8c9fd447c5d62887ae0f960481ce13be84/factual/v2/responses.py#L20-L24
242,117
Fuyukai/ConfigMaster
configmaster/ConfigGenerator.py
GenerateConfigFile
def GenerateConfigFile(load_hook, dump_hook, **kwargs) -> ConfigFile:
    """
    Generates a ConfigFile object using the specified hooks.

    These hooks should be functions, and have one argument. When a hook
    is called, the ConfigFile object is passed to it. Use this to load
    your data from the fd object, or request, or whatever.

    This returns a ConfigFile object.
    """
    def ConfigFileGenerator(filename, safe_load: bool=True):
        cfg = ConfigFile(fd=filename, load_hook=load_hook, dump_hook=dump_hook, safe_load=safe_load, **kwargs)
        return cfg
    return ConfigFileGenerator
python
def GenerateConfigFile(load_hook, dump_hook, **kwargs) -> ConfigFile:
    """
    Generates a ConfigFile object using the specified hooks.

    These hooks should be functions, and have one argument. When a hook
    is called, the ConfigFile object is passed to it. Use this to load
    your data from the fd object, or request, or whatever.

    This returns a ConfigFile object.
    """
    def ConfigFileGenerator(filename, safe_load: bool=True):
        cfg = ConfigFile(fd=filename, load_hook=load_hook, dump_hook=dump_hook, safe_load=safe_load, **kwargs)
        return cfg
    return ConfigFileGenerator
[ "def", "GenerateConfigFile", "(", "load_hook", ",", "dump_hook", ",", "*", "*", "kwargs", ")", "->", "ConfigFile", ":", "def", "ConfigFileGenerator", "(", "filename", ",", "safe_load", ":", "bool", "=", "True", ")", ":", "cfg", "=", "ConfigFile", "(", "fd", "=", "filename", ",", "load_hook", "=", "load_hook", ",", "dump_hook", "=", "dump_hook", ",", "safe_load", "=", "safe_load", ",", "*", "*", "kwargs", ")", "return", "cfg", "return", "ConfigFileGenerator" ]
Generates a ConfigFile object using the specified hooks. These hooks should be functions, and have one argument. When a hook is called, the ConfigFile object is passed to it. Use this to load your data from the fd object, or request, or whatever. This returns a ConfigFile object.
[ "Generates", "a", "ConfigFile", "object", "using", "the", "specified", "hooks", "." ]
8018aa415da55c84edaa8a49664f674758a14edd
https://github.com/Fuyukai/ConfigMaster/blob/8018aa415da55c84edaa8a49664f674758a14edd/configmaster/ConfigGenerator.py#L3-L16
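A usage sketch of the generator pattern this record describes; the hook bodies are left as stubs because the `ConfigFile` attributes they would touch are not shown in the record:

    from configmaster.ConfigGenerator import GenerateConfigFile

    def my_load_hook(cfg):
        ...  # read data in via the ConfigFile passed to the hook

    def my_dump_hook(cfg):
        ...  # write the ConfigFile's data back out

    MyConfigFile = GenerateConfigFile(my_load_hook, my_dump_hook)
    cfg = MyConfigFile('settings.cfg', safe_load=True)  # a ConfigFile bound to the hooks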
242,118
Fuyukai/ConfigMaster
configmaster/ConfigGenerator.py
GenerateNetworkedConfigFile
def GenerateNetworkedConfigFile(load_hook, normal_class_load_hook, normal_class_dump_hook, **kwargs) -> NetworkedConfigObject:
    """
    Generates a NetworkedConfigObject using the specified hooks.
    """
    def NetworkedConfigObjectGenerator(url, safe_load: bool=True):
        cfg = NetworkedConfigObject(url=url, load_hook=load_hook, safe_load=safe_load,
                                    normal_class_load_hook=normal_class_load_hook,
                                    normal_class_dump_hook=normal_class_dump_hook)
        return cfg
    return NetworkedConfigObjectGenerator
python
def GenerateNetworkedConfigFile(load_hook, normal_class_load_hook, normal_class_dump_hook, **kwargs) -> NetworkedConfigObject:
    """
    Generates a NetworkedConfigObject using the specified hooks.
    """
    def NetworkedConfigObjectGenerator(url, safe_load: bool=True):
        cfg = NetworkedConfigObject(url=url, load_hook=load_hook, safe_load=safe_load,
                                    normal_class_load_hook=normal_class_load_hook,
                                    normal_class_dump_hook=normal_class_dump_hook)
        return cfg
    return NetworkedConfigObjectGenerator
[ "def", "GenerateNetworkedConfigFile", "(", "load_hook", ",", "normal_class_load_hook", ",", "normal_class_dump_hook", ",", "*", "*", "kwargs", ")", "->", "NetworkedConfigObject", ":", "def", "NetworkedConfigObjectGenerator", "(", "url", ",", "safe_load", ":", "bool", "=", "True", ")", ":", "cfg", "=", "NetworkedConfigObject", "(", "url", "=", "url", ",", "load_hook", "=", "load_hook", ",", "safe_load", "=", "safe_load", ",", "normal_class_load_hook", "=", "normal_class_load_hook", ",", "normal_class_dump_hook", "=", "normal_class_dump_hook", ")", "return", "cfg", "return", "NetworkedConfigObjectGenerator" ]
Generates a NetworkedConfigObject using the specified hooks.
[ "Generates", "a", "NetworkedConfigObject", "using", "the", "specified", "hooks", "." ]
8018aa415da55c84edaa8a49664f674758a14edd
https://github.com/Fuyukai/ConfigMaster/blob/8018aa415da55c84edaa8a49664f674758a14edd/configmaster/ConfigGenerator.py#L18-L28
242,119
Meseira/subordinate
subordinate/idrangeset.py
IdRangeSet.append
def append(self, first, count):
    """
    Add to the set a range of count consecutive ids starting
    at id first.
    """

    self.__range.append(IdRange(first, count))
python
def append(self, first, count):
    """
    Add to the set a range of count consecutive ids starting
    at id first.
    """

    self.__range.append(IdRange(first, count))
[ "def", "append", "(", "self", ",", "first", ",", "count", ")", ":", "self", ".", "__range", ".", "append", "(", "IdRange", "(", "first", ",", "count", ")", ")" ]
Add to the set a range of count consecutive ids starting at id first.
[ "Add", "to", "the", "set", "a", "range", "of", "count", "consecutive", "ids", "starting", "at", "id", "first", "." ]
3438df304af3dccc5bd1515231402afa708f1cc3
https://github.com/Meseira/subordinate/blob/3438df304af3dccc5bd1515231402afa708f1cc3/subordinate/idrangeset.py#L90-L96
242,120
Meseira/subordinate
subordinate/idrangeset.py
IdRangeSet.remove
def remove(self, first, count):
    """
    Remove a range of count consecutive ids starting at id first
    from all the ranges in the set.
    """

    # Avoid trivialities
    if first < 0 or count < 1:
        return

    new_range = []
    last = first + count - 1
    for r in self.__range:
        if first <= r.last and r.first <= last:
            # There is an overlap
            if r.first < first:
                new_range.append(IdRange(r.first, first-r.first))
            if last < r.last:
                new_range.append(IdRange(last+1, r.last-last))
        else:
            # No overlap, range is kept
            new_range.append(r)

    self.__range = new_range
python
def remove(self, first, count):
    """
    Remove a range of count consecutive ids starting at id first
    from all the ranges in the set.
    """

    # Avoid trivialities
    if first < 0 or count < 1:
        return

    new_range = []
    last = first + count - 1
    for r in self.__range:
        if first <= r.last and r.first <= last:
            # There is an overlap
            if r.first < first:
                new_range.append(IdRange(r.first, first-r.first))
            if last < r.last:
                new_range.append(IdRange(last+1, r.last-last))
        else:
            # No overlap, range is kept
            new_range.append(r)

    self.__range = new_range
[ "def", "remove", "(", "self", ",", "first", ",", "count", ")", ":", "# Avoid trivialities", "if", "first", "<", "0", "or", "count", "<", "1", ":", "return", "new_range", "=", "[", "]", "last", "=", "first", "+", "count", "-", "1", "for", "r", "in", "self", ".", "__range", ":", "if", "first", "<=", "r", ".", "last", "and", "r", ".", "first", "<=", "last", ":", "# There is an overlap", "if", "r", ".", "first", "<", "first", ":", "new_range", ".", "append", "(", "IdRange", "(", "r", ".", "first", ",", "first", "-", "r", ".", "first", ")", ")", "if", "last", "<", "r", ".", "last", ":", "new_range", ".", "append", "(", "IdRange", "(", "last", "+", "1", ",", "r", ".", "last", "-", "last", ")", ")", "else", ":", "# No overlap, range is kept", "new_range", ".", "append", "(", "r", ")", "self", ".", "__range", "=", "new_range" ]
Remove a range of count consecutive ids starting at id first from all the ranges in the set.
[ "Remove", "a", "range", "of", "count", "consecutive", "ids", "starting", "at", "id", "first", "from", "all", "the", "ranges", "in", "the", "set", "." ]
3438df304af3dccc5bd1515231402afa708f1cc3
https://github.com/Meseira/subordinate/blob/3438df304af3dccc5bd1515231402afa708f1cc3/subordinate/idrangeset.py#L103-L126
242,121
Meseira/subordinate
subordinate/idrangeset.py
IdRangeSet.simplify
def simplify(self):
    """
    Reorganize the ranges in the set in order to ensure that each
    range is unique and that there is not overlap between to ranges.
    """

    # Sort the ranges
    self.__range.sort()

    new_range = []
    new_first = self.__range[0].first
    new_count = self.__range[0].count
    for r in self.__range:
        if r.first == new_first:
            # Longest range starting at new_first
            new_count = r.count
        elif r.first <= new_first + new_count:
            # Overlapping ranges
            if new_first + new_count - 1 < r.last:
                # There is a part of the range to add to the new range
                new_count = r.last - new_first + 1
        else:
            # No overlap, this is a new disjoint range
            new_range.append(IdRange(new_first, new_count))
            new_first = r.first
            new_count = r.count

    # End of the last range
    new_range.append(IdRange(new_first, new_count))

    self.__range = new_range
python
def simplify(self):
    """
    Reorganize the ranges in the set in order to ensure that each
    range is unique and that there is not overlap between to ranges.
    """

    # Sort the ranges
    self.__range.sort()

    new_range = []
    new_first = self.__range[0].first
    new_count = self.__range[0].count
    for r in self.__range:
        if r.first == new_first:
            # Longest range starting at new_first
            new_count = r.count
        elif r.first <= new_first + new_count:
            # Overlapping ranges
            if new_first + new_count - 1 < r.last:
                # There is a part of the range to add to the new range
                new_count = r.last - new_first + 1
        else:
            # No overlap, this is a new disjoint range
            new_range.append(IdRange(new_first, new_count))
            new_first = r.first
            new_count = r.count

    # End of the last range
    new_range.append(IdRange(new_first, new_count))

    self.__range = new_range
[ "def", "simplify", "(", "self", ")", ":", "# Sort the ranges", "self", ".", "__range", ".", "sort", "(", ")", "new_range", "=", "[", "]", "new_first", "=", "self", ".", "__range", "[", "0", "]", ".", "first", "new_count", "=", "self", ".", "__range", "[", "0", "]", ".", "count", "for", "r", "in", "self", ".", "__range", ":", "if", "r", ".", "first", "==", "new_first", ":", "# Longest range starting at new_first", "new_count", "=", "r", ".", "count", "elif", "r", ".", "first", "<=", "new_first", "+", "new_count", ":", "# Overlapping ranges", "if", "new_first", "+", "new_count", "-", "1", "<", "r", ".", "last", ":", "# There is a part of the range to add to the new range", "new_count", "=", "r", ".", "last", "-", "new_first", "+", "1", "else", ":", "# No overlap, this is a new disjoint range", "new_range", ".", "append", "(", "IdRange", "(", "new_first", ",", "new_count", ")", ")", "new_first", "=", "r", ".", "first", "new_count", "=", "r", ".", "count", "# End of the last range", "new_range", ".", "append", "(", "IdRange", "(", "new_first", ",", "new_count", ")", ")", "self", ".", "__range", "=", "new_range" ]
Reorganize the ranges in the set in order to ensure that each range is unique and that there is not overlap between to ranges.
[ "Reorganize", "the", "ranges", "in", "the", "set", "in", "order", "to", "ensure", "that", "each", "range", "is", "unique", "and", "that", "there", "is", "not", "overlap", "between", "to", "ranges", "." ]
3438df304af3dccc5bd1515231402afa708f1cc3
https://github.com/Meseira/subordinate/blob/3438df304af3dccc5bd1515231402afa708f1cc3/subordinate/idrangeset.py#L128-L159
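A worked sketch tying the three `IdRangeSet` records together (the no-argument constructor is an assumption; the arithmetic follows the methods exactly as shown):

    s = IdRangeSet()         # hypothetical constructor
    s.append(100000, 65536)  # ids 100000..165535
    s.append(150000, 65536)  # ids 150000..215535, overlapping the first range
    s.simplify()             # merged into one range: 100000..215535
    s.remove(120000, 10)     # split into 100000..119999 and 120010..215535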
242,122
fixmydjango/fixmydjango-lib
fixmydjango/__init__.py
ExceptionReporterPatch._get_fix_my_django_submission_url
def _get_fix_my_django_submission_url(self, tb_info, sanitized_tb):
    """
    Links to the error submission url with pre filled fields
    """
    err_post_create_path = '/create/'
    url = '{0}{1}'.format(base_url, err_post_create_path)
    return '{url}?{query}'.format(
        url=url,
        query=urlencode({
            'exception_type': clean_exception_type(tb_info['parsed_traceback']['exc_type']),
            'error_message': tb_info['parsed_traceback']['exc_msg'],
            'django_version': '{0[0]}.{0[1]}'.format(django.VERSION),
            'traceback': sanitized_tb
        }))
python
def _get_fix_my_django_submission_url(self, tb_info, sanitized_tb):
    """
    Links to the error submission url with pre filled fields
    """
    err_post_create_path = '/create/'
    url = '{0}{1}'.format(base_url, err_post_create_path)
    return '{url}?{query}'.format(
        url=url,
        query=urlencode({
            'exception_type': clean_exception_type(tb_info['parsed_traceback']['exc_type']),
            'error_message': tb_info['parsed_traceback']['exc_msg'],
            'django_version': '{0[0]}.{0[1]}'.format(django.VERSION),
            'traceback': sanitized_tb
        }))
[ "def", "_get_fix_my_django_submission_url", "(", "self", ",", "tb_info", ",", "sanitized_tb", ")", ":", "err_post_create_path", "=", "'/create/'", "url", "=", "'{0}{1}'", ".", "format", "(", "base_url", ",", "err_post_create_path", ")", "return", "'{url}?{query}'", ".", "format", "(", "url", "=", "url", ",", "query", "=", "urlencode", "(", "{", "'exception_type'", ":", "clean_exception_type", "(", "tb_info", "[", "'parsed_traceback'", "]", "[", "'exc_type'", "]", ")", ",", "'error_message'", ":", "tb_info", "[", "'parsed_traceback'", "]", "[", "'exc_msg'", "]", ",", "'django_version'", ":", "'{0[0]}.{0[1]}'", ".", "format", "(", "django", ".", "VERSION", ")", ",", "'traceback'", ":", "sanitized_tb", "}", ")", ")" ]
Links to the error submission url with pre filled fields
[ "Links", "to", "the", "error", "submission", "url", "with", "pre", "filled", "fields" ]
5402e9cb15d85daa68bb5f2418ff9c4ea966d306
https://github.com/fixmydjango/fixmydjango-lib/blob/5402e9cb15d85daa68bb5f2418ff9c4ea966d306/fixmydjango/__init__.py#L62-L75
242,123
iamFIREcracker/aadbook
aadbook/config.py
_get_config
def _get_config(config_file):
    '''find, read and parse configuraton.'''
    parser = ConfigParser.SafeConfigParser()
    if os.path.lexists(config_file):
        try:
            log.info('Reading config: %s', config_file)
            inp = open(config_file)
            parser.readfp(inp)
            return parser
        except (IOError, ConfigParser.ParsingError), err:
            raise ConfigError("Failed to read configuration %s\n%s" %
                              (config_file, err))
    return None
python
def _get_config(config_file):
    '''find, read and parse configuraton.'''
    parser = ConfigParser.SafeConfigParser()
    if os.path.lexists(config_file):
        try:
            log.info('Reading config: %s', config_file)
            inp = open(config_file)
            parser.readfp(inp)
            return parser
        except (IOError, ConfigParser.ParsingError), err:
            raise ConfigError("Failed to read configuration %s\n%s" %
                              (config_file, err))
    return None
[ "def", "_get_config", "(", "config_file", ")", ":", "parser", "=", "ConfigParser", ".", "SafeConfigParser", "(", ")", "if", "os", ".", "path", ".", "lexists", "(", "config_file", ")", ":", "try", ":", "log", ".", "info", "(", "'Reading config: %s'", ",", "config_file", ")", "inp", "=", "open", "(", "config_file", ")", "parser", ".", "readfp", "(", "inp", ")", "return", "parser", "except", "(", "IOError", ",", "ConfigParser", ".", "ParsingError", ")", ",", "err", ":", "raise", "ConfigError", "(", "\"Failed to read configuration %s\\n%s\"", "%", "(", "config_file", ",", "err", ")", ")", "return", "None" ]
find, read and parse configuraton.
[ "find", "read", "and", "parse", "configuraton", "." ]
d191e9d36a2309449ab91c1728eaf5901b7ef91c
https://github.com/iamFIREcracker/aadbook/blob/d191e9d36a2309449ab91c1728eaf5901b7ef91c/aadbook/config.py#L65-L76
242,124
bretth/djset
djset/utils.py
getbool
def getbool(key, default=False):
    """
    Returns True or False for any TRUE, FALSE, 0, or 1.
    Other values return default.
    """
    value = os.getenv(key)
    if value and value.lower() in ('true', '1'):
        value = True
    elif value and value.lower() in ('false', '0'):
        value = False
    else:
        value = default
    return value
python
def getbool(key, default=False):
    """
    Returns True or False for any TRUE, FALSE, 0, or 1.
    Other values return default.
    """
    value = os.getenv(key)
    if value and value.lower() in ('true', '1'):
        value = True
    elif value and value.lower() in ('false', '0'):
        value = False
    else:
        value = default
    return value
[ "def", "getbool", "(", "key", ",", "default", "=", "False", ")", ":", "value", "=", "os", ".", "getenv", "(", "key", ")", "if", "value", "and", "value", ".", "lower", "(", ")", "in", "(", "'true'", ",", "'1'", ")", ":", "value", "=", "True", "elif", "value", "and", "value", ".", "lower", "(", ")", "in", "(", "'false'", ",", "'0'", ")", ":", "value", "=", "False", "else", ":", "value", "=", "default", "return", "value" ]
Returns True or False for any TRUE, FALSE, 0, or 1. Other values return default.
[ "Returns", "True", "or", "False", "for", "any", "TRUE", "FALSE", "0", "or", "1", ".", "Other", "values", "return", "default", "." ]
e04cbcadc311f6edec50a718415d0004aa304034
https://github.com/bretth/djset/blob/e04cbcadc311f6edec50a718415d0004aa304034/djset/utils.py#L4-L16
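A quick sketch of the truth table for this record's function (environment values are illustrative; the function is the `getbool` shown above):

    import os

    os.environ['FEATURE_X'] = 'TRUE'
    getbool('FEATURE_X')                # True  ('true'/'1', case-insensitive)
    getbool('FEATURE_Y')                # False (unset, falls back to default)
    getbool('FEATURE_Y', default=True)  # True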
242,125
bretth/djset
djset/utils.py
_locate_settings
def _locate_settings(settings=''):
    "Return the path to the DJANGO_SETTINGS_MODULE"
    import imp
    import sys
    sys.path.append(os.getcwd())
    settings = settings or os.getenv('DJANGO_SETTINGS_MODULE')
    if settings:
        parts = settings.split('.')
        f = imp.find_module(parts[0])[1]
        args = [f] + parts[1:]
        path = os.path.join(*args)
        path = path + '.py'
        if os.path.exists(path):
            return path
python
def _locate_settings(settings=''):
    "Return the path to the DJANGO_SETTINGS_MODULE"
    import imp
    import sys
    sys.path.append(os.getcwd())
    settings = settings or os.getenv('DJANGO_SETTINGS_MODULE')
    if settings:
        parts = settings.split('.')
        f = imp.find_module(parts[0])[1]
        args = [f] + parts[1:]
        path = os.path.join(*args)
        path = path + '.py'
        if os.path.exists(path):
            return path
[ "def", "_locate_settings", "(", "settings", "=", "''", ")", ":", "import", "imp", "import", "sys", "sys", ".", "path", ".", "append", "(", "os", ".", "getcwd", "(", ")", ")", "settings", "=", "settings", "or", "os", ".", "getenv", "(", "'DJANGO_SETTINGS_MODULE'", ")", "if", "settings", ":", "parts", "=", "settings", ".", "split", "(", "'.'", ")", "f", "=", "imp", ".", "find_module", "(", "parts", "[", "0", "]", ")", "[", "1", "]", "args", "=", "[", "f", "]", "+", "parts", "[", "1", ":", "]", "path", "=", "os", ".", "path", ".", "join", "(", "*", "args", ")", "path", "=", "path", "+", "'.py'", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "return", "path" ]
Return the path to the DJANGO_SETTINGS_MODULE
[ "Return", "the", "path", "to", "the", "DJANGO_SETTINGS_MODULE" ]
e04cbcadc311f6edec50a718415d0004aa304034
https://github.com/bretth/djset/blob/e04cbcadc311f6edec50a718415d0004aa304034/djset/utils.py#L19-L33
242,126
saltzm/yadi
yadi/datalog2sql/ast2sql/sql_generator.py
ConjunctiveQuerySQLGenerator.get_from_relations
def get_from_relations(self, query, aliases):
    ''' Returns list of the names of all positive relations in the query '''
    return [aliases[rel.get_name()] for rel in query.get_relations()
            if not rel.is_negated()]
python
def get_from_relations(self, query, aliases):
    ''' Returns list of the names of all positive relations in the query '''
    return [aliases[rel.get_name()] for rel in query.get_relations()
            if not rel.is_negated()]
[ "def", "get_from_relations", "(", "self", ",", "query", ",", "aliases", ")", ":", "return", "[", "aliases", "[", "rel", ".", "get_name", "(", ")", "]", "for", "rel", "in", "query", ".", "get_relations", "(", ")", "if", "not", "rel", ".", "is_negated", "(", ")", "]" ]
Returns list of the names of all positive relations in the query
[ "Returns", "list", "of", "the", "names", "of", "all", "positive", "relations", "in", "the", "query" ]
755790167c350e650c1e8b15c6f9209a97be9e42
https://github.com/saltzm/yadi/blob/755790167c350e650c1e8b15c6f9209a97be9e42/yadi/datalog2sql/ast2sql/sql_generator.py#L181-L185
242,127
gear11/pypelogs
pypein/flickr.py
Flickr.photo
def photo(self, args):
    """
    Retrieves metadata for a specific photo.

    flickr:(credsfile),photo,(photo_id)
    """
    rsp = self._load_rsp(self.flickr.photos_getInfo(photo_id=args[0]))
    p = rsp['photo']
    yield self._prep(p)
python
def photo(self, args):
    """
    Retrieves metadata for a specific photo.

    flickr:(credsfile),photo,(photo_id)
    """
    rsp = self._load_rsp(self.flickr.photos_getInfo(photo_id=args[0]))
    p = rsp['photo']
    yield self._prep(p)
[ "def", "photo", "(", "self", ",", "args", ")", ":", "rsp", "=", "self", ".", "_load_rsp", "(", "self", ".", "flickr", ".", "photos_getInfo", "(", "photo_id", "=", "args", "[", "0", "]", ")", ")", "p", "=", "rsp", "[", "'photo'", "]", "yield", "self", ".", "_prep", "(", "p", ")" ]
Retrieves metadata for a specific photo. flickr:(credsfile),photo,(photo_id)
[ "Retrieves", "metadata", "for", "a", "specific", "photo", "." ]
da5dc0fee5373a4be294798b5e32cd0a803d8bbe
https://github.com/gear11/pypelogs/blob/da5dc0fee5373a4be294798b5e32cd0a803d8bbe/pypein/flickr.py#L58-L66
242,128
gear11/pypelogs
pypein/flickr.py
Flickr.interesting
def interesting(self, args=None):
    """
    Gets interesting photos.

    flickr:(credsfile),interesting
    """
    kwargs = {'extras': ','.join(args) if args else 'last_update,geo,owner_name,url_sq'}
    return self._paged_api_call(self.flickr.interestingness_getList, kwargs)
python
def interesting(self, args=None):
    """
    Gets interesting photos.

    flickr:(credsfile),interesting
    """
    kwargs = {'extras': ','.join(args) if args else 'last_update,geo,owner_name,url_sq'}
    return self._paged_api_call(self.flickr.interestingness_getList, kwargs)
[ "def", "interesting", "(", "self", ",", "args", "=", "None", ")", ":", "kwargs", "=", "{", "'extras'", ":", "','", ".", "join", "(", "args", ")", "if", "args", "else", "'last_update,geo,owner_name,url_sq'", "}", "return", "self", ".", "_paged_api_call", "(", "self", ".", "flickr", ".", "interestingness_getList", ",", "kwargs", ")" ]
Gets interesting photos. flickr:(credsfile),interesting
[ "Gets", "interesting", "photos", "." ]
da5dc0fee5373a4be294798b5e32cd0a803d8bbe
https://github.com/gear11/pypelogs/blob/da5dc0fee5373a4be294798b5e32cd0a803d8bbe/pypein/flickr.py#L80-L88
242,129
gear11/pypelogs
pypein/flickr.py
Flickr._paged_api_call
def _paged_api_call(self, func, kwargs, item_type='photo'):
    """
    Takes a Flickr API function object and dict of keyword args and calls
    the API call repeatedly with an incrementing page value until all
    contents are exhausted.  Flickr seems to limit to about 500 items.
    """
    page = 1
    while True:
        LOG.info("Fetching page %s" % page)
        kwargs['page'] = page
        rsp = self._load_rsp(func(**kwargs))
        if rsp["stat"] == "ok":
            plural = item_type + 's'
            if plural in rsp:
                items = rsp[plural]
                if int(items["page"]) < page:
                    LOG.info("End of Flickr pages (%s pages with %s per page)" % (items["pages"], items["perpage"]))
                    break
                for i in items[item_type]:
                    yield self._prep(i)
            else:
                yield rsp
            page += 1
        else:
            yield [rsp]
            break
python
def _paged_api_call(self, func, kwargs, item_type='photo'):
    """
    Takes a Flickr API function object and dict of keyword args and calls
    the API call repeatedly with an incrementing page value until all
    contents are exhausted.  Flickr seems to limit to about 500 items.
    """
    page = 1
    while True:
        LOG.info("Fetching page %s" % page)
        kwargs['page'] = page
        rsp = self._load_rsp(func(**kwargs))
        if rsp["stat"] == "ok":
            plural = item_type + 's'
            if plural in rsp:
                items = rsp[plural]
                if int(items["page"]) < page:
                    LOG.info("End of Flickr pages (%s pages with %s per page)" % (items["pages"], items["perpage"]))
                    break
                for i in items[item_type]:
                    yield self._prep(i)
            else:
                yield rsp
            page += 1
        else:
            yield [rsp]
            break
[ "def", "_paged_api_call", "(", "self", ",", "func", ",", "kwargs", ",", "item_type", "=", "'photo'", ")", ":", "page", "=", "1", "while", "True", ":", "LOG", ".", "info", "(", "\"Fetching page %s\"", "%", "page", ")", "kwargs", "[", "'page'", "]", "=", "page", "rsp", "=", "self", ".", "_load_rsp", "(", "func", "(", "*", "*", "kwargs", ")", ")", "if", "rsp", "[", "\"stat\"", "]", "==", "\"ok\"", ":", "plural", "=", "item_type", "+", "'s'", "if", "plural", "in", "rsp", ":", "items", "=", "rsp", "[", "plural", "]", "if", "int", "(", "items", "[", "\"page\"", "]", ")", "<", "page", ":", "LOG", ".", "info", "(", "\"End of Flickr pages (%s pages with %s per page)\"", "%", "(", "items", "[", "\"pages\"", "]", ",", "items", "[", "\"perpage\"", "]", ")", ")", "break", "for", "i", "in", "items", "[", "item_type", "]", ":", "yield", "self", ".", "_prep", "(", "i", ")", "else", ":", "yield", "rsp", "page", "+=", "1", "else", ":", "yield", "[", "rsp", "]", "break" ]
Takes a Flickr API function object and dict of keyword args, and calls the API repeatedly with an incrementing page value until all contents are exhausted. Flickr seems to limit results to about 500 items.
[ "Takes", "a", "Flickr", "API", "function", "object", "and", "dict", "of", "keyword", "args", "and", "calls", "the", "API", "call", "repeatedly", "with", "an", "incrementing", "page", "value", "until", "all", "contents", "are", "exhausted", ".", "Flickr", "seems", "to", "limit", "to", "about", "500", "items", "." ]
da5dc0fee5373a4be294798b5e32cd0a803d8bbe
https://github.com/gear11/pypelogs/blob/da5dc0fee5373a4be294798b5e32cd0a803d8bbe/pypein/flickr.py#L108-L133
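The stop condition above depends on Flickr echoing back a page number lower than the one requested once the listing is exhausted. A minimal, runnable sketch of that loop shape; fetch_page here is a hypothetical stand-in for the Flickr call, not part of pypelogs:

import json

def fetch_page(page, per_page=2, total=5):
    # Hypothetical stand-in for a Flickr call: returns a JSON string shaped
    # like a paged flickr.interestingness.getList response.
    start = (page - 1) * per_page
    photos = [{"id": str(i)} for i in range(start, min(start + per_page, total))]
    pages = (total + per_page - 1) // per_page
    # Like Flickr, echo back the last real page once the caller reads past
    # the end; that is exactly what the items["page"] < page test detects.
    return json.dumps({"stat": "ok",
                       "photos": {"page": min(page, pages), "pages": pages,
                                  "perpage": per_page, "photo": photos}})

def paged(fetch):
    page = 1
    while True:
        items = json.loads(fetch(page))["photos"]
        if int(items["page"]) < page:
            break
        for photo in items["photo"]:
            yield photo
        page += 1

print(len(list(paged(fetch_page))))   # 5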
242,130
gear11/pypelogs
pypein/flickr.py
Flickr._prep
def _prep(e):
    """
    Normalizes lastupdate to a timestamp, and constructs a URL from the
    embedded attributes.
    """
    if 'lastupdate' in e:
        e['lastupdate'] = datetime.datetime.fromtimestamp(int(e['lastupdate']))
    for k in ['farm', 'server', 'id', 'secret']:
        if k not in e:
            return e
    e["url"] = "https://farm%s.staticflickr.com/%s/%s_%s_b.jpg" % (e["farm"], e["server"], e["id"], e["secret"])
    return e
python
def _prep(e):
    """
    Normalizes lastupdate to a timestamp, and constructs a URL from the
    embedded attributes.
    """
    if 'lastupdate' in e:
        e['lastupdate'] = datetime.datetime.fromtimestamp(int(e['lastupdate']))
    for k in ['farm', 'server', 'id', 'secret']:
        if k not in e:
            return e
    e["url"] = "https://farm%s.staticflickr.com/%s/%s_%s_b.jpg" % (e["farm"], e["server"], e["id"], e["secret"])
    return e
[ "def", "_prep", "(", "e", ")", ":", "if", "'lastupdate'", "in", "e", ":", "e", "[", "'lastupdate'", "]", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "int", "(", "e", "[", "'lastupdate'", "]", ")", ")", "for", "k", "in", "[", "'farm'", ",", "'server'", ",", "'id'", ",", "'secret'", "]", ":", "if", "not", "k", "in", "e", ":", "return", "e", "e", "[", "\"url\"", "]", "=", "\"https://farm%s.staticflickr.com/%s/%s_%s_b.jpg\"", "%", "(", "e", "[", "\"farm\"", "]", ",", "e", "[", "\"server\"", "]", ",", "e", "[", "\"id\"", "]", ",", "e", "[", "\"secret\"", "]", ")", "return", "e" ]
Normalizes lastupdate to a timestamp, and constructs a URL from the embedded attributes.
[ "Normalizes", "lastupdate", "to", "a", "timestamp", "and", "constructs", "a", "URL", "from", "the", "embedded", "attributes", "." ]
da5dc0fee5373a4be294798b5e32cd0a803d8bbe
https://github.com/gear11/pypelogs/blob/da5dc0fee5373a4be294798b5e32cd0a803d8bbe/pypein/flickr.py#L136-L146
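For illustration, the same two transformations applied to a made-up photo record (the farm/server/id/secret values are placeholders, not real Flickr data):

import datetime

photo = {"farm": "66", "server": "7372", "id": "12345", "secret": "abcdef",
         "lastupdate": "1400000000"}

# Same steps as Flickr._prep, inlined for illustration.
photo["lastupdate"] = datetime.datetime.fromtimestamp(int(photo["lastupdate"]))
photo["url"] = "https://farm%s.staticflickr.com/%s/%s_%s_b.jpg" % (
    photo["farm"], photo["server"], photo["id"], photo["secret"])

print(photo["url"])
# https://farm66.staticflickr.com/7372/12345_abcdef_b.jpg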
242,131
gear11/pypelogs
pypein/flickr.py
Flickr._load_rsp
def _load_rsp(rsp):
    """
    Converts raw Flickr string response to Python dict
    """
    first = rsp.find('(') + 1
    last = rsp.rfind(')')
    return json.loads(rsp[first:last])
python
def _load_rsp(rsp):
    """
    Converts raw Flickr string response to Python dict
    """
    first = rsp.find('(') + 1
    last = rsp.rfind(')')
    return json.loads(rsp[first:last])
[ "def", "_load_rsp", "(", "rsp", ")", ":", "first", "=", "rsp", ".", "find", "(", "'('", ")", "+", "1", "last", "=", "rsp", ".", "rfind", "(", "')'", ")", "return", "json", ".", "loads", "(", "rsp", "[", "first", ":", "last", "]", ")" ]
Converts raw Flickr string response to Python dict
[ "Converts", "raw", "Flickr", "string", "response", "to", "Python", "dict" ]
da5dc0fee5373a4be294798b5e32cd0a803d8bbe
https://github.com/gear11/pypelogs/blob/da5dc0fee5373a4be294798b5e32cd0a803d8bbe/pypein/flickr.py#L149-L155
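The slicing above strips a JSONP envelope, which is what Flickr's REST API returns when the JSON callback wrapper is left enabled. A quick demonstration with a fabricated response:

import json

raw = 'jsonFlickrApi({"stat": "ok", "photo": {"id": "12345"}})'
first, last = raw.find('(') + 1, raw.rfind(')')
print(json.loads(raw[first:last]))   # {'stat': 'ok', 'photo': {'id': '12345'}}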
242,132
BlackEarth/bf
bf/styles.py
Styles.styleProperties
def styleProperties(Class, style):
    """return a properties dict from a given cssutils style
    """
    properties = Dict()
    for property in style.getProperties(all=True):
        stylename = property.name + ':'
        properties[stylename] = property.value
        if property.priority != '':
            properties[stylename] += ' !' + property.priority
    return properties
python
def styleProperties(Class, style):
    """return a properties dict from a given cssutils style
    """
    properties = Dict()
    for property in style.getProperties(all=True):
        stylename = property.name + ':'
        properties[stylename] = property.value
        if property.priority != '':
            properties[stylename] += ' !' + property.priority
    return properties
[ "def", "styleProperties", "(", "Class", ",", "style", ")", ":", "properties", "=", "Dict", "(", ")", "for", "property", "in", "style", ".", "getProperties", "(", "all", "=", "True", ")", ":", "stylename", "=", "property", ".", "name", "+", "':'", "properties", "[", "stylename", "]", "=", "property", ".", "value", "if", "property", ".", "priority", "!=", "''", ":", "properties", "[", "stylename", "]", "=", "' !'", "+", "property", ".", "priority", "return", "properties" ]
return a properties dict from a given cssutils style
[ "return", "a", "properties", "dict", "from", "a", "given", "cssutils", "style" ]
376041168874bbd6dee5ccfeece4a9e553223316
https://github.com/BlackEarth/bf/blob/376041168874bbd6dee5ccfeece4a9e553223316/bf/styles.py#L14-L23
242,133
BlackEarth/bf
bf/styles.py
Styles.from_css
def from_css(Class, csstext, encoding=None, href=None, media=None, title=None, validate=None):
    """parse CSS text into a Styles object, using cssutils
    """
    styles = Class()
    cssStyleSheet = cssutils.parseString(csstext, encoding=encoding, href=href,
                                         media=media, title=title, validate=validate)
    for rule in cssStyleSheet.cssRules:
        if rule.type == cssutils.css.CSSRule.FONT_FACE_RULE:
            if styles.get('@font-face') is None:
                styles['@font-face'] = []
            styles['@font-face'].append(Class.styleProperties(rule.style))
        elif rule.type == cssutils.css.CSSRule.IMPORT_RULE:
            if styles.get('@import') is None:
                styles['@import'] = []
            styles['@import'].append("url(%s)" % rule.href)
        elif rule.type == cssutils.css.CSSRule.NAMESPACE_RULE:
            if styles.get('@namespace') is None:
                styles['@namespace'] = {}
            styles['@namespace'][rule.prefix] = rule.namespaceURI
        elif rule.type == cssutils.css.CSSRule.MEDIA_RULE:
            if styles.get('@media') is None:
                styles['@media'] = []
            styles['@media'].append(rule.cssText)
        elif rule.type == cssutils.css.CSSRule.PAGE_RULE:
            if styles.get('@page') is None:
                styles['@page'] = []
            styles['@page'].append(rule.cssText)
        elif rule.type == cssutils.css.CSSRule.STYLE_RULE:
            for selector in rule.selectorList:
                sel = selector.selectorText
                if sel not in styles:
                    styles[sel] = Class.styleProperties(rule.style)
        elif rule.type == cssutils.css.CSSRule.CHARSET_RULE:
            styles['@charset'] = rule.encoding
        elif rule.type == cssutils.css.CSSRule.COMMENT:
            # comments are thrown away
            pass
        elif rule.type == cssutils.css.CSSRule.VARIABLES_RULE:
            pass
        else:
            log.warning("Unknown rule type: %r" % rule.cssText)
    return styles
python
def from_css(Class, csstext, encoding=None, href=None, media=None, title=None, validate=None):
    """parse CSS text into a Styles object, using cssutils
    """
    styles = Class()
    cssStyleSheet = cssutils.parseString(csstext, encoding=encoding, href=href,
                                         media=media, title=title, validate=validate)
    for rule in cssStyleSheet.cssRules:
        if rule.type == cssutils.css.CSSRule.FONT_FACE_RULE:
            if styles.get('@font-face') is None:
                styles['@font-face'] = []
            styles['@font-face'].append(Class.styleProperties(rule.style))
        elif rule.type == cssutils.css.CSSRule.IMPORT_RULE:
            if styles.get('@import') is None:
                styles['@import'] = []
            styles['@import'].append("url(%s)" % rule.href)
        elif rule.type == cssutils.css.CSSRule.NAMESPACE_RULE:
            if styles.get('@namespace') is None:
                styles['@namespace'] = {}
            styles['@namespace'][rule.prefix] = rule.namespaceURI
        elif rule.type == cssutils.css.CSSRule.MEDIA_RULE:
            if styles.get('@media') is None:
                styles['@media'] = []
            styles['@media'].append(rule.cssText)
        elif rule.type == cssutils.css.CSSRule.PAGE_RULE:
            if styles.get('@page') is None:
                styles['@page'] = []
            styles['@page'].append(rule.cssText)
        elif rule.type == cssutils.css.CSSRule.STYLE_RULE:
            for selector in rule.selectorList:
                sel = selector.selectorText
                if sel not in styles:
                    styles[sel] = Class.styleProperties(rule.style)
        elif rule.type == cssutils.css.CSSRule.CHARSET_RULE:
            styles['@charset'] = rule.encoding
        elif rule.type == cssutils.css.CSSRule.COMMENT:
            # comments are thrown away
            pass
        elif rule.type == cssutils.css.CSSRule.VARIABLES_RULE:
            pass
        else:
            log.warning("Unknown rule type: %r" % rule.cssText)
    return styles
[ "def", "from_css", "(", "Class", ",", "csstext", ",", "encoding", "=", "None", ",", "href", "=", "None", ",", "media", "=", "None", ",", "title", "=", "None", ",", "validate", "=", "None", ")", ":", "styles", "=", "Class", "(", ")", "cssStyleSheet", "=", "cssutils", ".", "parseString", "(", "csstext", ",", "encoding", "=", "encoding", ",", "href", "=", "href", ",", "media", "=", "media", ",", "title", "=", "title", ",", "validate", "=", "validate", ")", "for", "rule", "in", "cssStyleSheet", ".", "cssRules", ":", "if", "rule", ".", "type", "==", "cssutils", ".", "css", ".", "CSSRule", ".", "FONT_FACE_RULE", ":", "if", "styles", ".", "get", "(", "'@font-face'", ")", "is", "None", ":", "styles", "[", "'@font-face'", "]", "=", "[", "]", "styles", "[", "'@font-face'", "]", ".", "append", "(", "Class", ".", "styleProperties", "(", "rule", ".", "style", ")", ")", "elif", "rule", ".", "type", "==", "cssutils", ".", "css", ".", "CSSRule", ".", "IMPORT_RULE", ":", "if", "styles", ".", "get", "(", "'@import'", ")", "is", "None", ":", "styles", "[", "'@import'", "]", "=", "[", "]", "styles", "[", "'@import'", "]", ".", "append", "(", "\"url(%s)\"", "%", "rule", ".", "href", ")", "elif", "rule", ".", "type", "==", "cssutils", ".", "css", ".", "CSSRule", ".", "NAMESPACE_RULE", ":", "if", "styles", ".", "get", "(", "'@namespace'", ")", "is", "None", ":", "styles", "[", "'@namespace'", "]", "=", "{", "}", "styles", "[", "'@namespace'", "]", "[", "rule", ".", "prefix", "]", "=", "rule", ".", "namespaceURI", "elif", "rule", ".", "type", "==", "cssutils", ".", "css", ".", "CSSRule", ".", "MEDIA_RULE", ":", "if", "styles", ".", "get", "(", "'@media'", ")", "is", "None", ":", "styles", "[", "'@media'", "]", "=", "[", "]", "styles", "[", "'@media'", "]", ".", "append", "(", "rule", ".", "cssText", ")", "elif", "rule", ".", "type", "==", "cssutils", ".", "css", ".", "CSSRule", ".", "PAGE_RULE", ":", "if", "styles", ".", "get", "(", "'@page'", ")", "is", "None", ":", "styles", "[", "'@page'", "]", "=", "[", "]", "styles", "[", "'@page'", "]", ".", "append", "(", "rule", ".", "cssText", ")", "elif", "rule", ".", "type", "==", "cssutils", ".", "css", ".", "CSSRule", ".", "STYLE_RULE", ":", "for", "selector", "in", "rule", ".", "selectorList", ":", "sel", "=", "selector", ".", "selectorText", "if", "sel", "not", "in", "styles", ":", "styles", "[", "sel", "]", "=", "Class", ".", "styleProperties", "(", "rule", ".", "style", ")", "elif", "rule", ".", "type", "==", "cssutils", ".", "css", ".", "CSSRule", ".", "CHARSET_RULE", ":", "styles", "[", "'@charset'", "]", "=", "rule", ".", "encoding", "elif", "rule", ".", "type", "==", "cssutils", ".", "css", ".", "CSSRule", ".", "COMMENT", ":", "# comments are thrown away", "pass", "elif", "rule", ".", "type", "==", "cssutils", ".", "css", ".", "CSSRule", ".", "VARIABLES_RULE", ":", "pass", "else", ":", "log", ".", "warning", "(", "\"Unknown rule type: %r\"", "%", "rule", ".", "cssText", ")", "return", "styles" ]
parse CSS text into a Styles object, using cssutils
[ "parse", "CSS", "text", "into", "a", "Styles", "object", "using", "cssutils" ]
376041168874bbd6dee5ccfeece4a9e553223316
https://github.com/BlackEarth/bf/blob/376041168874bbd6dee5ccfeece4a9e553223316/bf/styles.py#L26-L70
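A hedged usage sketch of the method above; it assumes cssutils is installed and that Styles behaves like a dict, both inferred from the code rather than tested against the bf package:

css = '''
@charset "utf-8";
h1, h2 { color: #333; margin-top: 0 }
@media print { h1 { color: black } }
'''

styles = Styles.from_css(css)
# Per the branches above, the expected shape is roughly:
#   styles['@charset']  -> 'utf-8'
#   styles['h1']        -> {'color:': '#333', 'margin-top:': '0'}  (h2 likewise)
#   styles['@media']    -> a one-element list holding the raw rule text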
242,134
aliafshar/oa2
oa2.py
get_token
def get_token(code, token_service, client_id, client_secret, redirect_uri, grant_type):
    """Fetches an OAuth 2 token."""
    data = {
        'code': code,
        'client_id': client_id,
        'client_secret': client_secret,
        'redirect_uri': redirect_uri,
        'grant_type': grant_type,
    }
    # Get the default http client
    resp = requests.post(token_service, data, verify=False)
    return resp.json()
python
def get_token(code, token_service, client_id, client_secret, redirect_uri, grant_type):
    """Fetches an OAuth 2 token."""
    data = {
        'code': code,
        'client_id': client_id,
        'client_secret': client_secret,
        'redirect_uri': redirect_uri,
        'grant_type': grant_type,
    }
    # Get the default http client
    resp = requests.post(token_service, data, verify=False)
    return resp.json()
[ "def", "get_token", "(", "code", ",", "token_service", ",", "client_id", ",", "client_secret", ",", "redirect_uri", ",", "grant_type", ")", ":", "data", "=", "{", "'code'", ":", "code", ",", "'client_id'", ":", "client_id", ",", "'client_secret'", ":", "client_secret", ",", "'redirect_uri'", ":", "redirect_uri", ",", "'grant_type'", ":", "grant_type", ",", "}", "# Get the default http client", "resp", "=", "requests", ".", "post", "(", "token_service", ",", "data", ",", "verify", "=", "False", ")", "return", "resp", ".", "json", "(", ")" ]
Fetches an OAuth 2 token.
[ "Fetches", "an", "OAuth", "2", "token", "." ]
0df67aea6b393e5a463e90d79bf2c3168e5fcc48
https://github.com/aliafshar/oa2/blob/0df67aea6b393e5a463e90d79bf2c3168e5fcc48/oa2.py#L160-L172
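Stripped to its essentials, the exchange is a plain form-encoded POST. A standalone sketch with placeholder endpoint and credentials (and TLS verification left at its default, unlike the verify=False above):

import requests

resp = requests.post(
    "https://oauth2.example.com/token",       # placeholder endpoint
    data={
        "code": "code-from-the-redirect",     # placeholder values throughout
        "client_id": "example-client-id",
        "client_secret": "example-secret",
        "redirect_uri": "http://localhost:8080/",
        "grant_type": "authorization_code",
    },
)
token = resp.json()   # typically holds access_token, token_type, expires_in, ...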
242,135
aliafshar/oa2
oa2.py
get_auth_uri
def get_auth_uri(auth_service, client_id, scope, redirect_uri, response_type, state, access_type, approval_prompt):
    """Generates an authorization uri."""
    errors = []
    if response_type not in VALID_RESPONSE_TYPES:
        errors.append(
            '{0} is not a valid response_type, must be {1}.'.format(
                response_type, VALID_RESPONSE_TYPES))
    if not client_id:
        errors.append('client_id is missing or empty.')
    if not redirect_uri:
        errors.append('redirect_uri is missing or empty.')
    if not scope:
        errors.append('scope is missing or empty.')
    if access_type not in VALID_ACCESS_TYPES:
        errors.append('access_type is invalid.')
    if approval_prompt not in VALID_APPROVAL_PROMPTS:
        errors.append('approval_prompt is invalid.')
    if errors:
        raise ValueError('Invalid parameters: {0}'.format('\n'.join(errors)))
    params = {
        'response_type': response_type,
        'client_id': client_id,
        'redirect_uri': redirect_uri,
        'scope': scope,
        'access_type': access_type,
        'approval_prompt': approval_prompt,
        'state': state,
    }
    return '?'.join([auth_service, urllib.urlencode(params)])
python
def get_auth_uri(auth_service, client_id, scope, redirect_uri, response_type, state, access_type, approval_prompt):
    """Generates an authorization uri."""
    errors = []
    if response_type not in VALID_RESPONSE_TYPES:
        errors.append(
            '{0} is not a valid response_type, must be {1}.'.format(
                response_type, VALID_RESPONSE_TYPES))
    if not client_id:
        errors.append('client_id is missing or empty.')
    if not redirect_uri:
        errors.append('redirect_uri is missing or empty.')
    if not scope:
        errors.append('scope is missing or empty.')
    if access_type not in VALID_ACCESS_TYPES:
        errors.append('access_type is invalid.')
    if approval_prompt not in VALID_APPROVAL_PROMPTS:
        errors.append('approval_prompt is invalid.')
    if errors:
        raise ValueError('Invalid parameters: {0}'.format('\n'.join(errors)))
    params = {
        'response_type': response_type,
        'client_id': client_id,
        'redirect_uri': redirect_uri,
        'scope': scope,
        'access_type': access_type,
        'approval_prompt': approval_prompt,
        'state': state,
    }
    return '?'.join([auth_service, urllib.urlencode(params)])
[ "def", "get_auth_uri", "(", "auth_service", ",", "client_id", ",", "scope", ",", "redirect_uri", ",", "response_type", ",", "state", ",", "access_type", ",", "approval_prompt", ")", ":", "errors", "=", "[", "]", "if", "response_type", "not", "in", "VALID_RESPONSE_TYPES", ":", "errors", ".", "append", "(", "'{0} is not a valid response_type, must be {1}.'", ".", "format", "(", "response_type", ",", "VALID_RESPONSE_TYPES", ")", ")", "if", "not", "client_id", ":", "errors", ".", "append", "(", "'client_id is missing or empty.'", ")", "if", "not", "redirect_uri", ":", "errors", ".", "append", "(", "'redirect_uri is missing or empty.'", ")", "if", "not", "scope", ":", "errors", ".", "append", "(", "'scope is missing or empty.'", ")", "if", "access_type", "not", "in", "VALID_ACCESS_TYPES", ":", "errors", ".", "append", "(", "'access_type is invalid.'", ")", "if", "approval_prompt", "not", "in", "VALID_APPROVAL_PROMPTS", ":", "errors", ".", "append", "(", "'approval_prompt is invalid'", ")", "if", "errors", ":", "raise", "ValueError", "(", "'Invalid parameters: {0}'", ".", "format", "(", "'\\n'", ".", "join", "(", "errors", ")", ")", ")", "params", "=", "{", "'response_type'", ":", "response_type", ",", "'client_id'", ":", "client_id", ",", "'redirect_uri'", ":", "redirect_uri", ",", "'scope'", ":", "scope", ",", "'access_type'", ":", "access_type", ",", "'approval_prompt'", ":", "approval_prompt", ",", "'state'", ":", "state", ",", "}", "return", "'?'", ".", "join", "(", "[", "auth_service", ",", "urllib", ".", "urlencode", "(", "params", ")", "]", ")" ]
Generates an authorization uri.
[ "Generates", "an", "authorization", "uri", "." ]
0df67aea6b393e5a463e90d79bf2c3168e5fcc48
https://github.com/aliafshar/oa2/blob/0df67aea6b393e5a463e90d79bf2c3168e5fcc48/oa2.py#L174-L203
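urllib.urlencode marks this as Python 2 code; a rough Python 3 equivalent of the final URI assembly, with illustrative endpoint and parameter values:

from urllib.parse import urlencode

params = {
    "response_type": "code",
    "client_id": "example-client-id",
    "redirect_uri": "http://localhost:8080/",
    "scope": "email",
    "access_type": "offline",
    "approval_prompt": "auto",
    "state": "xyz",
}
print("?".join(["https://auth.example.com/authorize", urlencode(params)]))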
242,136
aliafshar/oa2
oa2.py
refresh_token
def refresh_token(token_service, refresh_token, client_id, client_secret):
    """Refreshes a token."""
    data = {
        'client_id': client_id,
        'client_secret': client_secret,
        'refresh_token': refresh_token,
        'grant_type': 'refresh_token',
    }
    resp = requests.post(token_service, data)
    print(resp, 'refreshing', resp.json())
    return resp.json()
python
def refresh_token(token_service, refresh_token, client_id, client_secret):
    """Refreshes a token."""
    data = {
        'client_id': client_id,
        'client_secret': client_secret,
        'refresh_token': refresh_token,
        'grant_type': 'refresh_token',
    }
    resp = requests.post(token_service, data)
    print(resp, 'refreshing', resp.json())
    return resp.json()
[ "def", "refresh_token", "(", "token_service", ",", "refresh_token", ",", "client_id", ",", "client_secret", ")", ":", "data", "=", "{", "'client_id'", ":", "client_id", ",", "'client_secret'", ":", "client_secret", ",", "'refresh_token'", ":", "refresh_token", ",", "'grant_type'", ":", "'refresh_token'", ",", "}", "resp", "=", "requests", ".", "post", "(", "token_service", ",", "data", ")", "print", "resp", ",", "'refreshing'", ",", "resp", ".", "json", "(", ")", "return", "resp", ".", "json", "(", ")" ]
Refreshes a token.
[ "Refreshes", "a", "token", "." ]
0df67aea6b393e5a463e90d79bf2c3168e5fcc48
https://github.com/aliafshar/oa2/blob/0df67aea6b393e5a463e90d79bf2c3168e5fcc48/oa2.py#L205-L215
242,137
aliafshar/oa2
oa2.py
run_local
def run_local(client):
    """Starts a local web server and waits for a redirect."""
    webbrowser.open(client.get_auth_uri())
    code = wait_for_redirect()
    return client.get_token(code)
python
def run_local(client):
    """Starts a local web server and waits for a redirect."""
    webbrowser.open(client.get_auth_uri())
    code = wait_for_redirect()
    return client.get_token(code)
[ "def", "run_local", "(", "client", ")", ":", "webbrowser", ".", "open", "(", "client", ".", "get_auth_uri", "(", ")", ")", "code", "=", "wait_for_redirect", "(", ")", "return", "client", ".", "get_token", "(", "code", ")" ]
Starts a local web server and waits for a redirect.
[ "Starts", "a", "local", "web", "server", "and", "wait", "for", "a", "redirect", "." ]
0df67aea6b393e5a463e90d79bf2c3168e5fcc48
https://github.com/aliafshar/oa2/blob/0df67aea6b393e5a463e90d79bf2c3168e5fcc48/oa2.py#L323-L327
242,138
aliafshar/oa2
oa2.py
main
def main(argv):
    """Entry point for command line script to perform OAuth 2.0."""
    p = argparse.ArgumentParser()
    p.add_argument('-s', '--scope', nargs='+')
    p.add_argument('-o', '--oauth-service', default='google')
    p.add_argument('-i', '--client-id')
    p.add_argument('-x', '--client-secret')
    p.add_argument('-r', '--redirect-uri')
    p.add_argument('-f', '--client-secrets')
    args = p.parse_args(argv)
    client_args = (args.client_id, args.client_secret, args.redirect_uri)
    if any(client_args) and not all(client_args):
        print('Must provide none of client-id, client-secret and redirect-uri;'
              ' or all of them.')
        p.print_usage()
        return 1
    print(args.scope)
    if not args.scope:
        print('Scope must be provided.')
        p.print_usage()
        return 1
    config = WizardClientConfig()
    config.scope = ' '.join(args.scope)
    print(run_local(UserOAuth2(config))['access_token'])
    return 0
python
def main(argv):
    """Entry point for command line script to perform OAuth 2.0."""
    p = argparse.ArgumentParser()
    p.add_argument('-s', '--scope', nargs='+')
    p.add_argument('-o', '--oauth-service', default='google')
    p.add_argument('-i', '--client-id')
    p.add_argument('-x', '--client-secret')
    p.add_argument('-r', '--redirect-uri')
    p.add_argument('-f', '--client-secrets')
    args = p.parse_args(argv)
    client_args = (args.client_id, args.client_secret, args.redirect_uri)
    if any(client_args) and not all(client_args):
        print('Must provide none of client-id, client-secret and redirect-uri;'
              ' or all of them.')
        p.print_usage()
        return 1
    print(args.scope)
    if not args.scope:
        print('Scope must be provided.')
        p.print_usage()
        return 1
    config = WizardClientConfig()
    config.scope = ' '.join(args.scope)
    print(run_local(UserOAuth2(config))['access_token'])
    return 0
[ "def", "main", "(", "argv", ")", ":", "p", "=", "argparse", ".", "ArgumentParser", "(", ")", "p", ".", "add_argument", "(", "'-s'", ",", "'--scope'", ",", "nargs", "=", "'+'", ")", "p", ".", "add_argument", "(", "'-o'", ",", "'--oauth-service'", ",", "default", "=", "'google'", ")", "p", ".", "add_argument", "(", "'-i'", ",", "'--client-id'", ")", "p", ".", "add_argument", "(", "'-x'", ",", "'--client-secret'", ")", "p", ".", "add_argument", "(", "'-r'", ",", "'--redirect-uri'", ")", "p", ".", "add_argument", "(", "'-f'", ",", "'--client-secrets'", ")", "args", "=", "p", ".", "parse_args", "(", "argv", ")", "client_args", "=", "(", "args", ".", "client_id", ",", "args", ".", "client_secret", ",", "args", ".", "client_id", ")", "if", "any", "(", "client_args", ")", "and", "not", "all", "(", "client_args", ")", ":", "print", "(", "'Must provide none of client-id, client-secret and redirect-uri;'", "' or all of them.'", ")", "p", ".", "print_usage", "(", ")", "return", "1", "print", "args", ".", "scope", "if", "not", "args", ".", "scope", ":", "print", "(", "'Scope must be provided.'", ")", "p", ".", "print_usage", "(", ")", "return", "1", "config", "=", "WizardClientConfig", "(", ")", "config", ".", "scope", "=", "' '", ".", "join", "(", "args", ".", "scope", ")", "print", "(", "run_local", "(", "UserOAuth2", "(", "config", ")", ")", "[", "'access_token'", "]", ")", "return", "0" ]
Entry point for command line script to perform OAuth 2.0.
[ "Entry", "point", "for", "command", "line", "script", "to", "perform", "OAuth", "2", ".", "0", "." ]
0df67aea6b393e5a463e90d79bf2c3168e5fcc48
https://github.com/aliafshar/oa2/blob/0df67aea6b393e5a463e90d79bf2c3168e5fcc48/oa2.py#L382-L406
242,139
BlueHack-Core/blueforge
blueforge/util/file.py
check_and_create_directories
def check_and_create_directories(paths):
    """
    Check and create directories.

    If a directory already exists, it will be removed and a new
    folder created in its place.

    :type paths: Array of string or string
    :param paths: the location of directory
    """
    for path in paths:
        if os.path.exists(path):
            shutil.rmtree(path)
        os.mkdir(path)
python
def check_and_create_directories(paths):
    """
    Check and create directories.

    If a directory already exists, it will be removed and a new
    folder created in its place.

    :type paths: Array of string or string
    :param paths: the location of directory
    """
    for path in paths:
        if os.path.exists(path):
            shutil.rmtree(path)
        os.mkdir(path)
[ "def", "check_and_create_directories", "(", "paths", ")", ":", "for", "path", "in", "paths", ":", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "shutil", ".", "rmtree", "(", "path", ")", "os", ".", "mkdir", "(", "path", ")" ]
Check and create directories.

If a directory already exists, it will be removed and a new
folder created in its place.

:type paths: Array of string or string
:param paths: the location of directory
[ "Check", "and", "create", "directories", "." ]
ac40a888ee9c388638a8f312c51f7500b8891b6c
https://github.com/BlueHack-Core/blueforge/blob/ac40a888ee9c388638a8f312c51f7500b8891b6c/blueforge/util/file.py#L12-L24
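Because an existing directory is removed wholesale, this helper is destructive; a safe way to exercise it is inside a throwaway temporary directory:

import os
import shutil
import tempfile

root = tempfile.mkdtemp()
build = os.path.join(root, "build")
os.mkdir(build)
open(os.path.join(build, "stale.txt"), "w").close()

check_and_create_directories([build])   # removes and recreates 'build'
print(os.listdir(build))                # []: the stale file is gone

shutil.rmtree(root)                     # clean up the sandbox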
242,140
BlueHack-Core/blueforge
blueforge/util/file.py
delete_directories
def delete_directories(paths):
    """
    Delete directories.

    If a directory exists, it will be deleted along with its files.

    :type paths: Array of string or string
    :param paths: the location of directory
    """
    for path in paths:
        if os.path.exists(path):
            shutil.rmtree(path)
python
def delete_directories(paths):
    """
    Delete directories.

    If a directory exists, it will be deleted along with its files.

    :type paths: Array of string or string
    :param paths: the location of directory
    """
    for path in paths:
        if os.path.exists(path):
            shutil.rmtree(path)
[ "def", "delete_directories", "(", "paths", ")", ":", "for", "path", "in", "paths", ":", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "shutil", ".", "rmtree", "(", "path", ")" ]
Delete directories.

If a directory exists, it will be deleted along with its files.

:type paths: Array of string or string
:param paths: the location of directory
[ "Delete", "directories", "." ]
ac40a888ee9c388638a8f312c51f7500b8891b6c
https://github.com/BlueHack-Core/blueforge/blob/ac40a888ee9c388638a8f312c51f7500b8891b6c/blueforge/util/file.py#L27-L38
242,141
BlueHack-Core/blueforge
blueforge/util/file.py
delete_files
def delete_files(paths):
    """
    Delete files.

    If a file exists, it will be deleted.

    :type paths: Array of string or string
    :param paths: the location of file
    """
    for path in paths:
        if os.path.exists(path):
            os.remove(path)
python
def delete_files(paths):
    """
    Delete files.

    If a file exists, it will be deleted.

    :type paths: Array of string or string
    :param paths: the location of file
    """
    for path in paths:
        if os.path.exists(path):
            os.remove(path)
[ "def", "delete_files", "(", "paths", ")", ":", "for", "path", "in", "paths", ":", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "os", ".", "remove", "(", "path", ")" ]
Delete files.

If a file exists, it will be deleted.

:type paths: Array of string or string
:param paths: the location of file
[ "Delete", "files", "." ]
ac40a888ee9c388638a8f312c51f7500b8891b6c
https://github.com/BlueHack-Core/blueforge/blob/ac40a888ee9c388638a8f312c51f7500b8891b6c/blueforge/util/file.py#L41-L52
242,142
firstprayer/monsql
monsql/wrapper_postgresql.py
PostgreSQLDatabase.truncate_table
def truncate_table(self, tablename):
    """
    Use 'TRUNCATE TABLE' to truncate the given table
    """
    self.cursor.execute('TRUNCATE TABLE %s' % tablename)
    self.db.commit()
python
def truncate_table(self, tablename):
    """
    Use 'TRUNCATE TABLE' to truncate the given table
    """
    self.cursor.execute('TRUNCATE TABLE %s' % tablename)
    self.db.commit()
[ "def", "truncate_table", "(", "self", ",", "tablename", ")", ":", "self", ".", "cursor", ".", "execute", "(", "'TRUNCATE TABLE %s'", "%", "tablename", ")", "self", ".", "db", ".", "commit", "(", ")" ]
Use 'TRUNCATE TABLE' to truncate the given table
[ "Use", "TRUNCATE", "TABLE", "to", "truncate", "the", "given", "table" ]
6285c15b574c8664046eae2edfeb548c7b173efd
https://github.com/firstprayer/monsql/blob/6285c15b574c8664046eae2edfeb548c7b173efd/monsql/wrapper_postgresql.py#L52-L57
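Interpolating tablename straight into the SQL string means the caller must fully trust it. Assuming this wrapper sits on psycopg2 (not confirmed by the snippet above), a sketch of the identifier-quoting alternative; an alternative, not what the library currently does:

from psycopg2 import sql

def truncate_table_safely(cursor, tablename):
    # sql.Identifier quotes the name, so unusual or untrusted table names
    # cannot alter the shape of the statement.
    cursor.execute(
        sql.SQL('TRUNCATE TABLE {}').format(sql.Identifier(tablename)))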
242,143
firstprayer/monsql
monsql/wrapper_postgresql.py
PostgreSQLDatabase.create_schema
def create_schema(self, schema_name):
    """
    Create schema. This method is only implemented for this class
    """
    try:
        self.cursor.execute('CREATE SCHEMA %s' % schema_name)
    except Exception as e:
        raise e
    finally:
        self.db.commit()
python
def create_schema(self, schema_name):
    """
    Create schema. This method is only implemented for this class
    """
    try:
        self.cursor.execute('CREATE SCHEMA %s' % schema_name)
    except Exception as e:
        raise e
    finally:
        self.db.commit()
[ "def", "create_schema", "(", "self", ",", "schema_name", ")", ":", "try", ":", "self", ".", "cursor", ".", "execute", "(", "'CREATE SCHEMA %s'", "%", "schema_name", ")", "except", "Exception", "as", "e", ":", "raise", "e", "finally", ":", "self", ".", "db", ".", "commit", "(", ")" ]
Create schema. This method is only implemented for this class
[ "Create", "schema", ".", "This", "method", "only", "implemented", "for", "this", "class" ]
6285c15b574c8664046eae2edfeb548c7b173efd
https://github.com/firstprayer/monsql/blob/6285c15b574c8664046eae2edfeb548c7b173efd/monsql/wrapper_postgresql.py#L59-L68
242,144
hobson/pug-dj
pug/dj/sqlserver.py
datatype
def datatype(dbtype, description, cursor):
    """Google AppEngine Helper to convert a data type into a string."""
    dt = cursor.db.introspection.get_field_type(dbtype, description)
    if type(dt) is tuple:
        return dt[0]
    else:
        return dt
python
def datatype(dbtype, description, cursor):
    """Google AppEngine Helper to convert a data type into a string."""
    dt = cursor.db.introspection.get_field_type(dbtype, description)
    if type(dt) is tuple:
        return dt[0]
    else:
        return dt
[ "def", "datatype", "(", "dbtype", ",", "description", ",", "cursor", ")", ":", "dt", "=", "cursor", ".", "db", ".", "introspection", ".", "get_field_type", "(", "dbtype", ",", "description", ")", "if", "type", "(", "dt", ")", "is", "tuple", ":", "return", "dt", "[", "0", "]", "else", ":", "return", "dt" ]
Google AppEngine Helper to convert a data type into a string.
[ "Google", "AppEngine", "Helper", "to", "convert", "a", "data", "type", "into", "a", "string", "." ]
55678b08755a55366ce18e7d3b8ea8fa4491ab04
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/sqlserver.py#L90-L96
242,145
flytrap/flytrap-base
flytrap/base/backends.py
MongoDjangoFilterBackend.get_filter_class
def get_filter_class(self, view, queryset=None):
    """
    Return the django-filters `FilterSet` used to filter the queryset.
    """
    filter_class = getattr(view, 'filter_class', None)
    if filter_class:
        filter_model = filter_class.Meta.model
        assert issubclass(queryset._document, filter_model), \
            'FilterSet model %s does not match queryset model %s' % \
            (filter_model, queryset.model)
        return filter_class
    return
python
def get_filter_class(self, view, queryset=None):
    """
    Return the django-filters `FilterSet` used to filter the queryset.
    """
    filter_class = getattr(view, 'filter_class', None)
    if filter_class:
        filter_model = filter_class.Meta.model
        assert issubclass(queryset._document, filter_model), \
            'FilterSet model %s does not match queryset model %s' % \
            (filter_model, queryset.model)
        return filter_class
    return
[ "def", "get_filter_class", "(", "self", ",", "view", ",", "queryset", "=", "None", ")", ":", "filter_class", "=", "getattr", "(", "view", ",", "'filter_class'", ",", "None", ")", "if", "filter_class", ":", "filter_model", "=", "filter_class", ".", "Meta", ".", "model", "assert", "issubclass", "(", "queryset", ".", "_document", ",", "filter_model", ")", ",", "'FilterSet model %s does not match queryset model %s'", "%", "(", "filter_model", ",", "queryset", ".", "model", ")", "return", "filter_class", "return" ]
Return the django-filters `FilterSet` used to filter the queryset.
[ "Return", "the", "django", "-", "filters", "FilterSet", "used", "to", "filter", "the", "queryset", "." ]
fa5a3135ce669725f88ef61d5d9f548cedfaf0ff
https://github.com/flytrap/flytrap-base/blob/fa5a3135ce669725f88ef61d5d9f548cedfaf0ff/flytrap/base/backends.py#L11-L24
242,146
pycontribs/pyversion
version/tag_command.py
tag.run
def run(self):
    """Will tag the currently active git commit id with the next release tag id"""
    sha = VersionUtils.run_git_command(["rev-parse", "HEAD"], self.git_dir)
    tag = self.distribution.get_version()
    if self.has_tag(tag, sha):
        tags_sha = VersionUtils.run_git_command(["rev-parse", tag], self.git_dir)
        if sha != tags_sha:
            logger.error(
                "git tag {0} sha does not match the sha requesting to be "
                "tagged; you need to increment the version number. "
                "Skipped tagging!".format(tag)
            )
            return
        else:
            logger.info(
                "git tag {0} already exists for this repo. Skipped tagging!".format(tag)
            )
            return
    logger.info("Adding tag {0} for commit {1}".format(tag, sha))
    if not self.dry_run:
        VersionUtils.run_git_command(
            ["tag", "-m", '""', tag, sha], self.git_dir, throw_on_error=True
        )
        logger.info("Pushing tag {0} to remote {1}".format(tag, self.remote))
        VersionUtils.run_git_command(
            ["push", self.remote, tag], self.git_dir, throw_on_error=True
        )
python
def run(self):
    """Will tag the currently active git commit id with the next release tag id"""
    sha = VersionUtils.run_git_command(["rev-parse", "HEAD"], self.git_dir)
    tag = self.distribution.get_version()
    if self.has_tag(tag, sha):
        tags_sha = VersionUtils.run_git_command(["rev-parse", tag], self.git_dir)
        if sha != tags_sha:
            logger.error(
                "git tag {0} sha does not match the sha requesting to be "
                "tagged; you need to increment the version number. "
                "Skipped tagging!".format(tag)
            )
            return
        else:
            logger.info(
                "git tag {0} already exists for this repo. Skipped tagging!".format(tag)
            )
            return
    logger.info("Adding tag {0} for commit {1}".format(tag, sha))
    if not self.dry_run:
        VersionUtils.run_git_command(
            ["tag", "-m", '""', tag, sha], self.git_dir, throw_on_error=True
        )
        logger.info("Pushing tag {0} to remote {1}".format(tag, self.remote))
        VersionUtils.run_git_command(
            ["push", self.remote, tag], self.git_dir, throw_on_error=True
        )
[ "def", "run", "(", "self", ")", ":", "sha", "=", "VersionUtils", ".", "run_git_command", "(", "[", "\"rev-parse\"", ",", "\"HEAD\"", "]", ",", "self", ".", "git_dir", ")", "tag", "=", "self", ".", "distribution", ".", "get_version", "(", ")", "if", "self", ".", "has_tag", "(", "tag", ",", "sha", ")", ":", "tags_sha", "=", "VersionUtils", ".", "run_git_command", "(", "[", "\"rev-parse\"", ",", "tag", "]", ",", "self", ".", "git_dir", ")", "if", "sha", "!=", "tags_sha", ":", "logger", ".", "error", "(", "\"git tag {0} sha does not match the sha requesting to be tagged, you need to increment the version number, Skipped Tagging!\"", ".", "format", "(", "tag", ")", ")", "return", "else", ":", "logger", ".", "info", "(", "\"git tag {0} already exists for this repo, Skipped Tagging!\"", ".", "format", "(", "tag", ")", ")", "return", "logger", ".", "info", "(", "\"Adding tag {0} for commit {1}\"", ".", "format", "(", "tag", ",", "sha", ")", ")", "if", "not", "self", ".", "dry_run", ":", "VersionUtils", ".", "run_git_command", "(", "[", "\"tag\"", ",", "\"-m\"", ",", "'\"\"'", ",", "tag", ",", "sha", "]", ",", "self", ".", "git_dir", ",", "throw_on_error", "=", "True", ")", "logger", ".", "info", "(", "\"Pushing tag {0} to remote {1}\"", ".", "format", "(", "tag", ",", "self", ".", "remote", ")", ")", "VersionUtils", ".", "run_git_command", "(", "[", "\"push\"", ",", "self", ".", "remote", ",", "tag", "]", ",", "self", ".", "git_dir", ",", "throw_on_error", "=", "True", ")" ]
Will tag the currently active git commit id with the next release tag id
[ "Will", "tag", "the", "currently", "active", "git", "commit", "id", "with", "the", "next", "release", "tag", "id" ]
6bbb799846ed4e97e84a3f0f2dbe14685f2ddb39
https://github.com/pycontribs/pyversion/blob/6bbb799846ed4e97e84a3f0f2dbe14685f2ddb39/version/tag_command.py#L38-L68
242,147
CTPUG/mdx_attr_cols
mdx_attr_cols.py
AttrColExtension.extendMarkdown
def extendMarkdown(self, md, md_globals=None):
    """Initializes markdown extension components."""
    if any(
            x not in md.treeprocessors
            for x in self.REQUIRED_EXTENSION_INTERNAL_NAMES):
        raise RuntimeError(
            "The attr_cols markdown extension depends on the following"
            " extensions, which must precede it in the extension"
            " list: %s" % ", ".join(self.REQUIRED_EXTENSIONS))
    processor = AttrColTreeProcessor(md, self.conf)
    md.treeprocessors.register(
        processor, 'attr_cols', 5)
python
def extendMarkdown(self, md, md_globals=None):
    """Initializes markdown extension components."""
    if any(
            x not in md.treeprocessors
            for x in self.REQUIRED_EXTENSION_INTERNAL_NAMES):
        raise RuntimeError(
            "The attr_cols markdown extension depends on the following"
            " extensions, which must precede it in the extension"
            " list: %s" % ", ".join(self.REQUIRED_EXTENSIONS))
    processor = AttrColTreeProcessor(md, self.conf)
    md.treeprocessors.register(
        processor, 'attr_cols', 5)
[ "def", "extendMarkdown", "(", "self", ",", "md", ",", "md_globals", "=", "None", ")", ":", "if", "any", "(", "x", "not", "in", "md", ".", "treeprocessors", "for", "x", "in", "self", ".", "REQUIRED_EXTENSION_INTERNAL_NAMES", ")", ":", "raise", "RuntimeError", "(", "\"The attr_cols markdown extension depends the following\"", "\" extensions which must preceded it in the extension\"", "\" list: %s\"", "%", "\", \"", ".", "join", "(", "self", ".", "REQUIRED_EXTENSIONS", ")", ")", "processor", "=", "AttrColTreeProcessor", "(", "md", ",", "self", ".", "conf", ")", "md", ".", "treeprocessors", ".", "register", "(", "processor", ",", "'attr_cols'", ",", "5", ")" ]
Initializes markdown extension components.
[ "Initializes", "markdown", "extension", "components", "." ]
46329b676842205b75b368a7cf2aeba0474c2870
https://github.com/CTPUG/mdx_attr_cols/blob/46329b676842205b75b368a7cf2aeba0474c2870/mdx_attr_cols.py#L71-L83
242,148
FlorianLudwig/rueckenwind
rw/cfg.py
read_file
def read_file(paths):
    """read config from path or list of paths

    :param str|list[str] paths: path or list of paths
    :return dict: loaded and merged config
    """
    if isinstance(paths, str):
        paths = [paths]

    re = {}
    for path in paths:
        cfg = yaml.load(open(path))
        merge(re, cfg)
    return re
python
def read_file(paths):
    """read config from path or list of paths

    :param str|list[str] paths: path or list of paths
    :return dict: loaded and merged config
    """
    if isinstance(paths, str):
        paths = [paths]

    re = {}
    for path in paths:
        cfg = yaml.load(open(path))
        merge(re, cfg)
    return re
[ "def", "read_file", "(", "paths", ")", ":", "if", "isinstance", "(", "paths", ",", "str", ")", ":", "paths", "=", "[", "paths", "]", "re", "=", "{", "}", "for", "path", "in", "paths", ":", "cfg", "=", "yaml", ".", "load", "(", "open", "(", "path", ")", ")", "merge", "(", "re", ",", "cfg", ")", "return", "re" ]
read config from path or list of paths

:param str|list[str] paths: path or list of paths
:return dict: loaded and merged config
[ "read", "config", "from", "path", "or", "list", "of", "paths" ]
47fec7af05ea10b3cf6d59b9f7bf4d12c02dddea
https://github.com/FlorianLudwig/rueckenwind/blob/47fec7af05ea10b3cf6d59b9f7bf4d12c02dddea/rw/cfg.py#L29-L44
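merge() is defined elsewhere in rw.cfg, so its exact semantics are not visible here. Assuming a recursive, last-writer-wins merge, the intended behavior looks roughly like this pure-dict sketch (merge_into is a hypothetical stand-in, not the real helper):

def merge_into(dst, src):
    # Hypothetical stand-in for rw.cfg.merge: recursive, later values win.
    for key, value in src.items():
        if isinstance(value, dict) and isinstance(dst.get(key), dict):
            merge_into(dst[key], value)
        else:
            dst[key] = value

base = {"db": {"host": "localhost", "port": 5432}, "debug": False}
override = {"db": {"host": "db.internal"}, "debug": True}

merge_into(base, override)
print(base)   # {'db': {'host': 'db.internal', 'port': 5432}, 'debug': True}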
242,149
openp2pdesign/makerlabs
makerlabs/diybio_org.py
data_from_diybio_org
def data_from_diybio_org():
    """Scrapes data from diybio.org."""
    r = requests.get(diy_bio_labs_url)
    if r.status_code == 200:
        # Fix a problem in the html source while loading it
        data = BeautifulSoup(r.text.replace(u'\xa0', u''), "lxml")
    else:
        data = "There was an error while accessing data on diybio.org."
    return data
python
def data_from_diybio_org():
    """Scrapes data from diybio.org."""
    r = requests.get(diy_bio_labs_url)
    if r.status_code == 200:
        # Fix a problem in the html source while loading it
        data = BeautifulSoup(r.text.replace(u'\xa0', u''), "lxml")
    else:
        data = "There was an error while accessing data on diybio.org."
    return data
[ "def", "data_from_diybio_org", "(", ")", ":", "r", "=", "requests", ".", "get", "(", "diy_bio_labs_url", ")", "if", "r", ".", "status_code", "==", "200", ":", "# Fix a problem in the html source while loading it", "data", "=", "BeautifulSoup", "(", "r", ".", "text", ".", "replace", "(", "u'\\xa0'", ",", "u''", ")", ",", "\"lxml\"", ")", "else", ":", "data", "=", "\"There was an error while accessing data on diybio.org.\"", "return", "data" ]
Scrapes data from diybio.org.
[ "Scrapes", "data", "from", "diybio", ".", "org", "." ]
b5838440174f10d370abb671358db9a99d7739fd
https://github.com/openp2pdesign/makerlabs/blob/b5838440174f10d370abb671358db9a99d7739fd/makerlabs/diybio_org.py#L34-L45
242,150
jeffrimko/Auxly
lib/auxly/__init__.py
open
def open(target):
    """Opens the target file or URL in the default application.

    **Attribution**:
    Written by user4815162342 and originally posted on
    `Stack Overflow <http://stackoverflow.com/a/17317468>`_.

    **Examples**: ::

        auxly.open("myfile.txt")
        auxly.open("https://www.github.com/")
    """
    if sys.platform == "win32":
        os.startfile(target)
    else:
        opener = "open" if sys.platform == "darwin" else "xdg-open"
        subprocess.call([opener, target])
python
def open(target):
    """Opens the target file or URL in the default application.

    **Attribution**:
    Written by user4815162342 and originally posted on
    `Stack Overflow <http://stackoverflow.com/a/17317468>`_.

    **Examples**: ::

        auxly.open("myfile.txt")
        auxly.open("https://www.github.com/")
    """
    if sys.platform == "win32":
        os.startfile(target)
    else:
        opener = "open" if sys.platform == "darwin" else "xdg-open"
        subprocess.call([opener, target])
[ "def", "open", "(", "target", ")", ":", "if", "sys", ".", "platform", "==", "\"win32\"", ":", "os", ".", "startfile", "(", "target", ")", "else", ":", "opener", "=", "\"open\"", "if", "sys", ".", "platform", "==", "\"darwin\"", "else", "\"xdg-open\"", "subprocess", ".", "call", "(", "[", "opener", ",", "target", "]", ")" ]
Opens the target file or URL in the default application.

**Attribution**:
Written by user4815162342 and originally posted on
`Stack Overflow <http://stackoverflow.com/a/17317468>`_.

**Examples**: ::

    auxly.open("myfile.txt")
    auxly.open("https://www.github.com/")
[ "Opens", "the", "target", "file", "or", "URL", "in", "the", "default", "application", "." ]
5aae876bcb6ca117c81d904f9455764cdc78cd48
https://github.com/jeffrimko/Auxly/blob/5aae876bcb6ca117c81d904f9455764cdc78cd48/lib/auxly/__init__.py#L25-L41
242,151
jeffrimko/Auxly
lib/auxly/__init__.py
verbose
def verbose(enabled):
    """Returns normal print function if enabled, otherwise a dummy print
    function is returned which will suppress output."""
    def _vprint(msg, **kwargs):
        print(msg, **kwargs)
    def _nprint(msg, **kwargs):
        pass
    return _vprint if enabled else _nprint
python
def verbose(enabled):
    """Returns normal print function if enabled, otherwise a dummy print
    function is returned which will suppress output."""
    def _vprint(msg, **kwargs):
        print(msg, **kwargs)
    def _nprint(msg, **kwargs):
        pass
    return _vprint if enabled else _nprint
[ "def", "verbose", "(", "enabled", ")", ":", "def", "_vprint", "(", "msg", ",", "*", "*", "kwargs", ")", ":", "print", "(", "msg", ",", "*", "*", "kwargs", ")", "def", "_nprint", "(", "msg", ",", "*", "*", "kwargs", ")", ":", "pass", "return", "_vprint", "if", "enabled", "else", "_nprint" ]
Returns normal print function if enabled, otherwise a dummy print function is returned which will suppress output.
[ "Returns", "normal", "print", "function", "if", "enable", "otherwise", "a", "dummy", "print", "function", "is", "returned", "which", "will", "suppress", "output", "." ]
5aae876bcb6ca117c81d904f9455764cdc78cd48
https://github.com/jeffrimko/Auxly/blob/5aae876bcb6ca117c81d904f9455764cdc78cd48/lib/auxly/__init__.py#L55-L62
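Usage is a matter of binding the returned function once and then calling it unconditionally:

vprint = verbose(enabled=True)
vprint("this line is printed")

nprint = verbose(enabled=False)
nprint("this line is swallowed")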
242,152
jeffrimko/Auxly
lib/auxly/__init__.py
callstop
def callstop(*args, **kwargs):
    """Limits the number of times a function can be called. Can be used as a
    function decorator or as a function that accepts another function. If used
    as a function, it returns a new function that will be call limited.

    **Params**:
      - func (func) - Function to call. Only available when used as a function.

    **Examples**: ::

        call = callstop(myfunc, limit=3)
        call(myarg1, myarg2)
    """
    limit = kwargs.get('limit', 1)
    def decor(func):
        def wrapper(*args, **kwargs):
            if wrapper.calls < limit:
                wrapper.calls += 1
                return func(*args, **kwargs)
        wrapper.calls = 0
        return wrapper
    if len(args) > 0 and callable(args[0]):
        func = args[0]
        return decor(func)
    return decor
python
def callstop(*args, **kwargs):
    """Limits the number of times a function can be called. Can be used as a
    function decorator or as a function that accepts another function. If used
    as a function, it returns a new function that will be call limited.

    **Params**:
      - func (func) - Function to call. Only available when used as a function.

    **Examples**: ::

        call = callstop(myfunc, limit=3)
        call(myarg1, myarg2)
    """
    limit = kwargs.get('limit', 1)
    def decor(func):
        def wrapper(*args, **kwargs):
            if wrapper.calls < limit:
                wrapper.calls += 1
                return func(*args, **kwargs)
        wrapper.calls = 0
        return wrapper
    if len(args) > 0 and callable(args[0]):
        func = args[0]
        return decor(func)
    return decor
[ "def", "callstop", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "limit", "=", "kwargs", ".", "get", "(", "'limit'", ",", "1", ")", "def", "decor", "(", "func", ")", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "wrapper", ".", "calls", "<", "limit", ":", "wrapper", ".", "calls", "+=", "1", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "wrapper", ".", "calls", "=", "0", "return", "wrapper", "if", "len", "(", "args", ")", ">", "0", "and", "callable", "(", "args", "[", "0", "]", ")", ":", "func", "=", "args", "[", "0", "]", "return", "decor", "(", "func", ")", "return", "decor" ]
Limits the number of times a function can be called. Can be used as a
function decorator or as a function that accepts another function. If used
as a function, it returns a new function that will be call limited.

**Params**:
  - func (func) - Function to call. Only available when used as a function.

**Examples**: ::

    call = callstop(myfunc, limit=3)
    call(myarg1, myarg2)
[ "Limits", "the", "number", "of", "times", "a", "function", "can", "be", "called", ".", "Can", "be", "used", "as", "a", "function", "decorator", "or", "as", "a", "function", "that", "accepts", "another", "function", ".", "If", "used", "as", "a", "function", "it", "returns", "a", "new", "function", "that", "will", "be", "call", "limited", "." ]
5aae876bcb6ca117c81d904f9455764cdc78cd48
https://github.com/jeffrimko/Auxly/blob/5aae876bcb6ca117c81d904f9455764cdc78cd48/lib/auxly/__init__.py#L97-L122
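Both calling styles from the docstring, side by side, using print as the wrapped function so the effect is visible:

@callstop(limit=2)
def greet(name):
    print("hello,", name)

greet("a")   # prints
greet("b")   # prints
greet("c")   # ignored: the two-call limit is reached

say_once = callstop(print, limit=1)
say_once("only once")
say_once("never shown")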
242,153
50onRed/smr
smr/ec2.py
wait_for_instance
def wait_for_instance(instance):
    """
    Wait for instance status to be 'running', in which case return True;
    otherwise return False.
    """
    status = None
    print("getting status for instance {} ...".format(instance.id))
    while status is None:
        try:
            status = instance.update()
            if status is None:
                time.sleep(2)
        except EC2ResponseError:
            time.sleep(2)
    print("waiting for instance {} ...".format(instance.id))
    while status == "pending":
        time.sleep(2)
        status = instance.update()
    if status != "running":
        print("Invalid status when starting instance {}: {}".format(instance.id, status))
        return False
    print("New instance {} started: {}".format(instance.id, instance.ip_address))
    return True
python
def wait_for_instance(instance):
    """
    Wait for instance status to be 'running', in which case return True;
    otherwise return False.
    """
    status = None
    print("getting status for instance {} ...".format(instance.id))
    while status is None:
        try:
            status = instance.update()
            if status is None:
                time.sleep(2)
        except EC2ResponseError:
            time.sleep(2)
    print("waiting for instance {} ...".format(instance.id))
    while status == "pending":
        time.sleep(2)
        status = instance.update()
    if status != "running":
        print("Invalid status when starting instance {}: {}".format(instance.id, status))
        return False
    print("New instance {} started: {}".format(instance.id, instance.ip_address))
    return True
[ "def", "wait_for_instance", "(", "instance", ")", ":", "status", "=", "None", "print", "(", "\"getting status for instance {} ...\"", ".", "format", "(", "instance", ".", "id", ")", ")", "while", "status", "is", "None", ":", "try", ":", "status", "=", "instance", ".", "update", "(", ")", "if", "status", "is", "None", ":", "time", ".", "sleep", "(", "2", ")", "except", "EC2ResponseError", ":", "time", ".", "sleep", "(", "2", ")", "print", "(", "\"waiting for instance {} ...\"", ".", "format", "(", "instance", ".", "id", ")", ")", "while", "status", "==", "\"pending\"", ":", "time", ".", "sleep", "(", "2", ")", "status", "=", "instance", ".", "update", "(", ")", "if", "status", "!=", "\"running\"", ":", "print", "(", "\"Invalid status when starting instance {}: {}\"", ".", "format", "(", "instance", ".", "id", ",", "status", ")", ")", "return", "False", "print", "(", "\"New instance {} started: {}\"", ".", "format", "(", "instance", ".", "id", ",", "instance", ".", "ip_address", ")", ")", "return", "True" ]
Wait for instance status to be 'running', in which case return True; otherwise return False.
[ "wait", "for", "instance", "status", "to", "be", "running", "in", "which", "case", "return", "True", "False", "otherwise" ]
999b33d86b6a900d7c4aadf03cf4a661acba9f1b
https://github.com/50onRed/smr/blob/999b33d86b6a900d7c4aadf03cf4a661acba9f1b/smr/ec2.py#L78-L102
242,154
openknowledge-archive/datapackage-validate-py
datapackage_validate/validate.py
validate
def validate(datapackage, schema='base'):
    '''Validate Data Package datapackage.json files against a jsonschema.

    Args:
        datapackage (str or dict): The Data Package descriptor file
            (i.e. datapackage.json) as a dict or its contents in a string.
        schema (str or dict): If a string, it can be the schema ID in the
            registry, a local path, a URL or the schema's JSON as a string.
            If a dict, it must be the JSON Schema itself.

    Returns:
        None

    Raises:
        DataPackageValidateException: This exception has the list of the
            validation errors in its `.errors` attribute.
    '''
    errors = []
    schema_obj = None
    datapackage_obj = None

    # Sanity check datapackage
    # If datapackage is a str, check json is well formed
    if isinstance(datapackage, six.string_types):
        try:
            datapackage_obj = json.loads(datapackage)
        except ValueError as e:
            errors.append(DataPackageValidateException(e))
    elif not isinstance(datapackage, dict):
        msg = 'Data Package must be a dict or JSON string, but was a \'{0}\''
        dp_type = type(datapackage).__name__
        error = DataPackageValidateException(msg.format(dp_type))
        errors.append(error)
    else:
        datapackage_obj = datapackage

    try:
        if isinstance(schema, six.string_types):
            try:
                schema = json.loads(schema)
            except ValueError:
                pass
        schema_obj = Schema(schema)
    except (SchemaError, RegistryError) as e:
        errors.append(e)

    # Validate datapackage against the schema
    if datapackage_obj is not None and schema_obj is not None:
        try:
            schema_obj.validate(datapackage_obj)
        except ValidationError as e:
            errors.append(e)

    if errors:
        exception = DataPackageValidateException()
        exception.errors = errors
        raise exception
python
def validate(datapackage, schema='base'):
    '''Validate Data Package datapackage.json files against a jsonschema.

    Args:
        datapackage (str or dict): The Data Package descriptor file
            (i.e. datapackage.json) as a dict or its contents in a string.
        schema (str or dict): If a string, it can be the schema ID in the
            registry, a local path, a URL or the schema's JSON as a string.
            If a dict, it must be the JSON Schema itself.

    Returns:
        None

    Raises:
        DataPackageValidateException: This exception has the list of the
            validation errors in its `.errors` attribute.
    '''
    errors = []
    schema_obj = None
    datapackage_obj = None

    # Sanity check datapackage
    # If datapackage is a str, check json is well formed
    if isinstance(datapackage, six.string_types):
        try:
            datapackage_obj = json.loads(datapackage)
        except ValueError as e:
            errors.append(DataPackageValidateException(e))
    elif not isinstance(datapackage, dict):
        msg = 'Data Package must be a dict or JSON string, but was a \'{0}\''
        dp_type = type(datapackage).__name__
        error = DataPackageValidateException(msg.format(dp_type))
        errors.append(error)
    else:
        datapackage_obj = datapackage

    try:
        if isinstance(schema, six.string_types):
            try:
                schema = json.loads(schema)
            except ValueError:
                pass
        schema_obj = Schema(schema)
    except (SchemaError, RegistryError) as e:
        errors.append(e)

    # Validate datapackage against the schema
    if datapackage_obj is not None and schema_obj is not None:
        try:
            schema_obj.validate(datapackage_obj)
        except ValidationError as e:
            errors.append(e)

    if errors:
        exception = DataPackageValidateException()
        exception.errors = errors
        raise exception
[ "def", "validate", "(", "datapackage", ",", "schema", "=", "'base'", ")", ":", "errors", "=", "[", "]", "schema_obj", "=", "None", "datapackage_obj", "=", "None", "# Sanity check datapackage", "# If datapackage is a str, check json is well formed", "if", "isinstance", "(", "datapackage", ",", "six", ".", "string_types", ")", ":", "try", ":", "datapackage_obj", "=", "json", ".", "loads", "(", "datapackage", ")", "except", "ValueError", "as", "e", ":", "errors", ".", "append", "(", "DataPackageValidateException", "(", "e", ")", ")", "elif", "not", "isinstance", "(", "datapackage", ",", "dict", ")", ":", "msg", "=", "'Data Package must be a dict or JSON string, but was a \\'{0}\\''", "dp_type", "=", "type", "(", "datapackage", ")", ".", "__name__", "error", "=", "DataPackageValidateException", "(", "msg", ".", "format", "(", "dp_type", ")", ")", "errors", ".", "append", "(", "error", ")", "else", ":", "datapackage_obj", "=", "datapackage", "try", ":", "if", "isinstance", "(", "schema", ",", "six", ".", "string_types", ")", ":", "try", ":", "schema", "=", "json", ".", "loads", "(", "schema", ")", "except", "ValueError", ":", "pass", "schema_obj", "=", "Schema", "(", "schema", ")", "except", "(", "SchemaError", ",", "RegistryError", ")", "as", "e", ":", "errors", ".", "append", "(", "e", ")", "# Validate datapackage against the schema", "if", "datapackage_obj", "is", "not", "None", "and", "schema_obj", "is", "not", "None", ":", "try", ":", "schema_obj", ".", "validate", "(", "datapackage_obj", ")", "except", "ValidationError", "as", "e", ":", "errors", ".", "append", "(", "e", ")", "if", "errors", ":", "exception", "=", "DataPackageValidateException", "(", ")", "exception", ".", "errors", "=", "errors", "raise", "exception" ]
Validate Data Package datapackage.json files against a jsonschema.

Args:
    datapackage (str or dict): The Data Package descriptor file
        (i.e. datapackage.json) as a dict or its contents in a string.
    schema (str or dict): If a string, it can be the schema ID in the
        registry, a local path, a URL or the schema's JSON as a string.
        If a dict, it must be the JSON Schema itself.

Returns:
    None

Raises:
    DataPackageValidateException: This exception has the list of the
        validation errors in its `.errors` attribute.
[ "Validate", "Data", "Package", "datapackage", ".", "json", "files", "against", "a", "jsonschema", "." ]
5f906bd4e0baa78dfd45f48e7fa3c5d649e6846a
https://github.com/openknowledge-archive/datapackage-validate-py/blob/5f906bd4e0baa78dfd45f48e7fa3c5d649e6846a/datapackage_validate/validate.py#L19-L78
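A hedged usage sketch; the descriptor fields here are just the usual Data Package basics, and the actual schema requirements live in the registry:

descriptor = {
    "name": "example-package",
    "resources": [{"path": "data.csv"}],
}

try:
    validate(descriptor)
except DataPackageValidateException as e:
    # All problems are collected rather than raised one at a time.
    for err in e.errors:
        print(err)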
242,155
lextoumbourou/txstripe
txstripe/resource.py
make_request
def make_request(
        ins, method, url, stripe_account=None, params=None, headers=None,
        **kwargs):
    """
    Return a deferred or handle error.

    For overriding in various classes.
    """
    if txstripe.api_key is None:
        raise error.AuthenticationError(
            'No API key provided. (HINT: set your API key using '
            '"stripe.api_key = <API-KEY>"). You can generate API keys '
            'from the Stripe web interface. See https://stripe.com/api '
            'for details, or email support@stripe.com if you have any '
            'questions.')

    abs_url = '{}{}'.format(txstripe.api_base, url)

    ua = {
        'lang': 'python',
        'publisher': 'lextoumbourou',
        'httplib': 'Twisted',
    }

    headers = headers or {}
    headers.update({
        'X-Stripe-Client-User-Agent': util.json.dumps(ua),
        'User-Agent': 'txstripe',
        'Authorization': 'Bearer %s' % (txstripe.api_key,)
    })

    if stripe_account:
        headers['Stripe-Account'] = stripe_account

    if txstripe.api_version is not None:
        headers['Stripe-Version'] = txstripe.api_version

    if method == 'get' or method == 'delete':
        data = None
    elif method == 'post':
        data = {k: v for (k, v) in _api_encode(params)}
        params = None
    else:
        raise error.APIConnectionError(
            'Unrecognized HTTP method %r. This may indicate a bug in the '
            'Stripe bindings.' % (method,))

    resp = yield treq.request(
        method, abs_url, params=params, data=data, headers=headers, **kwargs)

    if resp.code >= 400:
        yield util.handle_api_error(resp)
        return

    body = yield resp.json()

    defer.returnValue(
        convert_to_stripe_object(body, txstripe.api_key, stripe_account))
python
def make_request(
        ins, method, url, stripe_account=None, params=None, headers=None,
        **kwargs):
    """
    Return a deferred or handle error.

    For overriding in various classes.
    """
    if txstripe.api_key is None:
        raise error.AuthenticationError(
            'No API key provided. (HINT: set your API key using '
            '"stripe.api_key = <API-KEY>"). You can generate API keys '
            'from the Stripe web interface. See https://stripe.com/api '
            'for details, or email support@stripe.com if you have any '
            'questions.')

    abs_url = '{}{}'.format(txstripe.api_base, url)

    ua = {
        'lang': 'python',
        'publisher': 'lextoumbourou',
        'httplib': 'Twisted',
    }

    headers = headers or {}
    headers.update({
        'X-Stripe-Client-User-Agent': util.json.dumps(ua),
        'User-Agent': 'txstripe',
        'Authorization': 'Bearer %s' % (txstripe.api_key,)
    })

    if stripe_account:
        headers['Stripe-Account'] = stripe_account

    if txstripe.api_version is not None:
        headers['Stripe-Version'] = txstripe.api_version

    if method == 'get' or method == 'delete':
        data = None
    elif method == 'post':
        data = {k: v for (k, v) in _api_encode(params)}
        params = None
    else:
        raise error.APIConnectionError(
            'Unrecognized HTTP method %r. This may indicate a bug in the '
            'Stripe bindings.' % (method,))

    resp = yield treq.request(
        method, abs_url, params=params, data=data, headers=headers, **kwargs)

    if resp.code >= 400:
        yield util.handle_api_error(resp)
        return

    body = yield resp.json()

    defer.returnValue(
        convert_to_stripe_object(body, txstripe.api_key, stripe_account))
[ "def", "make_request", "(", "ins", ",", "method", ",", "url", ",", "stripe_account", "=", "None", ",", "params", "=", "None", ",", "headers", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "txstripe", ".", "api_key", "is", "None", ":", "raise", "error", ".", "AuthenticationError", "(", "'No API key provided. (HINT: set your API key using '", "'\"stripe.api_key = <API-KEY>\"). You can generate API keys '", "'from the Stripe web interface. See https://stripe.com/api '", "'for details, or email support@stripe.com if you have any '", "'questions.'", ")", "abs_url", "=", "'{}{}'", ".", "format", "(", "txstripe", ".", "api_base", ",", "url", ")", "ua", "=", "{", "'lang'", ":", "'python'", ",", "'publisher'", ":", "'lextoumbourou'", ",", "'httplib'", ":", "'Twisted'", ",", "}", "headers", "=", "headers", "or", "{", "}", "headers", ".", "update", "(", "{", "'X-Stripe-Client-User-Agent'", ":", "util", ".", "json", ".", "dumps", "(", "ua", ")", ",", "'User-Agent'", ":", "'txstripe'", ",", "'Authorization'", ":", "'Bearer %s'", "%", "(", "txstripe", ".", "api_key", ",", ")", "}", ")", "if", "stripe_account", ":", "headers", "[", "'Stripe-Account'", "]", "=", "stripe_account", "if", "txstripe", ".", "api_version", "is", "not", "None", ":", "headers", "[", "'Stripe-Version'", "]", "=", "txstripe", ".", "api_version", "if", "method", "==", "'get'", "or", "method", "==", "'delete'", ":", "data", "=", "None", "elif", "method", "==", "'post'", ":", "data", "=", "{", "k", ":", "v", "for", "(", "k", ",", "v", ")", "in", "_api_encode", "(", "params", ")", "}", "params", "=", "None", "else", ":", "raise", "error", ".", "APIConnectionError", "(", "'Unrecognized HTTP method %r. This may indicate a bug in the '", "'Stripe bindings.'", "%", "(", "method", ",", ")", ")", "resp", "=", "yield", "treq", ".", "request", "(", "method", ",", "abs_url", ",", "params", "=", "params", ",", "data", "=", "data", ",", "headers", "=", "headers", ",", "*", "*", "kwargs", ")", "if", "resp", ".", "code", ">=", "400", ":", "yield", "util", ".", "handle_api_error", "(", "resp", ")", "return", "body", "=", "yield", "resp", ".", "json", "(", ")", "defer", ".", "returnValue", "(", "convert_to_stripe_object", "(", "body", ",", "txstripe", ".", "api_key", ",", "stripe_account", ")", ")" ]
Return a deferred or handle error. For overriding in various classes.
[ "Return", "a", "deferred", "or", "handle", "error", "." ]
a69e67f524258026fd1840655a0578311bba3b89
https://github.com/lextoumbourou/txstripe/blob/a69e67f524258026fd1840655a0578311bba3b89/txstripe/resource.py#L52-L110
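A minimal usage sketch: make_request is written as a generator, so it would normally run under Twisted's defer.inlineCallbacks. The decorator placement, the placeholder key, and the /charges path are assumptions:

from twisted.internet import defer
import txstripe

txstripe.api_key = 'sk_test_...'  # placeholder key

@defer.inlineCallbacks
def fetch_charge(charge_id):
    # make_request yields intermediate deferreds and fires with a stripe object
    charge = yield make_request(None, 'get', '/charges/%s' % charge_id)
    defer.returnValue(charge)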
242,156
jashort/SmartFileSorter
smartfilesorter/smartfilesorter.py
SmartFileSorter.create_logger
def create_logger(self, args={}):
    """
    Create and configure the program's logger object.

    Log levels:
    DEBUG - Log everything. Hidden unless --debug is used.
    INFO - information only
    ERROR - Critical error

    :param args: Object containing program's parsed command line arguments
    :return: None
    """
    # Set up logging
    logger = logging.getLogger("SmartFileSorter")
    logger.level = logging.INFO
    if '--debug' in args and args['--debug'] is True:
        logger.setLevel(logging.DEBUG)

    file_log_formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s',
                                           '%Y-%m-%d %H:%M:%S')
    console_log_formatter = logging.Formatter('%(message)s')

    # Log to stdout
    stdout_stream = logging.StreamHandler(stream=sys.stdout)
    stdout_stream.setFormatter(console_log_formatter)
    logger.addHandler(stdout_stream)

    # Log to file if the option is chosen
    if '--log' in args and args['--log'] is not None:
        logfile = open(args['--log'], 'w')
        logfile_stream = logging.StreamHandler(stream=logfile)
        logfile_stream.setFormatter(file_log_formatter)
        logger.addHandler(logfile_stream)

    if '--dry-run' in args and args['--dry-run'] is True:
        logger.info('Running with --dry-run parameter. Actions will not be performed.')

    self.logger = logger
python
def create_logger(self, args={}):
    """
    Create and configure the program's logger object.

    Log levels:
    DEBUG - Log everything. Hidden unless --debug is used.
    INFO - information only
    ERROR - Critical error

    :param args: Object containing program's parsed command line arguments
    :return: None
    """
    # Set up logging
    logger = logging.getLogger("SmartFileSorter")
    logger.level = logging.INFO
    if '--debug' in args and args['--debug'] is True:
        logger.setLevel(logging.DEBUG)

    file_log_formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s',
                                           '%Y-%m-%d %H:%M:%S')
    console_log_formatter = logging.Formatter('%(message)s')

    # Log to stdout
    stdout_stream = logging.StreamHandler(stream=sys.stdout)
    stdout_stream.setFormatter(console_log_formatter)
    logger.addHandler(stdout_stream)

    # Log to file if the option is chosen
    if '--log' in args and args['--log'] is not None:
        logfile = open(args['--log'], 'w')
        logfile_stream = logging.StreamHandler(stream=logfile)
        logfile_stream.setFormatter(file_log_formatter)
        logger.addHandler(logfile_stream)

    if '--dry-run' in args and args['--dry-run'] is True:
        logger.info('Running with --dry-run parameter. Actions will not be performed.')

    self.logger = logger
[ "def", "create_logger", "(", "self", ",", "args", "=", "{", "}", ")", ":", "# Set up logging", "logger", "=", "logging", ".", "getLogger", "(", "\"SmartFileSorter\"", ")", "logger", ".", "level", "=", "logging", ".", "INFO", "if", "'--debug'", "in", "args", "and", "args", "[", "'--debug'", "]", "is", "True", ":", "logger", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "file_log_formatter", "=", "logging", ".", "Formatter", "(", "'%(asctime)s %(name)s %(levelname)s %(message)s'", ",", "'%Y-%m-%d %H:%M:%S'", ")", "console_log_formatter", "=", "logging", ".", "Formatter", "(", "'%(message)s'", ")", "# Log to stdout", "stdout_stream", "=", "logging", ".", "StreamHandler", "(", "stream", "=", "sys", ".", "stdout", ")", "stdout_stream", ".", "setFormatter", "(", "console_log_formatter", ")", "logger", ".", "addHandler", "(", "stdout_stream", ")", "# Log to file if the option is chosen", "if", "'--log'", "in", "args", "and", "args", "[", "'--log'", "]", "is", "not", "None", ":", "logfile", "=", "open", "(", "args", "[", "'--log'", "]", ",", "'w'", ")", "logfile_stream", "=", "logging", ".", "StreamHandler", "(", "stream", "=", "logfile", ")", "logfile_stream", ".", "setFormatter", "(", "file_log_formatter", ")", "logger", ".", "addHandler", "(", "logfile_stream", ")", "if", "'--dry-run'", "in", "args", "and", "args", "[", "'--dry-run'", "]", "is", "True", ":", "logger", ".", "info", "(", "'Running with --dry-run parameter. Actions will not be performed.'", ")", "self", ".", "logger", "=", "logger" ]
Create and configure the program's logger object.

    Log levels:
    DEBUG - Log everything. Hidden unless --debug is used.
    INFO - information only
    ERROR - Critical error

    :param args: Object containing program's parsed command line arguments
    :return: None
[ "Create", "and", "configure", "the", "program", "s", "logger", "object", "." ]
77faf09e5a737da93e16e71a64707366b8307910
https://github.com/jashort/SmartFileSorter/blob/77faf09e5a737da93e16e71a64707366b8307910/smartfilesorter/smartfilesorter.py#L45-L81
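A minimal usage sketch, assuming a no-argument constructor and docopt-style argument keys as checked in the method body:

sorter = SmartFileSorter()  # assumed constructor
sorter.create_logger({'--debug': True, '--log': 'run.log', '--dry-run': False})
sorter.logger.debug('logging configured')  # visible because --debug enabled DEBUG level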
242,157
jashort/SmartFileSorter
smartfilesorter/smartfilesorter.py
SmartFileSorter.load_plugins
def load_plugins(self, plugin_path):
    """
    Loads plugins from modules in plugin_path. Looks for the config_name
    property in each object that's found. If so, adds that to the
    dictionary with the config_name as the key. config_name should be
    unique between different plugins.

    :param plugin_path: Path to load plugins from
    :return: dictionary of plugins by config_name
    """
    self.logger.debug('Loading plugins from {0}'.format(plugin_path))
    plugins = {}
    plugin_dir = os.path.realpath(plugin_path)
    sys.path.append(plugin_dir)

    for f in os.listdir(plugin_dir):
        if f.endswith(".py"):
            name = f[:-3]
        elif f.endswith(".pyc"):
            name = f[:-4]
        # Possible support for plugins inside directories - worth doing?
        # elif os.path.isdir(os.path.join(plugin_dir, f)):
        #     name = f
        else:
            continue

        try:
            self.logger.debug('Adding plugin from: {0}'.format(f))
            mod = __import__(name, globals(), locals(), [], 0)
            for plugin_class in inspect.getmembers(mod):
                if plugin_class[0][0:2] == '__':
                    # Skip dunder members - builtins, etc
                    continue
                if hasattr(plugin_class[1], 'config_name'):
                    if plugin_class[1].config_name is not None:
                        # Skip plugins where config_name is None, like the base classes
                        plugins[plugin_class[1].config_name] = plugin_class[1]
                        self.logger.debug('Added plugin: {0}'.format(plugin_class[1].config_name))
                # Todo: Add error checking here. If a plugin with that name already exists,
                # log an error. Quit or continue?
        except ImportError as e:
            self.logger.error(e)
            pass  # problem importing

    self.logger.debug('Done loading plugins')
    return plugins
python
def load_plugins(self, plugin_path):
    """
    Loads plugins from modules in plugin_path. Looks for the config_name
    property in each object that's found. If so, adds that to the
    dictionary with the config_name as the key. config_name should be
    unique between different plugins.

    :param plugin_path: Path to load plugins from
    :return: dictionary of plugins by config_name
    """
    self.logger.debug('Loading plugins from {0}'.format(plugin_path))
    plugins = {}
    plugin_dir = os.path.realpath(plugin_path)
    sys.path.append(plugin_dir)

    for f in os.listdir(plugin_dir):
        if f.endswith(".py"):
            name = f[:-3]
        elif f.endswith(".pyc"):
            name = f[:-4]
        # Possible support for plugins inside directories - worth doing?
        # elif os.path.isdir(os.path.join(plugin_dir, f)):
        #     name = f
        else:
            continue

        try:
            self.logger.debug('Adding plugin from: {0}'.format(f))
            mod = __import__(name, globals(), locals(), [], 0)
            for plugin_class in inspect.getmembers(mod):
                if plugin_class[0][0:2] == '__':
                    # Skip dunder members - builtins, etc
                    continue
                if hasattr(plugin_class[1], 'config_name'):
                    if plugin_class[1].config_name is not None:
                        # Skip plugins where config_name is None, like the base classes
                        plugins[plugin_class[1].config_name] = plugin_class[1]
                        self.logger.debug('Added plugin: {0}'.format(plugin_class[1].config_name))
                # Todo: Add error checking here. If a plugin with that name already exists,
                # log an error. Quit or continue?
        except ImportError as e:
            self.logger.error(e)
            pass  # problem importing

    self.logger.debug('Done loading plugins')
    return plugins
[ "def", "load_plugins", "(", "self", ",", "plugin_path", ")", ":", "self", ".", "logger", ".", "debug", "(", "'Loading plugins from {0}'", ".", "format", "(", "plugin_path", ")", ")", "plugins", "=", "{", "}", "plugin_dir", "=", "os", ".", "path", ".", "realpath", "(", "plugin_path", ")", "sys", ".", "path", ".", "append", "(", "plugin_dir", ")", "for", "f", "in", "os", ".", "listdir", "(", "plugin_dir", ")", ":", "if", "f", ".", "endswith", "(", "\".py\"", ")", ":", "name", "=", "f", "[", ":", "-", "3", "]", "elif", "f", ".", "endswith", "(", "\".pyc\"", ")", ":", "name", "=", "f", "[", ":", "-", "4", "]", "# Possible support for plugins inside directories - worth doing?", "# elif os.path.isdir(os.path.join(plugin_dir, f)):", "# name = f", "else", ":", "continue", "try", ":", "self", ".", "logger", ".", "debug", "(", "'Adding plugin from: {0}'", ".", "format", "(", "f", ")", ")", "mod", "=", "__import__", "(", "name", ",", "globals", "(", ")", ",", "locals", "(", ")", ",", "[", "]", ",", "0", ")", "for", "plugin_class", "in", "inspect", ".", "getmembers", "(", "mod", ")", ":", "if", "plugin_class", "[", "0", "]", "[", "0", ":", "2", "]", "==", "'__'", ":", "# Skip dunder members - builtins, etc", "continue", "if", "hasattr", "(", "plugin_class", "[", "1", "]", ",", "'config_name'", ")", ":", "if", "plugin_class", "[", "1", "]", ".", "config_name", "is", "not", "None", ":", "# Skip plugins where config_name is None, like the base classes", "plugins", "[", "plugin_class", "[", "1", "]", ".", "config_name", "]", "=", "plugin_class", "[", "1", "]", "self", ".", "logger", ".", "debug", "(", "'Added plugin: {0}'", ".", "format", "(", "plugin_class", "[", "1", "]", ".", "config_name", ")", ")", "# Todo: Add error checking here. If a plugin with that name already exists,", "# log an error. Quit or continue?", "except", "ImportError", "as", "e", ":", "self", ".", "logger", ".", "error", "(", "e", ")", "pass", "# problem importing", "self", ".", "logger", ".", "debug", "(", "'Done loading plugins'", ")", "return", "plugins" ]
Loads plugins from modules in plugin_path. Looks for the config_name
    property in each object that's found. If so, adds that to the dictionary
    with the config_name as the key. config_name should be unique between
    different plugins.

    :param plugin_path: Path to load plugins from
    :return: dictionary of plugins by config_name
[ "Loads", "plugins", "from", "modules", "in", "plugin_path", ".", "Looks", "for", "the", "config_name", "property", "in", "each", "object", "that", "s", "found", ".", "If", "so", "adds", "that", "to", "the", "dictionary", "with", "the", "config_name", "as", "the", "key", ".", "config_name", "should", "be", "unique", "between", "different", "plugins", "." ]
77faf09e5a737da93e16e71a64707366b8307910
https://github.com/jashort/SmartFileSorter/blob/77faf09e5a737da93e16e71a64707366b8307910/smartfilesorter/smartfilesorter.py#L132-L178
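A sketch of what a discoverable plugin module could look like, given the config_name convention above; the module, class, and method names are hypothetical:

# move_by_extension.py, placed in the plugin directory
class MoveByExtension(object):
    config_name = 'move_by_extension'  # becomes the key in the plugins dict

    def do(self, filename):
        pass  # rule logic would go here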
242,158
klmitch/appathy
appathy/actions.py
ActionDescriptor.deserialize_request
def deserialize_request(self, req):
    """
    Uses the deserializers declared on the action method and its
    extensions to deserialize the request.  Returns the result of the
    deserialization.  Raises `webob.HTTPUnsupportedMediaType` if the
    media type of the request is unsupported.
    """
    # See if we have a body
    if req.content_length == 0:
        return None

    # Get the primary deserializer
    try:
        deserializer = self.method.deserializers(req.content_type)
    except KeyError:
        raise webob.exc.HTTPUnsupportedMediaType()

    # If it has an attacher, attach all the deserializers for the
    # extensions
    if hasattr(deserializer, 'attach'):
        for ext in self.extensions:
            try:
                deserializer.attach(ext.deserializers(req.content_type))
            except KeyError:
                pass

    # A deserializer is simply a callable, so call it
    return deserializer(req.body)
python
def deserialize_request(self, req):
    """
    Uses the deserializers declared on the action method and its
    extensions to deserialize the request.  Returns the result of the
    deserialization.  Raises `webob.HTTPUnsupportedMediaType` if the
    media type of the request is unsupported.
    """
    # See if we have a body
    if req.content_length == 0:
        return None

    # Get the primary deserializer
    try:
        deserializer = self.method.deserializers(req.content_type)
    except KeyError:
        raise webob.exc.HTTPUnsupportedMediaType()

    # If it has an attacher, attach all the deserializers for the
    # extensions
    if hasattr(deserializer, 'attach'):
        for ext in self.extensions:
            try:
                deserializer.attach(ext.deserializers(req.content_type))
            except KeyError:
                pass

    # A deserializer is simply a callable, so call it
    return deserializer(req.body)
[ "def", "deserialize_request", "(", "self", ",", "req", ")", ":", "# See if we have a body", "if", "req", ".", "content_length", "==", "0", ":", "return", "None", "# Get the primary deserializer", "try", ":", "deserializer", "=", "self", ".", "method", ".", "deserializers", "(", "req", ".", "content_type", ")", "except", "KeyError", ":", "raise", "webob", ".", "exc", ".", "HTTPUnsupportedMediaType", "(", ")", "# If it has an attacher, attach all the deserializers for the", "# extensions", "if", "hasattr", "(", "deserializer", ",", "'attach'", ")", ":", "for", "ext", "in", "self", ".", "extensions", ":", "try", ":", "deserializer", ".", "attach", "(", "ext", ".", "deserializers", "(", "req", ".", "content_type", ")", ")", "except", "KeyError", ":", "pass", "# A deserializer is simply a callable, so call it", "return", "deserializer", "(", "req", ".", "body", ")" ]
Uses the deserializers declared on the action method and its extensions to deserialize the request. Returns the result of the deserialization. Raises `webob.HTTPUnsupportedMediaType` if the media type of the request is unsupported.
[ "Uses", "the", "deserializers", "declared", "on", "the", "action", "method", "and", "its", "extensions", "to", "deserialize", "the", "request", ".", "Returns", "the", "result", "of", "the", "deserialization", ".", "Raises", "webob", ".", "HTTPUnsupportedMediaType", "if", "the", "media", "type", "of", "the", "request", "is", "unsupported", "." ]
a10aa7d21d38622e984a8fe106ab37114af90dc2
https://github.com/klmitch/appathy/blob/a10aa7d21d38622e984a8fe106ab37114af90dc2/appathy/actions.py#L99-L127
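Per the closing comment above, a deserializer is simply a callable over the raw body; a hypothetical JSON deserializer could look like this:

import json

def json_deserializer(body):
    # turn the request body into structured data; empty bodies never
    # reach here, because deserialize_request returns early for them
    return json.loads(body)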
242,159
klmitch/appathy
appathy/actions.py
ActionDescriptor.serializer
def serializer(self, req):
    """
    Selects and returns the serializer to use, based on the
    serializers declared on the action method and its extensions.
    The returned content type is selected based on the types
    available and the best match generated from the HTTP `Accept`
    header.  Raises `HTTPNotAcceptable` if the request cannot be
    serialized to an acceptable media type.  Returns a tuple of the
    content type and the serializer.
    """
    # Select the best match serializer
    content_types = self.method.serializers.get_types()
    content_type = req.accept.best_match(content_types)
    if content_type is None:
        raise webob.exc.HTTPNotAcceptable()

    # Select the serializer to use
    try:
        serializer = self.method.serializers(content_type)
    except KeyError:
        raise webob.exc.HTTPNotAcceptable()

    # If it has an attacher, attach all the serializers for the
    # extensions
    if hasattr(serializer, 'attach'):
        for ext in reversed(self.extensions):
            try:
                serializer.attach(ext.serializers(content_type))
            except KeyError:
                pass

    # Return content type and serializer
    return content_type, serializer
python
def serializer(self, req):
    """
    Selects and returns the serializer to use, based on the
    serializers declared on the action method and its extensions.
    The returned content type is selected based on the types
    available and the best match generated from the HTTP `Accept`
    header.  Raises `HTTPNotAcceptable` if the request cannot be
    serialized to an acceptable media type.  Returns a tuple of the
    content type and the serializer.
    """
    # Select the best match serializer
    content_types = self.method.serializers.get_types()
    content_type = req.accept.best_match(content_types)
    if content_type is None:
        raise webob.exc.HTTPNotAcceptable()

    # Select the serializer to use
    try:
        serializer = self.method.serializers(content_type)
    except KeyError:
        raise webob.exc.HTTPNotAcceptable()

    # If it has an attacher, attach all the serializers for the
    # extensions
    if hasattr(serializer, 'attach'):
        for ext in reversed(self.extensions):
            try:
                serializer.attach(ext.serializers(content_type))
            except KeyError:
                pass

    # Return content type and serializer
    return content_type, serializer
[ "def", "serializer", "(", "self", ",", "req", ")", ":", "# Select the best match serializer", "content_types", "=", "self", ".", "method", ".", "serializers", ".", "get_types", "(", ")", "content_type", "=", "req", ".", "accept", ".", "best_match", "(", "content_types", ")", "if", "content_type", "is", "None", ":", "raise", "webob", ".", "exc", ".", "HTTPNotAcceptable", "(", ")", "# Select the serializer to use", "try", ":", "serializer", "=", "self", ".", "method", ".", "serializers", "(", "content_type", ")", "except", "KeyError", ":", "raise", "webob", ".", "exc", ".", "HTTPNotAcceptable", "(", ")", "# If it has an attacher, attach all the serializers for the", "# extensions", "if", "hasattr", "(", "serializer", ",", "'attach'", ")", ":", "for", "ext", "in", "reversed", "(", "self", ".", "extensions", ")", ":", "try", ":", "serializer", ".", "attach", "(", "ext", ".", "serializers", "(", "content_type", ")", ")", "except", "KeyError", ":", "pass", "# Return content type and serializer", "return", "content_type", ",", "serializer" ]
Selects and returns the serializer to use, based on the serializers declared on the action method and its extensions. The returned content type is selected based on the types available and the best match generated from the HTTP `Accept` header. Raises `HTTPNotAcceptable` if the request cannot be serialized to an acceptable media type. Returns a tuple of the content type and the serializer.
[ "Selects", "and", "returns", "the", "serializer", "to", "use", "based", "on", "the", "serializers", "declared", "on", "the", "action", "method", "and", "its", "extensions", ".", "The", "returned", "content", "type", "is", "selected", "based", "on", "the", "types", "available", "and", "the", "best", "match", "generated", "from", "the", "HTTP", "Accept", "header", ".", "Raises", "HTTPNotAcceptable", "if", "the", "request", "cannot", "be", "serialized", "to", "an", "acceptable", "media", "type", ".", "Returns", "a", "tuple", "of", "the", "content", "type", "and", "the", "serializer", "." ]
a10aa7d21d38622e984a8fe106ab37114af90dc2
https://github.com/klmitch/appathy/blob/a10aa7d21d38622e984a8fe106ab37114af90dc2/appathy/actions.py#L129-L162
242,160
klmitch/appathy
appathy/actions.py
ActionDescriptor.wrap
def wrap(self, req, result):
    """
    Wrap method return results.  The return value of the action
    method and of the action extensions is passed through this
    method before being returned to the caller.  Instances of
    `webob.Response` are thrown, to abort the rest of action and
    extension processing; otherwise, objects which are not
    instances of ResponseObject will be wrapped in one.
    """
    if isinstance(result, webob.exc.HTTPException):
        # It's a webob HTTP exception; use raise to bail out
        # immediately and pass it upstream
        raise result
    elif isinstance(result, webob.Response):
        # Straight-up webob Response object; we raise
        # AppathyResponse to bail out
        raise exceptions.AppathyResponse(result)
    elif isinstance(result, response.ResponseObject):
        # Already a ResponseObject; bind it to this descriptor
        result._bind(self)
        return result
    else:
        # Create a new, bound, ResponseObject
        return self.resp_type(req, result, _descriptor=self)
python
def wrap(self, req, result):
    """
    Wrap method return results.  The return value of the action
    method and of the action extensions is passed through this
    method before being returned to the caller.  Instances of
    `webob.Response` are thrown, to abort the rest of action and
    extension processing; otherwise, objects which are not
    instances of ResponseObject will be wrapped in one.
    """
    if isinstance(result, webob.exc.HTTPException):
        # It's a webob HTTP exception; use raise to bail out
        # immediately and pass it upstream
        raise result
    elif isinstance(result, webob.Response):
        # Straight-up webob Response object; we raise
        # AppathyResponse to bail out
        raise exceptions.AppathyResponse(result)
    elif isinstance(result, response.ResponseObject):
        # Already a ResponseObject; bind it to this descriptor
        result._bind(self)
        return result
    else:
        # Create a new, bound, ResponseObject
        return self.resp_type(req, result, _descriptor=self)
[ "def", "wrap", "(", "self", ",", "req", ",", "result", ")", ":", "if", "isinstance", "(", "result", ",", "webob", ".", "exc", ".", "HTTPException", ")", ":", "# It's a webob HTTP exception; use raise to bail out", "# immediately and pass it upstream", "raise", "result", "elif", "isinstance", "(", "result", ",", "webob", ".", "Response", ")", ":", "# Straight-up webob Response object; we raise", "# AppathyResponse to bail out", "raise", "exceptions", ".", "AppathyResponse", "(", "result", ")", "elif", "isinstance", "(", "result", ",", "response", ".", "ResponseObject", ")", ":", "# Already a ResponseObject; bind it to this descriptor", "result", ".", "_bind", "(", "self", ")", "return", "result", "else", ":", "# Create a new, bound, ResponseObject", "return", "self", ".", "resp_type", "(", "req", ",", "result", ",", "_descriptor", "=", "self", ")" ]
Wrap method return results. The return value of the action method and of the action extensions is passed through this method before being returned to the caller. Instances of `webob.Response` are thrown, to abort the rest of action and extension processing; otherwise, objects which are not instances of ResponseObject will be wrapped in one.
[ "Wrap", "method", "return", "results", ".", "The", "return", "value", "of", "the", "action", "method", "and", "of", "the", "action", "extensions", "is", "passed", "through", "this", "method", "before", "being", "returned", "to", "the", "caller", ".", "Instances", "of", "webob", ".", "Response", "are", "thrown", "to", "abort", "the", "rest", "of", "action", "and", "extension", "processing", ";", "otherwise", "objects", "which", "are", "not", "instances", "of", "ResponseObject", "will", "be", "wrapped", "in", "one", "." ]
a10aa7d21d38622e984a8fe106ab37114af90dc2
https://github.com/klmitch/appathy/blob/a10aa7d21d38622e984a8fe106ab37114af90dc2/appathy/actions.py#L231-L255
242,161
bitlabstudio/django-outlets
outlets/models.py
OutletManager.active
def active(self):
    """Returns all outlets that are currently active and have sales."""
    qs = self.get_queryset()
    return qs.filter(
        models.Q(
            models.Q(start_date__isnull=True) |
            models.Q(start_date__lte=now().date())
        ) &
        models.Q(
            models.Q(end_date__isnull=True) |
            models.Q(end_date__gte=now().date())
        )
    ).distinct()
python
def active(self):
    """Returns all outlets that are currently active and have sales."""
    qs = self.get_queryset()
    return qs.filter(
        models.Q(
            models.Q(start_date__isnull=True) |
            models.Q(start_date__lte=now().date())
        ) &
        models.Q(
            models.Q(end_date__isnull=True) |
            models.Q(end_date__gte=now().date())
        )
    ).distinct()
[ "def", "active", "(", "self", ")", ":", "qs", "=", "self", ".", "get_queryset", "(", ")", "return", "qs", ".", "filter", "(", "models", ".", "Q", "(", "models", ".", "Q", "(", "start_date__isnull", "=", "True", ")", "|", "models", ".", "Q", "(", "start_date__lte", "=", "now", "(", ")", ".", "date", "(", ")", ")", ")", "&", "models", ".", "Q", "(", "models", ".", "Q", "(", "end_date__isnull", "=", "True", ")", "|", "models", ".", "Q", "(", "end_date__gte", "=", "now", "(", ")", ".", "date", "(", ")", ")", ")", ")", ".", "distinct", "(", ")" ]
Returns all outlets that are currently active and have sales.
[ "Returns", "all", "outlets", "that", "are", "currently", "active", "and", "have", "sales", "." ]
eaecc1e8ef8fb48d6dc5886b321d9e3b0359b228
https://github.com/bitlabstudio/django-outlets/blob/eaecc1e8ef8fb48d6dc5886b321d9e3b0359b228/outlets/models.py#L14-L26
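A minimal usage sketch, assuming OutletManager is installed as the default manager on an Outlet model:

open_now = Outlet.objects.active()
# a row qualifies when start_date <= today <= end_date,
# with a NULL bound treated as open-ended
print(open_now.count())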
242,162
bitlabstudio/django-outlets
outlets/models.py
OutletManager.future
def future(self):
    """Returns all outlets that are or will be active."""
    qs = self.get_queryset()
    return qs.filter(
        models.Q(end_date__isnull=True) |
        models.Q(end_date__gte=now().date())
    )
python
def future(self):
    """Returns all outlets that are or will be active."""
    qs = self.get_queryset()
    return qs.filter(
        models.Q(end_date__isnull=True) |
        models.Q(end_date__gte=now().date())
    )
[ "def", "future", "(", "self", ")", ":", "qs", "=", "self", ".", "get_queryset", "(", ")", "return", "qs", ".", "filter", "(", "models", ".", "Q", "(", "end_date__isnull", "=", "True", ")", "|", "models", ".", "Q", "(", "end_date__gte", "=", "now", "(", ")", ".", "date", "(", ")", ")", ")" ]
Returns all outlets that are or will be active.
[ "Returns", "all", "outlets", "that", "are", "or", "will", "be", "active", "." ]
eaecc1e8ef8fb48d6dc5886b321d9e3b0359b228
https://github.com/bitlabstudio/django-outlets/blob/eaecc1e8ef8fb48d6dc5886b321d9e3b0359b228/outlets/models.py#L28-L34
242,163
political-memory/django-representatives
representatives/contrib/francedata/import_representatives.py
ensure_chambers
def ensure_chambers():
    """
    Ensures chambers are created
    """
    france = Country.objects.get(name="France")

    for key in ('AN', 'SEN'):
        variant = FranceDataVariants[key]
        Chamber.objects.get_or_create(name=variant['chamber'],
                                      abbreviation=variant['abbreviation'],
                                      country=france)
python
def ensure_chambers():
    """
    Ensures chambers are created
    """
    france = Country.objects.get(name="France")

    for key in ('AN', 'SEN'):
        variant = FranceDataVariants[key]
        Chamber.objects.get_or_create(name=variant['chamber'],
                                      abbreviation=variant['abbreviation'],
                                      country=france)
[ "def", "ensure_chambers", "(", ")", ":", "france", "=", "Country", ".", "objects", ".", "get", "(", "name", "=", "\"France\"", ")", "for", "key", "in", "(", "'AN'", ",", "'SEN'", ")", ":", "variant", "=", "FranceDataVariants", "[", "key", "]", "Chamber", ".", "objects", ".", "get_or_create", "(", "name", "=", "variant", "[", "'chamber'", "]", ",", "abbreviation", "=", "variant", "[", "'abbreviation'", "]", ",", "country", "=", "france", ")" ]
Ensures chambers are created
[ "Ensures", "chambers", "are", "created" ]
811c90d0250149e913e6196f0ab11c97d396be39
https://github.com/political-memory/django-representatives/blob/811c90d0250149e913e6196f0ab11c97d396be39/representatives/contrib/francedata/import_representatives.py#L95-L104
242,164
political-memory/django-representatives
representatives/contrib/francedata/import_representatives.py
GenericImporter.touch_model
def touch_model(self, model, **data):
    '''
    This method creates or looks up a model with the given data.
    It saves the given model if it exists, updating its updated field.
    '''
    instance, created = model.objects.get_or_create(**data)

    if not created:
        if instance.updated < self.import_start_datetime:
            instance.save()  # Updates updated field

    return (instance, created)
python
def touch_model(self, model, **data):
    '''
    This method creates or looks up a model with the given data.
    It saves the given model if it exists, updating its updated field.
    '''
    instance, created = model.objects.get_or_create(**data)

    if not created:
        if instance.updated < self.import_start_datetime:
            instance.save()  # Updates updated field

    return (instance, created)
[ "def", "touch_model", "(", "self", ",", "model", ",", "*", "*", "data", ")", ":", "instance", ",", "created", "=", "model", ".", "objects", ".", "get_or_create", "(", "*", "*", "data", ")", "if", "not", "created", ":", "if", "instance", ".", "updated", "<", "self", ".", "import_start_datetime", ":", "instance", ".", "save", "(", ")", "# Updates updated field", "return", "(", "instance", ",", "created", ")" ]
This method creates or looks up a model with the given data.
    It saves the given model if it exists, updating its updated field.
[ "This", "method", "creates", "or", "looks", "up", "a", "model", "with", "the", "given", "data", "It", "saves", "the", "given", "model", "if", "it", "exists", "updating", "its", "updated", "field" ]
811c90d0250149e913e6196f0ab11c97d396be39
https://github.com/political-memory/django-representatives/blob/811c90d0250149e913e6196f0ab11c97d396be39/representatives/contrib/francedata/import_representatives.py#L79-L92
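A minimal usage sketch inside an importer, assuming the target model has an auto-updating `updated` field; the lookup values are placeholders:

group, created = self.touch_model(model=Group,
                                  kind='committee',  # placeholder lookup data
                                  name='Finance')
# `created` is False when the row already existed; its `updated`
# timestamp is refreshed if it predates this import run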
242,165
political-memory/django-representatives
representatives/contrib/francedata/import_representatives.py
FranceDataImporter.add_mandates
def add_mandates(self, representative, rep_json):
    '''
    Create mandates from rep data based on variant configuration
    '''
    # Mandate in country group for party constituency
    if rep_json.get('parti_ratt_financier'):
        constituency, _ = Constituency.objects.get_or_create(
            name=rep_json.get('parti_ratt_financier'), country=self.france)

        group, _ = self.touch_model(model=Group,
                                    abbreviation=self.france.code,
                                    kind='country',
                                    name=self.france.name)

        _create_mandate(representative, group, constituency, 'membre')

    # Configurable mandates
    for mdef in self.variant['mandates']:
        if mdef.get('chamber', False):
            chamber = self.chamber
        else:
            chamber = None

        if 'from' in mdef:
            elems = mdef['from'](rep_json)
        else:
            elems = [rep_json]

        for elem in elems:
            name = _get_mdef_item(mdef, 'name', elem, '')
            abbr = _get_mdef_item(mdef, 'abbr', elem, '')

            group, _ = self.touch_model(model=Group,
                                        abbreviation=abbr,
                                        kind=mdef['kind'],
                                        chamber=chamber,
                                        name=name)

            role = _get_mdef_item(mdef, 'role', elem, 'membre')

            start = _get_mdef_item(mdef, 'start', elem, None)
            if start is not None:
                start = _parse_date(start)

            end = _get_mdef_item(mdef, 'end', elem, None)
            if end is not None:
                end = _parse_date(end)

            _create_mandate(representative, group, self.ch_constituency,
                            role, start, end)

            logger.debug(
                '%s => %s: %s of "%s" (%s) %s-%s' % (rep_json['slug'],
                    mdef['kind'], role, name, abbr, start, end))
python
def add_mandates(self, representative, rep_json):
    '''
    Create mandates from rep data based on variant configuration
    '''
    # Mandate in country group for party constituency
    if rep_json.get('parti_ratt_financier'):
        constituency, _ = Constituency.objects.get_or_create(
            name=rep_json.get('parti_ratt_financier'), country=self.france)

        group, _ = self.touch_model(model=Group,
                                    abbreviation=self.france.code,
                                    kind='country',
                                    name=self.france.name)

        _create_mandate(representative, group, constituency, 'membre')

    # Configurable mandates
    for mdef in self.variant['mandates']:
        if mdef.get('chamber', False):
            chamber = self.chamber
        else:
            chamber = None

        if 'from' in mdef:
            elems = mdef['from'](rep_json)
        else:
            elems = [rep_json]

        for elem in elems:
            name = _get_mdef_item(mdef, 'name', elem, '')
            abbr = _get_mdef_item(mdef, 'abbr', elem, '')

            group, _ = self.touch_model(model=Group,
                                        abbreviation=abbr,
                                        kind=mdef['kind'],
                                        chamber=chamber,
                                        name=name)

            role = _get_mdef_item(mdef, 'role', elem, 'membre')

            start = _get_mdef_item(mdef, 'start', elem, None)
            if start is not None:
                start = _parse_date(start)

            end = _get_mdef_item(mdef, 'end', elem, None)
            if end is not None:
                end = _parse_date(end)

            _create_mandate(representative, group, self.ch_constituency,
                            role, start, end)

            logger.debug(
                '%s => %s: %s of "%s" (%s) %s-%s' % (rep_json['slug'],
                    mdef['kind'], role, name, abbr, start, end))
[ "def", "add_mandates", "(", "self", ",", "representative", ",", "rep_json", ")", ":", "# Mandate in country group for party constituency", "if", "rep_json", ".", "get", "(", "'parti_ratt_financier'", ")", ":", "constituency", ",", "_", "=", "Constituency", ".", "objects", ".", "get_or_create", "(", "name", "=", "rep_json", ".", "get", "(", "'parti_ratt_financier'", ")", ",", "country", "=", "self", ".", "france", ")", "group", ",", "_", "=", "self", ".", "touch_model", "(", "model", "=", "Group", ",", "abbreviation", "=", "self", ".", "france", ".", "code", ",", "kind", "=", "'country'", ",", "name", "=", "self", ".", "france", ".", "name", ")", "_create_mandate", "(", "representative", ",", "group", ",", "constituency", ",", "'membre'", ")", "# Configurable mandates", "for", "mdef", "in", "self", ".", "variant", "[", "'mandates'", "]", ":", "if", "mdef", ".", "get", "(", "'chamber'", ",", "False", ")", ":", "chamber", "=", "self", ".", "chamber", "else", ":", "chamber", "=", "None", "if", "'from'", "in", "mdef", ":", "elems", "=", "mdef", "[", "'from'", "]", "(", "rep_json", ")", "else", ":", "elems", "=", "[", "rep_json", "]", "for", "elem", "in", "elems", ":", "name", "=", "_get_mdef_item", "(", "mdef", ",", "'name'", ",", "elem", ",", "''", ")", "abbr", "=", "_get_mdef_item", "(", "mdef", ",", "'abbr'", ",", "elem", ",", "''", ")", "group", ",", "_", "=", "self", ".", "touch_model", "(", "model", "=", "Group", ",", "abbreviation", "=", "abbr", ",", "kind", "=", "mdef", "[", "'kind'", "]", ",", "chamber", "=", "chamber", ",", "name", "=", "name", ")", "role", "=", "_get_mdef_item", "(", "mdef", ",", "'role'", ",", "elem", ",", "'membre'", ")", "start", "=", "_get_mdef_item", "(", "mdef", ",", "'start'", ",", "elem", ",", "None", ")", "if", "start", "is", "not", "None", ":", "start", "=", "_parse_date", "(", "start", ")", "end", "=", "_get_mdef_item", "(", "mdef", ",", "'end'", ",", "elem", ",", "None", ")", "if", "end", "is", "not", "None", ":", "end", "=", "_parse_date", "(", "end", ")", "_create_mandate", "(", "representative", ",", "group", ",", "self", ".", "ch_constituency", ",", "role", ",", "start", ",", "end", ")", "logger", ".", "debug", "(", "'%s => %s: %s of \"%s\" (%s) %s-%s'", "%", "(", "rep_json", "[", "'slug'", "]", ",", "mdef", "[", "'kind'", "]", ",", "role", ",", "name", ",", "abbr", ",", "start", ",", "end", ")", ")" ]
Create mandates from rep data based on variant configuration
[ "Create", "mandates", "from", "rep", "data", "based", "on", "variant", "configuration" ]
811c90d0250149e913e6196f0ab11c97d396be39
https://github.com/political-memory/django-representatives/blob/811c90d0250149e913e6196f0ab11c97d396be39/representatives/contrib/francedata/import_representatives.py#L218-L270
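A sketch of one entry in variant['mandates'] as consumed by the loop above; the exact value types accepted by _get_mdef_item are not shown in this record, so every value below is a placeholder:

mdef = {
    'kind': 'committee',
    'chamber': True,                         # bind mandates to self.chamber
    'from': lambda rep: rep['commissions'],  # hypothetical source list
    'name': 'nom', 'abbr': 'sigle',          # hypothetical item lookups
    'role': 'fonction', 'start': 'debut', 'end': 'fin',
}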
242,166
hobson/pug-dj
pug/dj/crawlnmine/crawlnmine/settings/master_common.py
env
def env(var_name, default=False):
    """ Get the environment variable. If not found use a default or False,
    but print to stderr a warning about the missing env variable."""
    try:
        value = os.environ[var_name]
        if str(value).strip().lower() in ['false', 'f', 'no', 'off',
                                          '0', 'none', 'null', '', ]:
            return None
        return value
    except:
        from traceback import format_exc
        msg = "Unable to find the %s environment variable.\nUsing the value %s (the default) instead.\n" % (var_name, default)
        sys.stderr.write(format_exc())
        sys.stderr.write(msg)
    return default
python
def env(var_name, default=False):
    """ Get the environment variable. If not found use a default or False,
    but print to stderr a warning about the missing env variable."""
    try:
        value = os.environ[var_name]
        if str(value).strip().lower() in ['false', 'f', 'no', 'off',
                                          '0', 'none', 'null', '', ]:
            return None
        return value
    except:
        from traceback import format_exc
        msg = "Unable to find the %s environment variable.\nUsing the value %s (the default) instead.\n" % (var_name, default)
        sys.stderr.write(format_exc())
        sys.stderr.write(msg)
    return default
[ "def", "env", "(", "var_name", ",", "default", "=", "False", ")", ":", "try", ":", "value", "=", "os", ".", "environ", "[", "var_name", "]", "if", "str", "(", "value", ")", ".", "strip", "(", ")", ".", "lower", "(", ")", "in", "[", "'false'", ",", "'f'", ",", "'no'", ",", "'off'", "'0'", ",", "'none'", ",", "'null'", ",", "''", ",", "]", ":", "return", "None", "return", "value", "except", ":", "from", "traceback", "import", "format_exc", "msg", "=", "\"Unable to find the %s environment variable.\\nUsing the value %s (the default) instead.\\n\"", "%", "(", "var_name", ",", "default", ")", "sys", ".", "stderr", ".", "write", "(", "format_exc", "(", ")", ")", "sys", ".", "stderr", ".", "write", "(", "msg", ")", "return", "default" ]
Get the environment variable. If not found use a default or False, but print to stderr a warning about the missing env variable.
[ "Get", "the", "environment", "variable", ".", "If", "not", "found", "use", "a", "default", "or", "False", "but", "print", "to", "stderr", "a", "warning", "about", "the", "missing", "env", "variable", "." ]
55678b08755a55366ce18e7d3b8ea8fa4491ab04
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/crawlnmine/crawlnmine/settings/master_common.py#L21-L33
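Usage sketch; the variable names are placeholders, and note that falsy-looking string values collapse to None:

import os
os.environ['DJANGO_DEBUG'] = 'False'

DEBUG = env('DJANGO_DEBUG', default=False)  # -> None ('false' is in the falsy list)
KEY = env('DJANGO_SECRET', 'dev-key')       # missing -> warning on stderr, then 'dev-key'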
242,167
esterhui/pypu
scripts/build_json_from_gps.py
lookupfile
def lookupfile(filename):
    """Returns dictionary of file content as well as google reverse GEO data"""
    logger.info('Looking up %s'%(filename))
    # First open cache file to see if we already looked up this stuff
    dirname=os.path.dirname(filename)
    basefilename=os.path.basename(filename)
    CACHE_FILE = os.path.join(dirname,'.'+basefilename+'.gpscache')
    cache=loadCacheFile(CACHE_FILE)

    # Get the input file
    positions=parsePositionFile(filename)

    # If load didn't work, read again and lookup
    if not cache:
        logger.info("%s - No cache file found, looking up location"\
                %(basefilename))
        cache=lookupGeoInfo(positions)
        # Save to DB
        json.dump(cache,open(CACHE_FILE,'w'))
    else:
        logger.info("%s - Found cache file for locations"\
                %(basefilename))

    return positions,cache
python
def lookupfile(filename):
    """Returns dictionary of file content as well as google reverse GEO data"""
    logger.info('Looking up %s'%(filename))
    # First open cache file to see if we already looked up this stuff
    dirname=os.path.dirname(filename)
    basefilename=os.path.basename(filename)
    CACHE_FILE = os.path.join(dirname,'.'+basefilename+'.gpscache')
    cache=loadCacheFile(CACHE_FILE)

    # Get the input file
    positions=parsePositionFile(filename)

    # If load didn't work, read again and lookup
    if not cache:
        logger.info("%s - No cache file found, looking up location"\
                %(basefilename))
        cache=lookupGeoInfo(positions)
        # Save to DB
        json.dump(cache,open(CACHE_FILE,'w'))
    else:
        logger.info("%s - Found cache file for locations"\
                %(basefilename))

    return positions,cache
[ "def", "lookupfile", "(", "filename", ")", ":", "logger", ".", "info", "(", "'Looking up %s'", "%", "(", "filename", ")", ")", "# First open cache file to see if we already looked up this stuff", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "filename", ")", "basefilename", "=", "os", ".", "path", ".", "basename", "(", "filename", ")", "CACHE_FILE", "=", "os", ".", "path", ".", "join", "(", "dirname", ",", "'.'", "+", "basefilename", "+", "'.gpscache'", ")", "cache", "=", "loadCacheFile", "(", "CACHE_FILE", ")", "# Get the input file", "positions", "=", "parsePositionFile", "(", "filename", ")", "# If load didn't work, read again and lookup", "if", "not", "cache", ":", "logger", ".", "info", "(", "\"%s - No cache file found, looking up location\"", "%", "(", "basefilename", ")", ")", "cache", "=", "lookupGeoInfo", "(", "positions", ")", "# Save to DB", "json", ".", "dump", "(", "cache", ",", "open", "(", "CACHE_FILE", ",", "'w'", ")", ")", "else", ":", "logger", ".", "info", "(", "\"%s - Found cache file for locations\"", "%", "(", "basefilename", ")", ")", "return", "positions", ",", "cache" ]
Returns dictionary of file content as well as google reverse GEO data
[ "Returns", "dictionary", "of", "file", "content", "as", "well", "as", "google", "reverse", "GEO", "data" ]
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/scripts/build_json_from_gps.py#L75-L101
242,168
esterhui/pypu
scripts/build_json_from_gps.py
parsePositionFile
def parsePositionFile(filename):
    """ Parses Android GPS logger csv file and returns list of dictionaries """
    l=[]
    with open( filename, "rb" ) as theFile:
        reader = csv.DictReader( theFile )
        for line in reader:
            # Convert the time string to something
            # a bit more human readable
            mytime=dateparser.parse(line['time'])
            line['strtime']=mytime.strftime("%d %b %Y, %H:%M UTC")
            l.append(line)
    return l
python
def parsePositionFile(filename):
    """ Parses Android GPS logger csv file and returns list of dictionaries """
    l=[]
    with open( filename, "rb" ) as theFile:
        reader = csv.DictReader( theFile )
        for line in reader:
            # Convert the time string to something
            # a bit more human readable
            mytime=dateparser.parse(line['time'])
            line['strtime']=mytime.strftime("%d %b %Y, %H:%M UTC")
            l.append(line)
    return l
[ "def", "parsePositionFile", "(", "filename", ")", ":", "l", "=", "[", "]", "with", "open", "(", "filename", ",", "\"rb\"", ")", "as", "theFile", ":", "reader", "=", "csv", ".", "DictReader", "(", "theFile", ")", "for", "line", "in", "reader", ":", "# Convert the time string to something", "# a bit more human readable", "mytime", "=", "dateparser", ".", "parse", "(", "line", "[", "'time'", "]", ")", "line", "[", "'strtime'", "]", "=", "mytime", ".", "strftime", "(", "\"%d %b %Y, %H:%M UTC\"", ")", "l", ".", "append", "(", "line", ")", "return", "l" ]
Parses Android GPS logger csv file and returns list of dictionaries
[ "Parses", "Android", "GPS", "logger", "csv", "file", "and", "returns", "list", "of", "dictionaries" ]
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/scripts/build_json_from_gps.py#L154-L167
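A minimal usage sketch; the column set beyond `time` is an assumption about the logger's CSV export:

rows = parsePositionFile('positions.csv')
# each row is the DictReader dict plus the derived 'strtime' key, e.g.
# {'time': '2015-06-01T14:03:00Z', 'lat': '37.7749', 'lon': '-122.4194',
#  'strtime': '01 Jun 2015, 14:03 UTC'}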
242,169
datakortet/dkfileutils
dkfileutils/changed.py
changed
def changed(dirname, filename='.md5', args=None, glob=None):
    """Has `glob` changed in `dirname`

       Args:
           dirname: directory to measure
           filename: filename to store checksum
    """
    root = Path(dirname)
    if not root.exists():
        # if dirname doesn't exist it is changed (by definition)
        return True
    cachefile = root / filename
    current_digest = cachefile.open().read() if cachefile.exists() else ""
    _digest = digest(dirname, glob=glob)
    if args and args.verbose:  # pragma: nocover
        print("md5:", _digest)
    has_changed = current_digest != _digest
    if has_changed:
        with open(os.path.join(dirname, filename), 'w') as fp:
            fp.write(_digest)
    return has_changed
python
def changed(dirname, filename='.md5', args=None, glob=None):
    """Has `glob` changed in `dirname`

       Args:
           dirname: directory to measure
           filename: filename to store checksum
    """
    root = Path(dirname)
    if not root.exists():
        # if dirname doesn't exist it is changed (by definition)
        return True
    cachefile = root / filename
    current_digest = cachefile.open().read() if cachefile.exists() else ""
    _digest = digest(dirname, glob=glob)
    if args and args.verbose:  # pragma: nocover
        print("md5:", _digest)
    has_changed = current_digest != _digest
    if has_changed:
        with open(os.path.join(dirname, filename), 'w') as fp:
            fp.write(_digest)
    return has_changed
[ "def", "changed", "(", "dirname", ",", "filename", "=", "'.md5'", ",", "args", "=", "None", ",", "glob", "=", "None", ")", ":", "root", "=", "Path", "(", "dirname", ")", "if", "not", "root", ".", "exists", "(", ")", ":", "# if dirname doesn't exist it is changed (by definition)", "return", "True", "cachefile", "=", "root", "/", "filename", "current_digest", "=", "cachefile", ".", "open", "(", ")", ".", "read", "(", ")", "if", "cachefile", ".", "exists", "(", ")", "else", "\"\"", "_digest", "=", "digest", "(", "dirname", ",", "glob", "=", "glob", ")", "if", "args", "and", "args", ".", "verbose", ":", "# pragma: nocover", "print", "(", "\"md5:\"", ",", "_digest", ")", "has_changed", "=", "current_digest", "!=", "_digest", "if", "has_changed", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "dirname", ",", "filename", ")", ",", "'w'", ")", "as", "fp", ":", "fp", ".", "write", "(", "_digest", ")", "return", "has_changed" ]
Has `glob` changed in `dirname`

    Args:
        dirname: directory to measure
        filename: filename to store checksum
[ "Has", "glob", "changed", "in", "dirname" ]
924098d6e2edf88ad9b3ffdec9c74530f80a7d77
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/dkfileutils/changed.py#L28-L52
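A minimal usage sketch, assuming the directory exists and nothing is modified between the two calls:

if changed('static/less', glob='*.less'):
    recompile()  # hypothetical build step; also runs on the first call, before any digest exists
assert not changed('static/less', glob='*.less')  # the digest was just written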
242,170
datakortet/dkfileutils
dkfileutils/changed.py
main
def main():  # pragma: nocover
    """Return exit code of zero iff directory is not changed.
    """
    p = argparse.ArgumentParser()
    p.add_argument(
        'directory',
        help="Directory to check"
    )
    p.add_argument(
        '--verbose', '-v',
        action='store_true',
        help="increase verbosity"
    )
    args = p.parse_args()
    import sys
    _changed = changed(sys.argv[1], args=args)
    sys.exit(_changed)
python
def main():  # pragma: nocover
    """Return exit code of zero iff directory is not changed.
    """
    p = argparse.ArgumentParser()
    p.add_argument(
        'directory',
        help="Directory to check"
    )
    p.add_argument(
        '--verbose', '-v',
        action='store_true',
        help="increase verbosity"
    )
    args = p.parse_args()
    import sys
    _changed = changed(sys.argv[1], args=args)
    sys.exit(_changed)
[ "def", "main", "(", ")", ":", "# pragma: nocover", "p", "=", "argparse", ".", "ArgumentParser", "(", ")", "p", ".", "add_argument", "(", "'directory'", ",", "help", "=", "\"Directory to check\"", ")", "p", ".", "add_argument", "(", "'--verbose'", ",", "'-v'", ",", "action", "=", "'store_true'", ",", "help", "=", "\"increase verbosity\"", ")", "args", "=", "p", ".", "parse_args", "(", ")", "import", "sys", "_changed", "=", "changed", "(", "sys", ".", "argv", "[", "1", "]", ",", "args", "=", "args", ")", "sys", ".", "exit", "(", "_changed", ")" ]
Return exit code of zero iff directory is not changed.
[ "Return", "exit", "code", "of", "zero", "iff", "directory", "is", "not", "changed", "." ]
924098d6e2edf88ad9b3ffdec9c74530f80a7d77
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/dkfileutils/changed.py#L67-L83
242,171
datakortet/dkfileutils
dkfileutils/changed.py
Directory.changed
def changed(self, filename='.md5', glob=None):
    """Are any of the files matched by ``glob`` changed?
    """
    if glob is not None:
        filename += '.glob-' + ''.join(ch.lower()
                                       for ch in glob if ch.isalpha())
    return changed(self, filename, glob=glob)
python
def changed(self, filename='.md5', glob=None):
    """Are any of the files matched by ``glob`` changed?
    """
    if glob is not None:
        filename += '.glob-' + ''.join(ch.lower()
                                       for ch in glob if ch.isalpha())
    return changed(self, filename, glob=glob)
[ "def", "changed", "(", "self", ",", "filename", "=", "'.md5'", ",", "glob", "=", "None", ")", ":", "if", "glob", "is", "not", "None", ":", "filename", "+=", "'.glob-'", "+", "''", ".", "join", "(", "ch", ".", "lower", "(", ")", "for", "ch", "in", "glob", "if", "ch", ".", "isalpha", "(", ")", ")", "return", "changed", "(", "self", ",", "filename", ",", "glob", "=", "glob", ")" ]
Are any of the files matched by ``glob`` changed?
[ "Are", "any", "of", "the", "files", "matched", "by", "glob", "changed?" ]
924098d6e2edf88ad9b3ffdec9c74530f80a7d77
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/dkfileutils/changed.py#L58-L64
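A worked sketch of the cache-file naming above: only the letters of the glob survive, so '*.less' becomes the suffix 'less' (the Directory construction is assumed):

d = Directory('static/less')
d.changed(glob='*.less')  # checksum kept in 'static/less/.md5.glob-less'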
242,172
mikkeljans/pyconomic
pyconomic/client.py
EconomicsFacade.create_order
def create_order(
        self, debtor, is_vat_included=True, due_date=None, heading='',
        text_line1='', text_line2='', debtor_data=None, delivery_data=None,
        products=None, project=None, other_reference='', model=models.Order,
        **extra
):
    """Create a new Order.

    Args:
        debtor (Debtor): the debtor of the order
        debtor_data (mapping): map of debtor data {'postal_code': .., 'city': .., 'ean': ..}
            defaults to values on debtor instance for missing values
        delivery_data (mapping): map of delivery data {'address': ..., 'postal_code': ...}
            defaults to values on debtor instance for missing values
        due_date (datetime): due date
        heading (string): heading to be displayed in the order pdf
        text_line1 (string): first order description line
        text_line2 (string): second order description line
        other_reference (string): custom string to be used for identification
        extra (mapping): mapping of extra values to be passed in to the server call

    Returns:
        Order instance
    """
    debtor_data = debtor_data or {}
    delivery_data = delivery_data or {}
    delivery_date = delivery_data.get('date', datetime.datetime.now())
    our_reference = extra.get('our_reference', debtor.our_reference)
    currency = extra.get('currency', debtor.currency)
    layout = extra.get('layout', debtor.layout)
    term_of_payment = extra.get('term_of_payment', debtor.term_of_payment)
    date = extra.get('date', datetime.datetime.now())
    order_input = {
        'debtor': debtor,
        'number': extra.get('number', 1),
        'project': project,
    }
    for dd in ['name', 'address', 'postal_code', 'city', 'country', 'ean']:
        order_input['debtor_%s' % dd] = debtor_data.get(dd, getattr(debtor, dd))
    for dd in ['address', 'postal_code', 'city', 'country']:
        order_input['delivery_%s' % dd] = delivery_data.get(dd, getattr(debtor, dd))
    order_input.update({
        'delivery_date': delivery_date or datetime.datetime.now(),
        'heading': heading,
        'text_line1': text_line1,
        'text_line2': text_line2,
        'is_archived': extra.get('is_archived', 0),
        'is_sent': extra.get('is_sent', 0),
        'net_amount': extra.get('net_amount', 0),
        'vat_amount': extra.get('vat_amount', 0),
        'gross_amount': extra.get('gross_amount', 0),
        'margin': extra.get('margin', 0),
        'margin_as_percent': extra.get('margin_as_percent', 0),
        'date': date,
        'our_reference': our_reference,
        'other_reference': other_reference,
        'currency': currency,
        'exchange_rate': extra.get('exchange_rate', 1.0),
        'is_vat_included': is_vat_included,
        'layout': layout,
        'due_date': due_date or datetime.datetime.now(),
        'term_of_payment': term_of_payment
    })
    order_input.update(extra)
    order = self.create(model, **order_input)
    if products:
        for product in products:
            self.create_orderline(order, product)
    return order
python
def create_order(
        self, debtor, is_vat_included=True, due_date=None, heading='',
        text_line1='', text_line2='', debtor_data=None, delivery_data=None,
        products=None, project=None, other_reference='', model=models.Order,
        **extra
):
    """Create a new Order.

    Args:
        debtor (Debtor): the debtor of the order
        debtor_data (mapping): map of debtor data {'postal_code': .., 'city': .., 'ean': ..}
            defaults to values on debtor instance for missing values
        delivery_data (mapping): map of delivery data {'address': ..., 'postal_code': ...}
            defaults to values on debtor instance for missing values
        due_date (datetime): due date
        heading (string): heading to be displayed in the order pdf
        text_line1 (string): first order description line
        text_line2 (string): second order description line
        other_reference (string): custom string to be used for identification
        extra (mapping): mapping of extra values to be passed in to the server call

    Returns:
        Order instance
    """
    debtor_data = debtor_data or {}
    delivery_data = delivery_data or {}
    delivery_date = delivery_data.get('date', datetime.datetime.now())
    our_reference = extra.get('our_reference', debtor.our_reference)
    currency = extra.get('currency', debtor.currency)
    layout = extra.get('layout', debtor.layout)
    term_of_payment = extra.get('term_of_payment', debtor.term_of_payment)
    date = extra.get('date', datetime.datetime.now())
    order_input = {
        'debtor': debtor,
        'number': extra.get('number', 1),
        'project': project,
    }
    for dd in ['name', 'address', 'postal_code', 'city', 'country', 'ean']:
        order_input['debtor_%s' % dd] = debtor_data.get(dd, getattr(debtor, dd))
    for dd in ['address', 'postal_code', 'city', 'country']:
        order_input['delivery_%s' % dd] = delivery_data.get(dd, getattr(debtor, dd))
    order_input.update({
        'delivery_date': delivery_date or datetime.datetime.now(),
        'heading': heading,
        'text_line1': text_line1,
        'text_line2': text_line2,
        'is_archived': extra.get('is_archived', 0),
        'is_sent': extra.get('is_sent', 0),
        'net_amount': extra.get('net_amount', 0),
        'vat_amount': extra.get('vat_amount', 0),
        'gross_amount': extra.get('gross_amount', 0),
        'margin': extra.get('margin', 0),
        'margin_as_percent': extra.get('margin_as_percent', 0),
        'date': date,
        'our_reference': our_reference,
        'other_reference': other_reference,
        'currency': currency,
        'exchange_rate': extra.get('exchange_rate', 1.0),
        'is_vat_included': is_vat_included,
        'layout': layout,
        'due_date': due_date or datetime.datetime.now(),
        'term_of_payment': term_of_payment
    })
    order_input.update(extra)
    order = self.create(model, **order_input)
    if products:
        for product in products:
            self.create_orderline(order, product)
    return order
[ "def", "create_order", "(", "self", ",", "debtor", ",", "is_vat_included", "=", "True", ",", "due_date", "=", "None", ",", "heading", "=", "''", ",", "text_line1", "=", "''", ",", "text_line2", "=", "''", ",", "debtor_data", "=", "None", ",", "delivery_data", "=", "None", ",", "products", "=", "None", ",", "project", "=", "None", ",", "other_reference", "=", "''", ",", "model", "=", "models", ".", "Order", ",", "*", "*", "extra", ")", ":", "debtor_data", "=", "debtor_data", "or", "{", "}", "delivery_data", "=", "delivery_data", "or", "{", "}", "delivery_date", "=", "delivery_data", ".", "get", "(", "'date'", ",", "datetime", ".", "datetime", ".", "now", "(", ")", ")", "our_reference", "=", "extra", ".", "get", "(", "'our_reference'", ",", "debtor", ".", "our_reference", ")", "currency", "=", "extra", ".", "get", "(", "'currency'", ",", "debtor", ".", "currency", ")", "layout", "=", "extra", ".", "get", "(", "'layout'", ",", "debtor", ".", "layout", ")", "term_of_payment", "=", "extra", ".", "get", "(", "'term_of_payment'", ",", "debtor", ".", "term_of_payment", ")", "date", "=", "extra", ".", "get", "(", "'date'", ",", "datetime", ".", "datetime", ".", "now", "(", ")", ")", "order_input", "=", "{", "'debtor'", ":", "debtor", ",", "'number'", ":", "extra", ".", "get", "(", "'number'", ",", "1", ")", ",", "'project'", ":", "project", ",", "}", "for", "dd", "in", "[", "'name'", ",", "'address'", ",", "'postal_code'", ",", "'city'", ",", "'country'", ",", "'ean'", "]", ":", "order_input", "[", "'debtor_%s'", "%", "dd", "]", "=", "debtor_data", ".", "get", "(", "dd", ",", "getattr", "(", "debtor", ",", "dd", ")", ")", "for", "dd", "in", "[", "'address'", ",", "'postal_code'", ",", "'city'", ",", "'country'", "]", ":", "order_input", "[", "'delivery_%s'", "%", "dd", "]", "=", "delivery_data", ".", "get", "(", "dd", ",", "getattr", "(", "debtor", ",", "dd", ")", ")", "order_input", ".", "update", "(", "{", "'delivery_date'", ":", "delivery_date", "or", "datetime", ".", "datetime", ".", "now", "(", ")", ",", "'heading'", ":", "heading", ",", "'text_line1'", ":", "text_line1", ",", "'text_line2'", ":", "text_line2", ",", "'is_archived'", ":", "extra", ".", "get", "(", "'is_archived'", ",", "0", ")", ",", "'is_sent'", ":", "extra", ".", "get", "(", "'is_sent'", ",", "0", ")", ",", "'net_amount'", ":", "extra", ".", "get", "(", "'net_amount'", ",", "0", ")", ",", "'vat_amount'", ":", "extra", ".", "get", "(", "'vat_amount'", ",", "0", ")", ",", "'gross_amount'", ":", "extra", ".", "get", "(", "'gross_amount'", ",", "0", ")", ",", "'margin'", ":", "extra", ".", "get", "(", "'margin'", ",", "0", ")", ",", "'margin_as_percent'", ":", "extra", ".", "get", "(", "'margin_as_percent'", ",", "0", ")", ",", "'date'", ":", "date", ",", "'our_reference'", ":", "our_reference", ",", "'other_reference'", ":", "other_reference", ",", "'currency'", ":", "currency", ",", "'exchange_rate'", ":", "extra", ".", "get", "(", "'exchange_rate'", ",", "1.0", ")", ",", "'is_vat_included'", ":", "is_vat_included", ",", "'layout'", ":", "layout", ",", "'due_date'", ":", "due_date", "or", "datetime", ".", "datetime", ".", "now", "(", ")", ",", "'term_of_payment'", ":", "term_of_payment", "}", ")", "order_input", ".", "update", "(", "extra", ")", "order", "=", "self", ".", "create", "(", "model", ",", "*", "*", "order_input", ")", "if", "products", ":", "for", "product", "in", "products", ":", "self", ".", "create_orderline", "(", "order", ",", "product", ")", "return", "order" ]
Create a new Order.

Args:
    debtor (Debtor): the debtor of the order
    debtor_data (mapping): map of debtor data
        {'postal_code': .., 'city': .., 'ean': ..}
        defaults to values on the debtor instance for missing values
    delivery_data (mapping): map of delivery data {'address': ..., 'postal_code': ...}
        defaults to values on the debtor instance for missing values
    due_date (datetime): due date
    heading (string): heading to be displayed in the order pdf
    text_line1 (string): first order description line
    text_line2 (string): second order description line
    other_reference (string): custom string to be used for identification
    extra (mapping): mapping of extra values to be passed to the server call

Returns:
    Order instance
[ "Create", "a", "new", "Order", "." ]
845b8148a364cf5be9065f8a70133d4f16ab645d
https://github.com/mikkeljans/pyconomic/blob/845b8148a364cf5be9065f8a70133d4f16ab645d/pyconomic/client.py#L61-L130
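A minimal usage sketch for create_order, assuming a hypothetical client instance and a debtor object fetched elsewhere; the product variables are illustrative and not part of the record above.

import datetime

# client, debtor, product_a and product_b are assumed to come from earlier API calls
order = client.create_order(
    debtor,
    is_vat_included=True,
    due_date=datetime.datetime.now() + datetime.timedelta(days=30),
    heading='Order 2024-001',
    debtor_data={'city': 'Copenhagen'},    # overrides debtor.city on this order only
    delivery_data={'address': 'Dock 7'},   # missing delivery fields default to the debtor's
    products=[product_a, product_b],       # one orderline is created per product
)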
242,173
bwesterb/py-joyce
src/comet.py
CometJoyceServerRelay.__flush
def __flush(self, is_async=True):
    """ Flushes messages through current HttpRequest and closes it.

    It assumes a current requesthandler and requires a lock on
    self.lock """
    rh = self.rh
    messages = list(self.messages)
    stream_notices = list(self.stream_notices)
    self.stream_notices = []
    self.messages = []
    args = (rh, messages, stream_notices)
    if is_async:
        self.hub.threadPool.execute_named(self.__inner_flush,
                '%s __inner_flush' % self.hub.l.name, *args)
    else:
        self.__inner_flush(*args)
    self.rh = None
    self._set_timeout(int(time.time() + self.hub.timeout))
python
def __flush(self, is_async=True):
    """ Flushes messages through current HttpRequest and closes it.

    It assumes a current requesthandler and requires a lock on
    self.lock """
    rh = self.rh
    messages = list(self.messages)
    stream_notices = list(self.stream_notices)
    self.stream_notices = []
    self.messages = []
    args = (rh, messages, stream_notices)
    if is_async:
        self.hub.threadPool.execute_named(self.__inner_flush,
                '%s __inner_flush' % self.hub.l.name, *args)
    else:
        self.__inner_flush(*args)
    self.rh = None
    self._set_timeout(int(time.time() + self.hub.timeout))
[ "def", "__flush", "(", "self", ",", "async", "=", "True", ")", ":", "rh", "=", "self", ".", "rh", "messages", "=", "list", "(", "self", ".", "messages", ")", "stream_notices", "=", "list", "(", "self", ".", "stream_notices", ")", "self", ".", "stream_notices", "=", "[", "]", "self", ".", "messages", "=", "[", "]", "args", "=", "(", "rh", ",", "messages", ",", "stream_notices", ")", "if", "async", ":", "self", ".", "hub", ".", "threadPool", ".", "execute_named", "(", "self", ".", "__inner_flush", ",", "'%s __inner__flush'", "%", "self", ".", "hub", ".", "l", ".", "name", ",", "*", "args", ")", "else", ":", "self", ".", "__inner_flush", "(", "*", "args", ")", "self", ".", "rh", "=", "None", "self", ".", "_set_timeout", "(", "int", "(", "time", ".", "time", "(", ")", "+", "self", ".", "hub", ".", "timeout", ")", ")" ]
Flushes messages through current HttpRequest and closes it. It assumes a current requesthandler and requires a lock on self.lock
[ "Flushes", "messages", "through", "current", "HttpRequest", "and", "closes", "it", ".", "It", "assumes", "a", "current", "requesthandler", "and", "requires", "a", "lock", "on", "self", ".", "lock" ]
ad1c99ad3939e70b247a18a1a0ef537b037979a0
https://github.com/bwesterb/py-joyce/blob/ad1c99ad3939e70b247a18a1a0ef537b037979a0/src/comet.py#L280-L296
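The method above follows a snapshot-then-dispatch pattern: shared buffers are copied and cleared while the caller holds self.lock, so the (possibly asynchronous) flush operates on a private copy. A generic, self-contained sketch of the same idea, with illustrative names not taken from py-joyce:

import threading

class BufferedSink(object):
    def __init__(self):
        self.lock = threading.Lock()
        self.messages = []

    def flush(self, deliver):
        with self.lock:                  # snapshot and reset under the lock
            batch = list(self.messages)
            self.messages = []
        deliver(batch)                   # deliver outside the lock, possibly on another thread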
242,174
maceoutliner/django-fiction-outlines
fiction_outlines/views.py
OutlineExport.return_opml_response
def return_opml_response(self, context, **response_kwargs): ''' Returns export data as an opml file. ''' self.template_name = 'fiction_outlines/outline.opml' response = super().render_to_response(context, content_type='text/xml', **response_kwargs) response['Content-Disposition'] = 'attachment; filename="{}.opml"'.format(slugify(self.object.title)) return response
python
def return_opml_response(self, context, **response_kwargs): ''' Returns export data as an opml file. ''' self.template_name = 'fiction_outlines/outline.opml' response = super().render_to_response(context, content_type='text/xml', **response_kwargs) response['Content-Disposition'] = 'attachment; filename="{}.opml"'.format(slugify(self.object.title)) return response
[ "def", "return_opml_response", "(", "self", ",", "context", ",", "*", "*", "response_kwargs", ")", ":", "self", ".", "template_name", "=", "'fiction_outlines/outline.opml'", "response", "=", "super", "(", ")", ".", "render_to_response", "(", "context", ",", "content_type", "=", "'text/xml'", ",", "*", "*", "response_kwargs", ")", "response", "[", "'Content-Disposition'", "]", "=", "'attachment; filename=\"{}.opml\"'", ".", "format", "(", "slugify", "(", "self", ".", "object", ".", "title", ")", ")", "return", "response" ]
Returns export data as an opml file.
[ "Returns", "export", "data", "as", "an", "opml", "file", "." ]
6c58e356af3fbe7b23557643ba27e46eaef9d4e3
https://github.com/maceoutliner/django-fiction-outlines/blob/6c58e356af3fbe7b23557643ba27e46eaef9d4e3/fiction_outlines/views.py#L1261-L1268
242,175
maceoutliner/django-fiction-outlines
fiction_outlines/views.py
OutlineExport.render_to_response
def render_to_response(self, context, **response_kwargs): ''' Compares requested format to supported formats and routes the response. :attribute switcher: A dictionary of format types and their respective response methods. ''' switcher = { 'json': self.return_json_response, 'opml': self.return_opml_response, 'md': self.return_md_response, 'textbundle': self.not_implemented, 'xlsx': self.not_implemented, } if self.format not in switcher.keys(): return self.not_implemented(context, **response_kwargs) return switcher[self.format](context, **response_kwargs)
python
def render_to_response(self, context, **response_kwargs): ''' Compares requested format to supported formats and routes the response. :attribute switcher: A dictionary of format types and their respective response methods. ''' switcher = { 'json': self.return_json_response, 'opml': self.return_opml_response, 'md': self.return_md_response, 'textbundle': self.not_implemented, 'xlsx': self.not_implemented, } if self.format not in switcher.keys(): return self.not_implemented(context, **response_kwargs) return switcher[self.format](context, **response_kwargs)
[ "def", "render_to_response", "(", "self", ",", "context", ",", "*", "*", "response_kwargs", ")", ":", "switcher", "=", "{", "'json'", ":", "self", ".", "return_json_response", ",", "'opml'", ":", "self", ".", "return_opml_response", ",", "'md'", ":", "self", ".", "return_md_response", ",", "'textbundle'", ":", "self", ".", "not_implemented", ",", "'xlsx'", ":", "self", ".", "not_implemented", ",", "}", "if", "self", ".", "format", "not", "in", "switcher", ".", "keys", "(", ")", ":", "return", "self", ".", "not_implemented", "(", "context", ",", "*", "*", "response_kwargs", ")", "return", "switcher", "[", "self", ".", "format", "]", "(", "context", ",", "*", "*", "response_kwargs", ")" ]
Compares requested format to supported formats and routes the response. :attribute switcher: A dictionary of format types and their respective response methods.
[ "Compares", "requested", "format", "to", "supported", "formats", "and", "routes", "the", "response", "." ]
6c58e356af3fbe7b23557643ba27e46eaef9d4e3
https://github.com/maceoutliner/django-fiction-outlines/blob/6c58e356af3fbe7b23557643ba27e46eaef9d4e3/fiction_outlines/views.py#L1340-L1355
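The dispatch-table idiom used above generalizes to any format router; a standalone sketch with illustrative handlers:

def export(fmt, data):
    handlers = {
        'json': lambda d: ('application/json', d),
        'opml': lambda d: ('text/xml', d),
    }
    # dict.get with a default plays the role of the not_implemented fallback
    handler = handlers.get(fmt, lambda d: ('text/plain', 'not implemented'))
    return handler(data)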
242,176
anjos/rrbob
rr/analysis.py
CER
def CER(prediction, true_labels): """ Calculates the classification error rate for an N-class classification problem Parameters: prediction (numpy.ndarray): A 1D :py:class:`numpy.ndarray` containing your prediction true_labels (numpy.ndarray): A 1D :py:class:`numpy.ndarray` containing the ground truth labels for the input array, organized in the same order. """ errors = (prediction != true_labels).sum() return float(errors)/len(prediction)
python
def CER(prediction, true_labels): """ Calculates the classification error rate for an N-class classification problem Parameters: prediction (numpy.ndarray): A 1D :py:class:`numpy.ndarray` containing your prediction true_labels (numpy.ndarray): A 1D :py:class:`numpy.ndarray` containing the ground truth labels for the input array, organized in the same order. """ errors = (prediction != true_labels).sum() return float(errors)/len(prediction)
[ "def", "CER", "(", "prediction", ",", "true_labels", ")", ":", "errors", "=", "(", "prediction", "!=", "true_labels", ")", ".", "sum", "(", ")", "return", "float", "(", "errors", ")", "/", "len", "(", "prediction", ")" ]
Calculates the classification error rate for an N-class classification problem Parameters: prediction (numpy.ndarray): A 1D :py:class:`numpy.ndarray` containing your prediction true_labels (numpy.ndarray): A 1D :py:class:`numpy.ndarray` containing the ground truth labels for the input array, organized in the same order.
[ "Calculates", "the", "classification", "error", "rate", "for", "an", "N", "-", "class", "classification", "problem" ]
d32d35bab2aa2698d3caa923fd02afb6d67f3235
https://github.com/anjos/rrbob/blob/d32d35bab2aa2698d3caa923fd02afb6d67f3235/rr/analysis.py#L6-L23
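A quick sanity check of CER with toy arrays (requires numpy):

import numpy as np

prediction = np.array([0, 1, 2, 1])
true_labels = np.array([0, 1, 1, 1])
assert CER(prediction, true_labels) == 0.25   # one error out of four predictions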
242,177
lvh/axiombench
axiombench/query.py
benchmark
def benchmark(store, n=10000): """ Iterates over all of the referreds, and then iterates over all of the referrers that refer to each one. Fairly item instantiation heavy. """ R = Referrer for referred in store.query(Referred): for _reference in store.query(R, R.reference == referred): pass
python
def benchmark(store, n=10000): """ Iterates over all of the referreds, and then iterates over all of the referrers that refer to each one. Fairly item instantiation heavy. """ R = Referrer for referred in store.query(Referred): for _reference in store.query(R, R.reference == referred): pass
[ "def", "benchmark", "(", "store", ",", "n", "=", "10000", ")", ":", "R", "=", "Referrer", "for", "referred", "in", "store", ".", "query", "(", "Referred", ")", ":", "for", "_reference", "in", "store", ".", "query", "(", "R", ",", "R", ".", "reference", "==", "referred", ")", ":", "pass" ]
Iterates over all of the referreds, and then iterates over all of the referrers that refer to each one. Fairly item instantiation heavy.
[ "Iterates", "over", "all", "of", "the", "referreds", "and", "then", "iterates", "over", "all", "of", "the", "referrers", "that", "refer", "to", "each", "one", "." ]
dd783abfde23b0c67d7a74152d372c4c51e112aa
https://github.com/lvh/axiombench/blob/dd783abfde23b0c67d7a74152d372c4c51e112aa/axiombench/query.py#L21-L32
242,178
deviantony/valigator
valigator/utils.py
generate_uuid
def generate_uuid():
    """Generate a URL-safe base64-encoded UUID string."""
    r_uuid = base64.urlsafe_b64encode(uuid.uuid4().bytes)
    return r_uuid.decode().replace('=', '')
python
def generate_uuid():
    """Generate a URL-safe base64-encoded UUID string."""
    r_uuid = base64.urlsafe_b64encode(uuid.uuid4().bytes)
    return r_uuid.decode().replace('=', '')
[ "def", "generate_uuid", "(", ")", ":", "r_uuid", "=", "base64", ".", "urlsafe_b64encode", "(", "uuid", ".", "uuid4", "(", ")", ".", "bytes", ")", "return", "r_uuid", ".", "decode", "(", ")", ".", "replace", "(", "'='", ",", "''", ")" ]
Generate a URL-safe base64-encoded UUID string.
[ "Generate", "a", "UUID", "." ]
0557029bc58ea1270e358c14ca382d3807ed5b6f
https://github.com/deviantony/valigator/blob/0557029bc58ea1270e358c14ca382d3807ed5b6f/valigator/utils.py#L9-L12
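Because the '=' padding is stripped, the result is always a 22-character URL-safe string (a uuid4 is 16 bytes, which base64 encodes to 24 characters including 2 characters of padding):

uid = generate_uuid()
assert len(uid) == 22
assert '=' not in uid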
242,179
deviantony/valigator
valigator/utils.py
extract_archive
def extract_archive(archive_path, destination_path):
    """Extracts an archive somewhere on the filesystem."""
    with tarfile.open(archive_path) as tar:
        tar.errorlevel = 1
        tar.extractall(destination_path)
python
def extract_archive(archive_path, destination_path):
    """Extracts an archive somewhere on the filesystem."""
    with tarfile.open(archive_path) as tar:
        tar.errorlevel = 1
        tar.extractall(destination_path)
[ "def", "extract_archive", "(", "archive_path", ",", "destination_path", ")", ":", "tar", "=", "tarfile", ".", "open", "(", "archive_path", ")", "tar", ".", "errorlevel", "=", "1", "tar", ".", "extractall", "(", "destination_path", ")" ]
Extracts an archive somewhere on the filesystem.
[ "Extracts", "an", "archive", "somewhere", "on", "the", "filesystem", "." ]
0557029bc58ea1270e358c14ca382d3807ed5b6f
https://github.com/deviantony/valigator/blob/0557029bc58ea1270e358c14ca382d3807ed5b6f/valigator/utils.py#L22-L26
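A plausible pairing with generate_uuid above: extracting each archive into a uniquely named working directory. tarfile.open auto-detects gzip/bz2 compression, and errorlevel=1 makes fatal extraction errors raise instead of only printing warnings. The paths are illustrative:

workdir = '/tmp/valigator-{}'.format(generate_uuid())
extract_archive('/tmp/backup.tar.gz', workdir)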
242,180
deviantony/valigator
valigator/utils.py
remove_file
def remove_file(file_path):
    """Remove a directory tree from the filesystem."""
    if path.exists(file_path):
        try:
            rmtree(file_path)
        except Exception:
            print('Unable to remove temporary workdir {}'.format(file_path))
python
def remove_file(file_path):
    """Remove a directory tree from the filesystem."""
    if path.exists(file_path):
        try:
            rmtree(file_path)
        except Exception:
            print('Unable to remove temporary workdir {}'.format(file_path))
[ "def", "remove_file", "(", "file_path", ")", ":", "if", "path", ".", "exists", "(", "file_path", ")", ":", "try", ":", "rmtree", "(", "file_path", ")", "except", "Exception", ":", "print", "(", "'Unable to remove temporary workdir {}'", ".", "format", "(", "file_path", ")", ")" ]
Remove a directory tree from the filesystem.
[ "Remove", "a", "file", "from", "the", "filesystem", "." ]
0557029bc58ea1270e358c14ca382d3807ed5b6f
https://github.com/deviantony/valigator/blob/0557029bc58ea1270e358c14ca382d3807ed5b6f/valigator/utils.py#L29-L35
242,181
shaypal5/pdutil
pdutil/iter/iter.py
sub_dfs_by_size
def sub_dfs_by_size(df, size):
    """Get a generator yielding consecutive sub-dataframes of the given size.

    Arguments
    ---------
    df : pandas.DataFrame
        The dataframe for which to get sub-dataframes.
    size : int
        The size of each sub-dataframe.

    Returns
    -------
    generator
        A generator yielding consecutive sub-dataframes of the given size.

    Example
    -------
    >>> import pandas as pd; import pdutil;
    >>> data = [[23, "Jen"], [42, "Ray"], [15, "Fin"]]
    >>> df = pd.DataFrame(data, columns=['age', 'name'])
    >>> for subdf in pdutil.iter.sub_dfs_by_size(df, 2): print(subdf)
       age name
    0   23  Jen
    1   42  Ray
       age name
    2   15  Fin
    """
    for i in range(0, len(df), size):
        yield (df.iloc[i:i + size])
python
def sub_dfs_by_size(df, size):
    """Get a generator yielding consecutive sub-dataframes of the given size.

    Arguments
    ---------
    df : pandas.DataFrame
        The dataframe for which to get sub-dataframes.
    size : int
        The size of each sub-dataframe.

    Returns
    -------
    generator
        A generator yielding consecutive sub-dataframes of the given size.

    Example
    -------
    >>> import pandas as pd; import pdutil;
    >>> data = [[23, "Jen"], [42, "Ray"], [15, "Fin"]]
    >>> df = pd.DataFrame(data, columns=['age', 'name'])
    >>> for subdf in pdutil.iter.sub_dfs_by_size(df, 2): print(subdf)
       age name
    0   23  Jen
    1   42  Ray
       age name
    2   15  Fin
    """
    for i in range(0, len(df), size):
        yield (df.iloc[i:i + size])
[ "def", "sub_dfs_by_size", "(", "df", ",", "size", ")", ":", "for", "i", "in", "range", "(", "0", ",", "len", "(", "df", ")", ",", "size", ")", ":", "yield", "(", "df", ".", "iloc", "[", "i", ":", "i", "+", "size", "]", ")" ]
Get a generator yielding consecutive sub-dataframes of the given size.

Arguments
---------
df : pandas.DataFrame
    The dataframe for which to get sub-dataframes.
size : int
    The size of each sub-dataframe.

Returns
-------
generator
    A generator yielding consecutive sub-dataframes of the given size.

Example
-------
>>> import pandas as pd; import pdutil;
>>> data = [[23, "Jen"], [42, "Ray"], [15, "Fin"]]
>>> df = pd.DataFrame(data, columns=['age', 'name'])
>>> for subdf in pdutil.iter.sub_dfs_by_size(df, 2): print(subdf)
   age name
0   23  Jen
1   42  Ray
   age name
2   15  Fin
[ "Get", "a", "generator", "yielding", "consecutive", "sub", "-", "dataframes", "of", "the", "given", "size", "." ]
231059634643af2558d22070f89767410978cf56
https://github.com/shaypal5/pdutil/blob/231059634643af2558d22070f89767410978cf56/pdutil/iter/iter.py#L4-L32
242,182
shaypal5/pdutil
pdutil/iter/iter.py
sub_dfs_by_num
def sub_dfs_by_num(df, num): """Get a generator yielding num consecutive sub-dataframes of the given df. Arguments --------- df : pandas.DataFrame The dataframe for which to get sub-dataframes. num : int The number of sub-dataframe to divide the given dataframe into. Returns ------- generator A generator yielding n consecutive sub-dataframes of the given df. Example ------- >>> import pandas as pd; import pdutil; >>> data = [[23, "Jen"], [42, "Ray"], [15, "Fin"]] >>> df = pd.DataFrame(data, columns=['age', 'name']) >>> for subdf in pdutil.iter.sub_dfs_by_num(df, 2): print(subdf) age name 0 23 Jen 1 42 Ray age name 2 15 Fin """ size = len(df) / float(num) for i in range(num): yield df.iloc[int(round(size * i)): int(round(size * (i + 1)))]
python
def sub_dfs_by_num(df, num): """Get a generator yielding num consecutive sub-dataframes of the given df. Arguments --------- df : pandas.DataFrame The dataframe for which to get sub-dataframes. num : int The number of sub-dataframe to divide the given dataframe into. Returns ------- generator A generator yielding n consecutive sub-dataframes of the given df. Example ------- >>> import pandas as pd; import pdutil; >>> data = [[23, "Jen"], [42, "Ray"], [15, "Fin"]] >>> df = pd.DataFrame(data, columns=['age', 'name']) >>> for subdf in pdutil.iter.sub_dfs_by_num(df, 2): print(subdf) age name 0 23 Jen 1 42 Ray age name 2 15 Fin """ size = len(df) / float(num) for i in range(num): yield df.iloc[int(round(size * i)): int(round(size * (i + 1)))]
[ "def", "sub_dfs_by_num", "(", "df", ",", "num", ")", ":", "size", "=", "len", "(", "df", ")", "/", "float", "(", "num", ")", "for", "i", "in", "range", "(", "num", ")", ":", "yield", "df", ".", "iloc", "[", "int", "(", "round", "(", "size", "*", "i", ")", ")", ":", "int", "(", "round", "(", "size", "*", "(", "i", "+", "1", ")", ")", ")", "]" ]
Get a generator yielding num consecutive sub-dataframes of the given df. Arguments --------- df : pandas.DataFrame The dataframe for which to get sub-dataframes. num : int The number of sub-dataframe to divide the given dataframe into. Returns ------- generator A generator yielding n consecutive sub-dataframes of the given df. Example ------- >>> import pandas as pd; import pdutil; >>> data = [[23, "Jen"], [42, "Ray"], [15, "Fin"]] >>> df = pd.DataFrame(data, columns=['age', 'name']) >>> for subdf in pdutil.iter.sub_dfs_by_num(df, 2): print(subdf) age name 0 23 Jen 1 42 Ray age name 2 15 Fin
[ "Get", "a", "generator", "yielding", "num", "consecutive", "sub", "-", "dataframes", "of", "the", "given", "df", "." ]
231059634643af2558d22070f89767410978cf56
https://github.com/shaypal5/pdutil/blob/231059634643af2558d22070f89767410978cf56/pdutil/iter/iter.py#L35-L64
242,183
ryanjdillon/pylleo
pylleo/utils.py
predict_encoding
def predict_encoding(file_path, n_lines=20): '''Get file encoding of a text file''' import chardet # Open the file as binary data with open(file_path, 'rb') as f: # Join binary lines for specified number of lines rawdata = b''.join([f.readline() for _ in range(n_lines)]) return chardet.detect(rawdata)['encoding']
python
def predict_encoding(file_path, n_lines=20): '''Get file encoding of a text file''' import chardet # Open the file as binary data with open(file_path, 'rb') as f: # Join binary lines for specified number of lines rawdata = b''.join([f.readline() for _ in range(n_lines)]) return chardet.detect(rawdata)['encoding']
[ "def", "predict_encoding", "(", "file_path", ",", "n_lines", "=", "20", ")", ":", "import", "chardet", "# Open the file as binary data", "with", "open", "(", "file_path", ",", "'rb'", ")", "as", "f", ":", "# Join binary lines for specified number of lines", "rawdata", "=", "b''", ".", "join", "(", "[", "f", ".", "readline", "(", ")", "for", "_", "in", "range", "(", "n_lines", ")", "]", ")", "return", "chardet", ".", "detect", "(", "rawdata", ")", "[", "'encoding'", "]" ]
Get file encoding of a text file
[ "Get", "file", "encoding", "of", "a", "text", "file" ]
b9b999fef19eaeccce4f207ab1b6198287c1bfec
https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/utils.py#L15-L24
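Usage sketch (requires the chardet package; the path is illustrative). The detected encoding can be passed straight to open:

enc = predict_encoding('data/sensor_log.txt', n_lines=50)
with open('data/sensor_log.txt', encoding=enc) as f:
    text = f.read()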
242,184
ryanjdillon/pylleo
pylleo/utils.py
get_n_header
def get_n_header(f, header_char='"'):
    '''Get the number of header rows in a Little Leonardo data file

    Args
    ----
    f : file stream
        File handle for the file from which header rows will be read
    header_char: str
        Character array at beginning of each header line

    Returns
    -------
    n_header: int
        Number of header rows in Little Leonardo data file
    '''
    n_header = 0
    reading_headers = True
    while reading_headers:
        line = f.readline()
        if line.startswith(header_char):
            n_header += 1
        else:
            reading_headers = False

    return n_header
python
def get_n_header(f, header_char='"'):
    '''Get the number of header rows in a Little Leonardo data file

    Args
    ----
    f : file stream
        File handle for the file from which header rows will be read
    header_char: str
        Character array at beginning of each header line

    Returns
    -------
    n_header: int
        Number of header rows in Little Leonardo data file
    '''
    n_header = 0
    reading_headers = True
    while reading_headers:
        line = f.readline()
        if line.startswith(header_char):
            n_header += 1
        else:
            reading_headers = False

    return n_header
[ "def", "get_n_header", "(", "f", ",", "header_char", "=", "'\"'", ")", ":", "n_header", "=", "0", "reading_headers", "=", "True", "while", "reading_headers", ":", "line", "=", "f", ".", "readline", "(", ")", "if", "line", ".", "startswith", "(", "header_char", ")", ":", "n_header", "+=", "1", "else", ":", "reading_headers", "=", "False", "return", "n_header" ]
Get the number of header rows in a Little Leonardo data file

Args
----
f : file stream
    File handle for the file from which header rows will be read
header_char: str
    Character array at beginning of each header line

Returns
-------
n_header: int
    Number of header rows in Little Leonardo data file
[ "Get", "the", "nummber", "of", "header", "rows", "in", "a", "Little", "Leonardo", "data", "file" ]
b9b999fef19eaeccce4f207ab1b6198287c1bfec
https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/utils.py#L27-L52
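Example use: count the quoted header lines, then rewind before handing the file to a reader, since get_n_header leaves the file positioned one line past the header block:

with open('datafile.txt') as f:
    n = get_n_header(f, header_char='"')
    f.seek(0)                        # rewind; the scan consumed header lines plus one
    data_lines = f.readlines()[n:]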
242,185
ryanjdillon/pylleo
pylleo/utils.py
get_tag_params
def get_tag_params(tag_model):
    '''Load parameter strings based on the tag model'''
    tag_model = tag_model.replace('-', '')

    tags = dict()
    tags['W190PD3GT'] = ['Acceleration-X', 'Acceleration-Y', 'Acceleration-Z',
                         'Depth', 'Propeller', 'Temperature']

    # Return tag parameters if found, else raise error
    if tag_model in tags:
        return tags[tag_model]
    else:
        raise KeyError('{} not found in tag dictionary'.format(tag_model))
python
def get_tag_params(tag_model):
    '''Load parameter strings based on the tag model'''
    tag_model = tag_model.replace('-', '')

    tags = dict()
    tags['W190PD3GT'] = ['Acceleration-X', 'Acceleration-Y', 'Acceleration-Z',
                         'Depth', 'Propeller', 'Temperature']

    # Return tag parameters if found, else raise error
    if tag_model in tags:
        return tags[tag_model]
    else:
        raise KeyError('{} not found in tag dictionary'.format(tag_model))
[ "def", "get_tag_params", "(", "tag_model", ")", ":", "tag_model", "=", "tag_model", ".", "replace", "(", "'-'", ",", "''", ")", "tags", "=", "dict", "(", ")", "tags", "[", "'W190PD3GT'", "]", "=", "[", "'Acceleration-X'", ",", "'Acceleration-Y'", ",", "'Acceleration-Z'", ",", "'Depth'", ",", "'Propeller'", ",", "'Temperature'", "]", "# Return tag parameters if found, else raise error", "if", "tag_model", "in", "tags", ":", "return", "tags", "[", "tag_model", "]", "else", ":", "raise", "KeyError", "(", "'{} not found in tag dictionary'", ".", "format", "(", "tag_model", ")", ")" ]
Load parameter strings based on the tag model
[ "Load", "param", "strs", "and", "n_header", "based", "on", "model", "of", "tag", "model" ]
b9b999fef19eaeccce4f207ab1b6198287c1bfec
https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/utils.py#L55-L67
242,186
ryanjdillon/pylleo
pylleo/utils.py
find_file
def find_file(path_dir, search_str, file_ext):
    '''Find path of file in directory containing the search string'''
    import os

    file_path = None
    for file_name in os.listdir(path_dir):
        if (search_str in file_name) and (file_name.endswith(file_ext)):
            file_path = os.path.join(path_dir, file_name)
            break
    if file_path is None:
        raise SystemError('No file found containing string: '
                          '{}.'.format(search_str))

    return file_path
python
def find_file(path_dir, search_str, file_ext):
    '''Find path of file in directory containing the search string'''
    import os

    file_path = None
    for file_name in os.listdir(path_dir):
        if (search_str in file_name) and (file_name.endswith(file_ext)):
            file_path = os.path.join(path_dir, file_name)
            break
    if file_path is None:
        raise SystemError('No file found containing string: '
                          '{}.'.format(search_str))

    return file_path
[ "def", "find_file", "(", "path_dir", ",", "search_str", ",", "file_ext", ")", ":", "import", "os", "file_path", "=", "None", "for", "file_name", "in", "os", ".", "listdir", "(", "path_dir", ")", ":", "if", "(", "search_str", "in", "file_name", ")", "and", "(", "file_name", ".", "endswith", "(", "file_ext", ")", ")", ":", "file_path", "=", "os", ".", "path", ".", "join", "(", "path_dir", ",", "file_name", ")", "break", "if", "file_path", "==", "None", ":", "raise", "SystemError", "(", "'No file found containing string: '", "'{}.'", ".", "format", "(", "search_str", ")", ")", "return", "file_path" ]
Find path of file in directory containing the search string
[ "Find", "path", "of", "file", "in", "directory", "containing", "the", "search", "string" ]
b9b999fef19eaeccce4f207ab1b6198287c1bfec
https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/utils.py#L70-L85
242,187
ryanjdillon/pylleo
pylleo/utils.py
nearest
def nearest(items, pivot): '''Find nearest value in array, including datetimes Args ---- items: iterable List of values from which to find nearest value to `pivot` pivot: int or float Value to find nearest of in `items` Returns ------- nearest: int or float Value in items nearest to `pivot` ''' return min(items, key=lambda x: abs(x - pivot))
python
def nearest(items, pivot): '''Find nearest value in array, including datetimes Args ---- items: iterable List of values from which to find nearest value to `pivot` pivot: int or float Value to find nearest of in `items` Returns ------- nearest: int or float Value in items nearest to `pivot` ''' return min(items, key=lambda x: abs(x - pivot))
[ "def", "nearest", "(", "items", ",", "pivot", ")", ":", "return", "min", "(", "items", ",", "key", "=", "lambda", "x", ":", "abs", "(", "x", "-", "pivot", ")", ")" ]
Find nearest value in array, including datetimes Args ---- items: iterable List of values from which to find nearest value to `pivot` pivot: int or float Value to find nearest of in `items` Returns ------- nearest: int or float Value in items nearest to `pivot`
[ "Find", "nearest", "value", "in", "array", "including", "datetimes" ]
b9b999fef19eaeccce4f207ab1b6198287c1bfec
https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/utils.py#L104-L119
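nearest works for any values that support subtraction and abs(), datetimes included:

import datetime

times = [datetime.datetime(2024, 1, d) for d in (1, 5, 9)]
pivot = datetime.datetime(2024, 1, 4)
assert nearest(times, pivot) == datetime.datetime(2024, 1, 5)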
242,188
ryanjdillon/pylleo
pylleo/utils.py
parse_experiment_params
def parse_experiment_params(name_exp):
    '''Parse experiment parameters from the data directory name

    Args
    ----
    name_exp: str
        Name of data directory with experiment parameters

    Returns
    -------
    tag_params: dict of str
        Dictionary of parsed experiment parameters
    '''
    if ('/' in name_exp) or ('\\' in name_exp):
        raise ValueError("The value {} appears to be a path. Please pass "
                         "only the data directory's name (i.e. the "
                         "experiment name)".format(name_exp))

    tag_params = dict()

    tag_params['experiment'] = name_exp
    tag_params['tag_model'] = (name_exp.split('_')[1]).replace('-', '')
    tag_params['tag_id'] = name_exp.split('_')[2]
    tag_params['animal'] = name_exp.split('_')[3]
    tag_params['notes'] = name_exp.split('_')[4]

    return tag_params
python
def parse_experiment_params(name_exp):
    '''Parse experiment parameters from the data directory name

    Args
    ----
    name_exp: str
        Name of data directory with experiment parameters

    Returns
    -------
    tag_params: dict of str
        Dictionary of parsed experiment parameters
    '''
    if ('/' in name_exp) or ('\\' in name_exp):
        raise ValueError("The value {} appears to be a path. Please pass "
                         "only the data directory's name (i.e. the "
                         "experiment name)".format(name_exp))

    tag_params = dict()

    tag_params['experiment'] = name_exp
    tag_params['tag_model'] = (name_exp.split('_')[1]).replace('-', '')
    tag_params['tag_id'] = name_exp.split('_')[2]
    tag_params['animal'] = name_exp.split('_')[3]
    tag_params['notes'] = name_exp.split('_')[4]

    return tag_params
[ "def", "parse_experiment_params", "(", "name_exp", ")", ":", "if", "(", "'/'", "in", "name_exp", ")", "or", "(", "'\\\\'", "in", "name_exp", ")", ":", "raise", "ValueError", "(", "\"The path {} appears to be a path. Please pass \"", "\"only the data directory's name (i.e. the \"", "\"experiment name)\"", ".", "format", "(", "name_exp", ")", ")", "tag_params", "=", "dict", "(", ")", "tag_params", "[", "'experiment'", "]", "=", "name_exp", "tag_params", "[", "'tag_model'", "]", "=", "(", "name_exp", ".", "split", "(", "'_'", ")", "[", "1", "]", ")", ".", "replace", "(", "'-'", ",", "''", ")", "tag_params", "[", "'tag_id'", "]", "=", "name_exp", ".", "split", "(", "'_'", ")", "[", "2", "]", "tag_params", "[", "'animal'", "]", "=", "name_exp", ".", "split", "(", "'_'", ")", "[", "3", "]", "tag_params", "[", "'notes'", "]", "=", "name_exp", ".", "split", "(", "'_'", ")", "[", "4", "]", "return", "tag_params" ]
Parse experiment parameters from the data directory name Args ---- name_exp: str Name of data directory with experiment parameters Returns ------- tag_params: dict of str Dictionary of parsed experiment parameters
[ "Parse", "experiment", "parameters", "from", "the", "data", "directory", "name" ]
b9b999fef19eaeccce4f207ab1b6198287c1bfec
https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/utils.py#L122-L147
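The parser assumes a directory name with at least five underscore-separated fields (date, tag model, tag id, animal, notes); a hypothetical example:

params = parse_experiment_params('20240101_W190-PD3GT_34840_skinny_control')
assert params['tag_model'] == 'W190PD3GT'   # the hyphen is stripped
assert params['animal'] == 'skinny'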
242,189
sprockets/sprockets.clients.cassandra
sprockets/clients/cassandra/__init__.py
CassandraConnection._get_cassandra_config
def _get_cassandra_config(self): """Retrieve a dict containing Cassandra client config params.""" parts = urlsplit(os.environ.get('CASSANDRA_URI', DEFAULT_URI)) if parts.scheme != 'cassandra': raise RuntimeError( 'CASSANDRA_URI scheme is not "cassandra://"!') _, _, ip_addresses = socket.gethostbyname_ex(parts.hostname) if not ip_addresses: raise RuntimeError('Unable to find Cassandra in DNS!') return { 'contact_points': ip_addresses, 'port': parts.port or DEFAULT_PORT, }
python
def _get_cassandra_config(self): """Retrieve a dict containing Cassandra client config params.""" parts = urlsplit(os.environ.get('CASSANDRA_URI', DEFAULT_URI)) if parts.scheme != 'cassandra': raise RuntimeError( 'CASSANDRA_URI scheme is not "cassandra://"!') _, _, ip_addresses = socket.gethostbyname_ex(parts.hostname) if not ip_addresses: raise RuntimeError('Unable to find Cassandra in DNS!') return { 'contact_points': ip_addresses, 'port': parts.port or DEFAULT_PORT, }
[ "def", "_get_cassandra_config", "(", "self", ")", ":", "parts", "=", "urlsplit", "(", "os", ".", "environ", ".", "get", "(", "'CASSANDRA_URI'", ",", "DEFAULT_URI", ")", ")", "if", "parts", ".", "scheme", "!=", "'cassandra'", ":", "raise", "RuntimeError", "(", "'CASSANDRA_URI scheme is not \"cassandra://\"!'", ")", "_", ",", "_", ",", "ip_addresses", "=", "socket", ".", "gethostbyname_ex", "(", "parts", ".", "hostname", ")", "if", "not", "ip_addresses", ":", "raise", "RuntimeError", "(", "'Unable to find Cassandra in DNS!'", ")", "return", "{", "'contact_points'", ":", "ip_addresses", ",", "'port'", ":", "parts", ".", "port", "or", "DEFAULT_PORT", ",", "}" ]
Retrieve a dict containing Cassandra client config params.
[ "Retrieve", "a", "dict", "containing", "Cassandra", "client", "config", "params", "." ]
c0a3ffe550ceb89b23a59959a0645d29d257e624
https://github.com/sprockets/sprockets.clients.cassandra/blob/c0a3ffe550ceb89b23a59959a0645d29d257e624/sprockets/clients/cassandra/__init__.py#L59-L73
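For illustration, the environment variable shape the method expects; the hostname is hypothetical:

import os

os.environ['CASSANDRA_URI'] = 'cassandra://cassandra.service.local:9042'
# _get_cassandra_config() then resolves the hostname via DNS and returns
# {'contact_points': [<resolved IPs>], 'port': 9042}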
242,190
sprockets/sprockets.clients.cassandra
sprockets/clients/cassandra/__init__.py
CassandraConnection.prepare
def prepare(self, query, name=None):
    """Create and cache a prepared statement using the provided query.

    This function will take a ``query`` and optional ``name`` parameter
    and will create a new prepared statement for the provided ``query``.
    The resulting statement object will be cached so future invocations
    of this function will not incur the overhead of recreating the
    statement. If ``name`` is provided it will be used as the key for
    the cache, so you'll be able to call ``execute`` using the name.

    :param str query: The query to prepare.
    :param str name: (Optional) name to use as a key in the cache.

    """
    key = name or query
    stmt = CassandraConnection._prepared_statement_cache.get(key, None)
    if stmt is not None:
        return stmt

    stmt = self._session.prepare(query)
    CassandraConnection._prepared_statement_cache[key] = stmt
    return stmt
python
def prepare(self, query, name=None):
    """Create and cache a prepared statement using the provided query.

    This function will take a ``query`` and optional ``name`` parameter
    and will create a new prepared statement for the provided ``query``.
    The resulting statement object will be cached so future invocations
    of this function will not incur the overhead of recreating the
    statement. If ``name`` is provided it will be used as the key for
    the cache, so you'll be able to call ``execute`` using the name.

    :param str query: The query to prepare.
    :param str name: (Optional) name to use as a key in the cache.

    """
    key = name or query
    stmt = CassandraConnection._prepared_statement_cache.get(key, None)
    if stmt is not None:
        return stmt

    stmt = self._session.prepare(query)
    CassandraConnection._prepared_statement_cache[key] = stmt
    return stmt
[ "def", "prepare", "(", "self", ",", "query", ",", "name", "=", "None", ")", ":", "key", "=", "name", "or", "query", "stmt", "=", "CassandraConnection", ".", "_prepared_statement_cache", ".", "get", "(", "key", ",", "None", ")", "if", "stmt", "is", "not", "None", ":", "return", "stmt", "stmt", "=", "self", ".", "_session", ".", "prepare", "(", "query", ")", "CassandraConnection", ".", "_prepared_statement_cache", "[", "key", "]", "=", "stmt", "return", "stmt" ]
Create and cache a prepared statement using the provided query.

This function will take a ``query`` and optional ``name`` parameter
and will create a new prepared statement for the provided ``query``.
The resulting statement object will be cached so future invocations
of this function will not incur the overhead of recreating the
statement. If ``name`` is provided it will be used as the key for
the cache, so you'll be able to call ``execute`` using the name.

:param str query: The query to prepare.
:param str name: (Optional) name to use as a key in the cache.
[ "Create", "and", "cache", "a", "prepared", "statement", "using", "the", "provided", "query", "." ]
c0a3ffe550ceb89b23a59959a0645d29d257e624
https://github.com/sprockets/sprockets.clients.cassandra/blob/c0a3ffe550ceb89b23a59959a0645d29d257e624/sprockets/clients/cassandra/__init__.py#L85-L106
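Because the cache key defaults to the query text, a named statement is reused on every later call with the same name, regardless of the query argument; conn is an assumed CassandraConnection instance:

stmt = conn.prepare('SELECT * FROM users WHERE id = ?', name='user_by_id')
same = conn.prepare('this text is ignored once the name is cached', name='user_by_id')
assert stmt is same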
242,191
sprockets/sprockets.clients.cassandra
sprockets/clients/cassandra/__init__.py
CassandraConnection.execute
def execute(self, query, *args, **kwargs): """Asynchronously execute the specified CQL query. The execute command also takes optional parameters and trace keyword arguments. See cassandra-python documentation for definition of those parameters. """ tornado_future = Future() cassandra_future = self._session.execute_async(query, *args, **kwargs) self._ioloop.add_callback( self._callback, cassandra_future, tornado_future) return tornado_future
python
def execute(self, query, *args, **kwargs): """Asynchronously execute the specified CQL query. The execute command also takes optional parameters and trace keyword arguments. See cassandra-python documentation for definition of those parameters. """ tornado_future = Future() cassandra_future = self._session.execute_async(query, *args, **kwargs) self._ioloop.add_callback( self._callback, cassandra_future, tornado_future) return tornado_future
[ "def", "execute", "(", "self", ",", "query", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "tornado_future", "=", "Future", "(", ")", "cassandra_future", "=", "self", ".", "_session", ".", "execute_async", "(", "query", ",", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "_ioloop", ".", "add_callback", "(", "self", ".", "_callback", ",", "cassandra_future", ",", "tornado_future", ")", "return", "tornado_future" ]
Asynchronously execute the specified CQL query. The execute command also takes optional parameters and trace keyword arguments. See cassandra-python documentation for definition of those parameters.
[ "Asynchronously", "execute", "the", "specified", "CQL", "query", "." ]
c0a3ffe550ceb89b23a59959a0645d29d257e624
https://github.com/sprockets/sprockets.clients.cassandra/blob/c0a3ffe550ceb89b23a59959a0645d29d257e624/sprockets/clients/cassandra/__init__.py#L108-L119
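Since execute bridges the driver's future into a Tornado Future, it composes with Tornado coroutines; a sketch assuming a CassandraConnection named conn:

from tornado import gen

@gen.coroutine
def fetch_user(conn, user_id):
    stmt = conn.prepare('SELECT * FROM users WHERE id = ?', name='user_by_id')
    rows = yield conn.execute(stmt, (user_id,))
    raise gen.Return(rows)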
242,192
cdeboever3/cdpybio
cdpybio/plink.py
read_linear2
def read_linear2(fn, header=True):
    """Read a plink 2 output file of type glm.linear into a pandas DataFrame.

    Parameters
    ----------
    fn : str
        Path to the plink file. The file can be gzipped or not.

    header : bool
        True if the file has a header (this is generally the case unless the
        file has been processed after it was created). False if no header.
        Pass None if it's unknown whether the file has a header.

    Returns
    -------
    res : pandas.DataFrame
        Dataframe with results.

    """
    dtypes = {'#CHROM':str, 'POS':int, 'ID':str, 'REF':str, 'ALT1':str,
              'TEST':str, 'OBS_CT':int, 'BETA':float, 'SE':float,
              'T_STAT':float, 'P':float}
    if header is None:
        if fn[-3:] == '.gz':
            from gzip import open
            # gzip.open defaults to binary mode; 'rt' keeps the comparison
            # below working on text.
            with open(fn, 'rt') as f:
                line = f.readline()
        else:
            with open(fn, 'r') as f:
                line = f.readline()
        header = line[0] == '#'
    if header:
        res = pd.read_table(fn, index_col=2, dtype=dtypes, low_memory=False)
    else:
        cols = ['#CHROM', 'POS', 'ID', 'REF', 'ALT1', 'TEST', 'OBS_CT',
                'BETA', 'SE', 'T_STAT', 'P']
        res = pd.read_table(fn, index_col=2, dtype=dtypes, names=cols,
                            low_memory=False)
    res.columns = [x.replace('#', '') for x in res.columns]
    return(res)
python
def read_linear2(fn, header=True):
    """Read a plink 2 output file of type glm.linear into a pandas DataFrame.

    Parameters
    ----------
    fn : str
        Path to the plink file. The file can be gzipped or not.

    header : bool
        True if the file has a header (this is generally the case unless the
        file has been processed after it was created). False if no header.
        Pass None if it's unknown whether the file has a header.

    Returns
    -------
    res : pandas.DataFrame
        Dataframe with results.

    """
    dtypes = {'#CHROM':str, 'POS':int, 'ID':str, 'REF':str, 'ALT1':str,
              'TEST':str, 'OBS_CT':int, 'BETA':float, 'SE':float,
              'T_STAT':float, 'P':float}
    if header is None:
        if fn[-3:] == '.gz':
            from gzip import open
            # gzip.open defaults to binary mode; 'rt' keeps the comparison
            # below working on text.
            with open(fn, 'rt') as f:
                line = f.readline()
        else:
            with open(fn, 'r') as f:
                line = f.readline()
        header = line[0] == '#'
    if header:
        res = pd.read_table(fn, index_col=2, dtype=dtypes, low_memory=False)
    else:
        cols = ['#CHROM', 'POS', 'ID', 'REF', 'ALT1', 'TEST', 'OBS_CT',
                'BETA', 'SE', 'T_STAT', 'P']
        res = pd.read_table(fn, index_col=2, dtype=dtypes, names=cols,
                            low_memory=False)
    res.columns = [x.replace('#', '') for x in res.columns]
    return(res)
[ "def", "read_linear2", "(", "fn", ",", "header", "=", "True", ")", ":", "dtypes", "=", "{", "'#CHROM'", ":", "str", ",", "'POS'", ":", "int", ",", "'ID'", ":", "str", ",", "'REF'", ":", "str", ",", "'ALT1'", ":", "str", ",", "'TEST'", ":", "str", ",", "'OBS_CT'", ":", "int", ",", "'BETA'", ":", "float", ",", "'SE'", ":", "float", ",", "'T_STAT'", ":", "float", ",", "'P'", ":", "float", "}", "if", "header", "is", "None", ":", "if", "fn", "[", "-", "3", ":", "]", "==", "'.gz'", ":", "from", "gzip", "import", "open", "with", "open", "(", "fn", ",", "'r'", ")", "as", "f", ":", "line", "=", "f", ".", "readline", "(", ")", "else", ":", "with", "open", "(", "fn", ",", "'r'", ")", "as", "f", ":", "line", "=", "f", ".", "readline", "(", ")", "header", "=", "line", "[", "0", "]", "==", "'#'", "if", "header", ":", "res", "=", "pd", ".", "read_table", "(", "fn", ",", "index_col", "=", "2", ",", "dtype", "=", "dtypes", ",", "low_memory", "=", "False", ")", "else", ":", "cols", "=", "[", "'#CHROM'", ",", "'POS'", ",", "'ID'", ",", "'REF'", ",", "'ALT1'", ",", "'TEST'", ",", "'OBS_CT'", ",", "'BETA'", ",", "'SE'", ",", "'T_STAT'", ",", "'P'", "]", "res", "=", "pd", ".", "read_table", "(", "fn", ",", "index_col", "=", "2", ",", "dtype", "=", "dtypes", ",", "names", "=", "cols", ",", "low_memory", "=", "False", ")", "res", ".", "columns", "=", "[", "x", ".", "replace", "(", "'#'", ",", "''", ")", "for", "x", "in", "res", ".", "columns", "]", "return", "(", "res", ")" ]
Read a plink 2 output file of type glm.linear into a pandas DataFrame.

Parameters
----------
fn : str
    Path to the plink file. The file can be gzipped or not.

header : bool
    True if the file has a header (this is generally the case unless the
    file has been processed after it was created). False if no header. Pass
    None if it's unknown whether the file has a header.

Returns
-------
res : pandas.DataFrame
    Dataframe with results.
[ "Read", "a", "plink", "2", "output", "file", "of", "type", "glm", ".", "linear", "into", "a", "pandas", "DataFrame", "." ]
38efdf0e11d01bc00a135921cb91a19c03db5d5c
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/plink.py#L6-L45
242,193
cdeboever3/cdpybio
cdpybio/plink.py
parse_log2
def parse_log2(fn):
    """Parse out some information from a plink 2 log. This function currently
    only supports log files from linear or logistic regression.

    Parameters
    ----------
    fn : str
        Path to the plink log file.

    Returns
    -------
    res : pandas.Series
        Series with log file information.

    """
    with open(fn) as f:
        lines = f.readlines()
    if len(lines) == 0:
        sys.stderr.write('Empty log file: {}.\n'.format(fn))
        return(None)
    logtype = None
    # TODO: Eventually, I will look for other arguments that indicate which
    # plink analysis was run.
    if len([x for x in lines if '--glm standard-beta' in x]):
        logtype = 'linear'
    elif len([x for x in lines if '--glm firth-fallback' in x]):
        logtype = 'logistic'
    if logtype is None:
        sys.stderr.write('Log file not supported: {}.\n'.format(fn))
        return(None)
    try:
        lines = [x for x in lines if 'remaining after' in x]
        i = 0
        x = lines[i].split()
        samples = int(x[0])
        females = int(x[2][1:])
        males = int(x[4])
        i += 1
        cases = np.nan
        controls = np.nan
        if logtype == 'logistic':
            x = lines[i].split()
            cases = int(x[0])
            controls = int(x[3])
            i += 1
        variants = int(lines[i].split()[0])
    except Exception:
        sys.stderr.write('Error parsing log file: {}.\n'.format(fn))
        return(None)
    se = pd.Series([samples, females, males, cases, controls, variants],
                   index=['samples', 'females', 'males', 'cases', 'controls',
                          'variants']).dropna()
    return(se)
python
def parse_log2(fn):
    """Parse out some information from a plink 2 log. This function currently
    only supports log files from linear or logistic regression.

    Parameters
    ----------
    fn : str
        Path to the plink log file.

    Returns
    -------
    res : pandas.Series
        Series with log file information.

    """
    with open(fn) as f:
        lines = f.readlines()
    if len(lines) == 0:
        sys.stderr.write('Empty log file: {}.\n'.format(fn))
        return(None)
    logtype = None
    # TODO: Eventually, I will look for other arguments that indicate which
    # plink analysis was run.
    if len([x for x in lines if '--glm standard-beta' in x]):
        logtype = 'linear'
    elif len([x for x in lines if '--glm firth-fallback' in x]):
        logtype = 'logistic'
    if logtype is None:
        sys.stderr.write('Log file not supported: {}.\n'.format(fn))
        return(None)
    try:
        lines = [x for x in lines if 'remaining after' in x]
        i = 0
        x = lines[i].split()
        samples = int(x[0])
        females = int(x[2][1:])
        males = int(x[4])
        i += 1
        cases = np.nan
        controls = np.nan
        if logtype == 'logistic':
            x = lines[i].split()
            cases = int(x[0])
            controls = int(x[3])
            i += 1
        variants = int(lines[i].split()[0])
    except Exception:
        sys.stderr.write('Error parsing log file: {}.\n'.format(fn))
        return(None)
    se = pd.Series([samples, females, males, cases, controls, variants],
                   index=['samples', 'females', 'males', 'cases', 'controls',
                          'variants']).dropna()
    return(se)
[ "def", "parse_log2", "(", "fn", ")", ":", "with", "open", "(", "fn", ")", "as", "f", ":", "lines", "=", "f", ".", "readlines", "(", ")", "if", "len", "(", "lines", ")", "==", "0", ":", "sys", ".", "stderr", ".", "write", "(", "'Empty log file: {}.\\n'", ".", "format", "(", "fn", ")", ")", "return", "(", "None", ")", "logtype", "=", "None", "# TODO: Eventually, I will look for other arguments that indicate which", "# plink analysis was run.", "if", "len", "(", "[", "x", "for", "x", "in", "lines", "if", "'--glm standard-beta'", "in", "x", "]", ")", ":", "logtype", "=", "'linear'", "elif", "len", "(", "[", "x", "for", "x", "in", "lines", "if", "'--glm firth-fallback'", "in", "x", "]", ")", ":", "logtype", "=", "'logistic'", "if", "logtype", "is", "None", ":", "return", "(", "None", ")", "sys", ".", "stderr", ".", "write", "(", "'Log file not supported: {}.\\n'", ".", "format", "(", "fn", ")", ")", "try", ":", "lines", "=", "[", "x", "for", "x", "in", "lines", "if", "'remaining after'", "in", "x", "]", "i", "=", "0", "x", "=", "lines", "[", "i", "]", ".", "split", "(", ")", "samples", "=", "int", "(", "x", "[", "0", "]", ")", "females", "=", "int", "(", "x", "[", "2", "]", "[", "1", ":", "]", ")", "males", "=", "int", "(", "x", "[", "4", "]", ")", "i", "+=", "1", "cases", "=", "np", ".", "nan", "controls", "=", "np", ".", "nan", "if", "logtype", "==", "'logistic'", ":", "x", "=", "lines", "[", "i", "]", ".", "split", "(", ")", "cases", "=", "int", "(", "x", "[", "0", "]", ")", "controls", "=", "int", "(", "x", "[", "3", "]", ")", "i", "+=", "1", "variants", "=", "int", "(", "lines", "[", "i", "]", ".", "split", "(", ")", "[", "0", "]", ")", "except", ":", "sys", ".", "stderr", ".", "write", "(", "'Error parsing log file: {}.\\n'", ".", "format", "(", "fn", ")", ")", "return", "(", "None", ")", "se", "=", "pd", ".", "Series", "(", "[", "samples", ",", "females", ",", "males", ",", "cases", ",", "controls", ",", "variants", "]", ",", "index", "=", "[", "'samples'", ",", "'females'", ",", "'males'", ",", "'cases'", ",", "'controls'", ",", "'variants'", "]", ")", ".", "dropna", "(", ")", "return", "(", "se", ")" ]
Parse out some information from a plink 2 log. This function currently
only supports log files from linear or logistic regression.

Parameters
----------
fn : str
    Path to the plink log file.

Returns
-------
res : pandas.Series
    Series with log file information.
[ "Parse", "out", "some", "information", "from", "a", "plink", "2", "log", ".", "This", "function", "currently", "only", "supports", "log", "files", "from", "linear", "or", "logistic", "regression", "." ]
38efdf0e11d01bc00a135921cb91a19c03db5d5c
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/plink.py#L99-L151
242,194
shaypal5/pdutil
pdutil/transform/transform.py
x_y_by_col_lbl
def x_y_by_col_lbl(df, y_col_lbl): """Returns an X dataframe and a y series by the given column name. Parameters ---------- df : pandas.DataFrame The dataframe to split. y_col_lbl : object The label of the y column. Returns ------- X, y : pandas.DataFrame, pandas.Series A dataframe made up of all columns but the column with the given name and a series made up of the same column, respectively. Example ------- >>> import pandas as pd >>> data = [[23, 'Jo', 4], [19, 'Mi', 3]] >>> df = pd.DataFrame(data, [1, 2] , ['Age', 'Name', 'D']) >>> X, y = x_y_by_col_lbl(df, 'D') >>> X Age Name 1 23 Jo 2 19 Mi >>> y 1 4 2 3 Name: D, dtype: int64 """ x_cols = [col for col in df.columns if col != y_col_lbl] return df[x_cols], df[y_col_lbl]
python
def x_y_by_col_lbl(df, y_col_lbl): """Returns an X dataframe and a y series by the given column name. Parameters ---------- df : pandas.DataFrame The dataframe to split. y_col_lbl : object The label of the y column. Returns ------- X, y : pandas.DataFrame, pandas.Series A dataframe made up of all columns but the column with the given name and a series made up of the same column, respectively. Example ------- >>> import pandas as pd >>> data = [[23, 'Jo', 4], [19, 'Mi', 3]] >>> df = pd.DataFrame(data, [1, 2] , ['Age', 'Name', 'D']) >>> X, y = x_y_by_col_lbl(df, 'D') >>> X Age Name 1 23 Jo 2 19 Mi >>> y 1 4 2 3 Name: D, dtype: int64 """ x_cols = [col for col in df.columns if col != y_col_lbl] return df[x_cols], df[y_col_lbl]
[ "def", "x_y_by_col_lbl", "(", "df", ",", "y_col_lbl", ")", ":", "x_cols", "=", "[", "col", "for", "col", "in", "df", ".", "columns", "if", "col", "!=", "y_col_lbl", "]", "return", "df", "[", "x_cols", "]", ",", "df", "[", "y_col_lbl", "]" ]
Returns an X dataframe and a y series by the given column name. Parameters ---------- df : pandas.DataFrame The dataframe to split. y_col_lbl : object The label of the y column. Returns ------- X, y : pandas.DataFrame, pandas.Series A dataframe made up of all columns but the column with the given name and a series made up of the same column, respectively. Example ------- >>> import pandas as pd >>> data = [[23, 'Jo', 4], [19, 'Mi', 3]] >>> df = pd.DataFrame(data, [1, 2] , ['Age', 'Name', 'D']) >>> X, y = x_y_by_col_lbl(df, 'D') >>> X Age Name 1 23 Jo 2 19 Mi >>> y 1 4 2 3 Name: D, dtype: int64
[ "Returns", "an", "X", "dataframe", "and", "a", "y", "series", "by", "the", "given", "column", "name", "." ]
231059634643af2558d22070f89767410978cf56
https://github.com/shaypal5/pdutil/blob/231059634643af2558d22070f89767410978cf56/pdutil/transform/transform.py#L4-L36
242,195
shaypal5/pdutil
pdutil/transform/transform.py
x_y_by_col_lbl_inplace
def x_y_by_col_lbl_inplace(df, y_col_lbl):
    """Breaks the given dataframe into an X frame and a y series by the given
    column name.

    The original frame is returned, without the y series column, as the X
    frame, so no new dataframes are created.

    Parameters
    ----------
    df : pandas.DataFrame
        The dataframe to split.
    y_col_lbl : object
        The label of the y column.

    Returns
    -------
    X, y : pandas.DataFrame, pandas.Series
        A dataframe made up of all columns but the column with the given
        name and a series made up of the same column, respectively.

    Example
    -------
    >>> import pandas as pd
    >>> data = [[23, 'Jo', 4], [19, 'Mi', 3]]
    >>> df = pd.DataFrame(data, [1, 2] , ['Age', 'Name', 'D'])
    >>> X, y = x_y_by_col_lbl_inplace(df, 'D')
    >>> X
       Age Name
    1   23   Jo
    2   19   Mi
    >>> y
    1    4
    2    3
    Name: D, dtype: int64
    """
    y = df[y_col_lbl]
    df.drop(
        labels=y_col_lbl,
        axis=1,
        inplace=True,
    )
    return df, y
python
def x_y_by_col_lbl_inplace(df, y_col_lbl):
    """Breaks the given dataframe into an X frame and a y series by the given
    column name.

    The original frame is returned, without the y series column, as the X
    frame, so no new dataframes are created.

    Parameters
    ----------
    df : pandas.DataFrame
        The dataframe to split.
    y_col_lbl : object
        The label of the y column.

    Returns
    -------
    X, y : pandas.DataFrame, pandas.Series
        A dataframe made up of all columns but the column with the given
        name and a series made up of the same column, respectively.

    Example
    -------
    >>> import pandas as pd
    >>> data = [[23, 'Jo', 4], [19, 'Mi', 3]]
    >>> df = pd.DataFrame(data, [1, 2] , ['Age', 'Name', 'D'])
    >>> X, y = x_y_by_col_lbl_inplace(df, 'D')
    >>> X
       Age Name
    1   23   Jo
    2   19   Mi
    >>> y
    1    4
    2    3
    Name: D, dtype: int64
    """
    y = df[y_col_lbl]
    df.drop(
        labels=y_col_lbl,
        axis=1,
        inplace=True,
    )
    return df, y
[ "def", "x_y_by_col_lbl_inplace", "(", "df", ",", "y_col_lbl", ")", ":", "y", "=", "df", "[", "y_col_lbl", "]", "df", ".", "drop", "(", "labels", "=", "y_col_lbl", ",", "axis", "=", "1", ",", "inplace", "=", "True", ",", ")", "return", "df", ",", "y" ]
Breaks the given dataframe into an X frame and a y series by the given
column name.

The original frame is returned, without the y series column, as the X
frame, so no new dataframes are created.

Parameters
----------
df : pandas.DataFrame
    The dataframe to split.
y_col_lbl : object
    The label of the y column.

Returns
-------
X, y : pandas.DataFrame, pandas.Series
    A dataframe made up of all columns but the column with the given
    name and a series made up of the same column, respectively.

Example
-------
>>> import pandas as pd
>>> data = [[23, 'Jo', 4], [19, 'Mi', 3]]
>>> df = pd.DataFrame(data, [1, 2] , ['Age', 'Name', 'D'])
>>> X, y = x_y_by_col_lbl_inplace(df, 'D')
>>> X
   Age Name
1   23   Jo
2   19   Mi
>>> y
1    4
2    3
Name: D, dtype: int64
[ "Breaks", "the", "given", "dataframe", "into", "an", "X", "frame", "and", "a", "y", "series", "by", "the", "given", "column", "name", "." ]
231059634643af2558d22070f89767410978cf56
https://github.com/shaypal5/pdutil/blob/231059634643af2558d22070f89767410978cf56/pdutil/transform/transform.py#L39-L80
242,196
shaypal5/pdutil
pdutil/transform/transform.py
or_by_masks
def or_by_masks(df, masks): """Returns a sub-dataframe by the logical or over the given masks. Parameters ---------- df : pandas.DataFrame The dataframe to take a subframe of. masks : list A list of pandas.Series of dtype bool, indexed identically to the given dataframe. Returns ------- pandas.DataFrame The sub-dataframe resulting from applying the masks to the dataframe. Example ------- >>> import pandas as pd >>> data = [[23, 'Jo'], [19, 'Mi'], [15, 'Di']] >>> df = pd.DataFrame(data, [1, 2, 3] , ['Age', 'Name']) >>> mask1 = pd.Series([False, True, True], df.index) >>> mask2 = pd.Series([False, False, True], df.index) >>> or_by_masks(df, [mask1, mask2]) Age Name 2 19 Mi 3 15 Di """ if len(masks) < 1: return df if len(masks) == 1: return df[masks[0]] overall_mask = masks[0] | masks[1] for mask in masks[2:]: overall_mask = overall_mask | mask return df[overall_mask]
python
def or_by_masks(df, masks): """Returns a sub-dataframe by the logical or over the given masks. Parameters ---------- df : pandas.DataFrame The dataframe to take a subframe of. masks : list A list of pandas.Series of dtype bool, indexed identically to the given dataframe. Returns ------- pandas.DataFrame The sub-dataframe resulting from applying the masks to the dataframe. Example ------- >>> import pandas as pd >>> data = [[23, 'Jo'], [19, 'Mi'], [15, 'Di']] >>> df = pd.DataFrame(data, [1, 2, 3] , ['Age', 'Name']) >>> mask1 = pd.Series([False, True, True], df.index) >>> mask2 = pd.Series([False, False, True], df.index) >>> or_by_masks(df, [mask1, mask2]) Age Name 2 19 Mi 3 15 Di """ if len(masks) < 1: return df if len(masks) == 1: return df[masks[0]] overall_mask = masks[0] | masks[1] for mask in masks[2:]: overall_mask = overall_mask | mask return df[overall_mask]
[ "def", "or_by_masks", "(", "df", ",", "masks", ")", ":", "if", "len", "(", "masks", ")", "<", "1", ":", "return", "df", "if", "len", "(", "masks", ")", "==", "1", ":", "return", "df", "[", "masks", "[", "0", "]", "]", "overall_mask", "=", "masks", "[", "0", "]", "|", "masks", "[", "1", "]", "for", "mask", "in", "masks", "[", "2", ":", "]", ":", "overall_mask", "=", "overall_mask", "|", "mask", "return", "df", "[", "overall_mask", "]" ]
Returns a sub-dataframe by the logical or over the given masks. Parameters ---------- df : pandas.DataFrame The dataframe to take a subframe of. masks : list A list of pandas.Series of dtype bool, indexed identically to the given dataframe. Returns ------- pandas.DataFrame The sub-dataframe resulting from applying the masks to the dataframe. Example ------- >>> import pandas as pd >>> data = [[23, 'Jo'], [19, 'Mi'], [15, 'Di']] >>> df = pd.DataFrame(data, [1, 2, 3] , ['Age', 'Name']) >>> mask1 = pd.Series([False, True, True], df.index) >>> mask2 = pd.Series([False, False, True], df.index) >>> or_by_masks(df, [mask1, mask2]) Age Name 2 19 Mi 3 15 Di
[ "Returns", "a", "sub", "-", "dataframe", "by", "the", "logical", "or", "over", "the", "given", "masks", "." ]
231059634643af2558d22070f89767410978cf56
https://github.com/shaypal5/pdutil/blob/231059634643af2558d22070f89767410978cf56/pdutil/transform/transform.py#L83-L118
242,197
MacHu-GWU/angora-project
angora/visual/timeseries.py
visualize
def visualize(x, y, xlabel=None, ylabel=None, title=None, ylim=None):
    """A universal function to plot arbitrary time series data.
    """
    total_seconds = (x[-1] - x[0]).total_seconds()

    if total_seconds <= 86400 * 1 * 3:
        return plot_one_day(x, y, xlabel, ylabel, title, ylim)

    elif total_seconds <= 86400 * 7 * 2:
        return plot_one_week(x, y, xlabel, ylabel, title, ylim)

    elif total_seconds <= 86400 * 30 * 1.5:
        return plot_one_month(x, y, xlabel, ylabel, title, ylim)

    elif total_seconds <= 86400 * 90 * 1.5:
        return plot_one_quarter(x, y, xlabel, ylabel, title, ylim)

    elif total_seconds <= 86400 * 365 * 1.5:
        return plot_one_year(x, y, xlabel, ylabel, title, ylim)
python
def visualize(x, y, xlabel=None, ylabel=None, title=None, ylim=None):
    """A universal function to plot arbitrary time series data.
    """
    total_seconds = (x[-1] - x[0]).total_seconds()

    if total_seconds <= 86400 * 1 * 3:
        return plot_one_day(x, y, xlabel, ylabel, title, ylim)

    elif total_seconds <= 86400 * 7 * 2:
        return plot_one_week(x, y, xlabel, ylabel, title, ylim)

    elif total_seconds <= 86400 * 30 * 1.5:
        return plot_one_month(x, y, xlabel, ylabel, title, ylim)

    elif total_seconds <= 86400 * 90 * 1.5:
        return plot_one_quarter(x, y, xlabel, ylabel, title, ylim)

    elif total_seconds <= 86400 * 365 * 1.5:
        return plot_one_year(x, y, xlabel, ylabel, title, ylim)
[ "def", "visualize", "(", "x", ",", "y", ",", "xlabel", "=", "None", ",", "ylabel", "=", "None", ",", "title", "=", "None", ",", "ylim", "=", "None", ")", ":", "total_seconds", "=", "(", "x", "[", "-", "1", "]", "-", "x", "[", "0", "]", ")", ".", "total_seconds", "(", ")", "if", "total_seconds", "<=", "86400", "*", "1", "*", "3", ":", "return", "plot_one_day", "(", "x", ",", "y", ",", "xlabel", ",", "ylabel", ",", "title", ",", "ylim", ")", "elif", "total_seconds", "<=", "86400", "*", "7", "*", "2", ":", "return", "plot_one_week", "(", "x", ",", "y", ",", "xlabel", ",", "ylabel", ",", "title", ",", "ylim", ")", "elif", "total_seconds", "<=", "86400", "*", "30", "*", "1.5", ":", "return", "plot_one_month", "(", "x", ",", "y", ",", "xlabel", ",", "ylabel", ",", "title", ",", "ylim", ")", "elif", "total_seconds", "<=", "86400", "*", "90", "*", "1.5", ":", "return", "plot_one_quarter", "(", "x", ",", "y", ",", "xlabel", ",", "ylabel", ",", "title", ",", "ylim", ")", "elif", "total_seconds", "<=", "86400", "*", "365", "*", "1.5", ":", "return", "plot_one_year", "(", "x", ",", "y", ",", "xlabel", ",", "ylabel", ",", "title", ",", "ylim", ")" ]
A universal function to plot arbitrary time series data.
[ "A", "universal", "function", "plot", "arbitrary", "time", "series", "data", "." ]
689a60da51cd88680ddbe26e28dbe81e6b01d275
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/visual/timeseries.py#L281-L298
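The thresholds in visualize are expressed in seconds (86400 per day) with a slack factor per scale: up to 3 days dispatches to plot_one_day, up to 2 weeks to plot_one_week, and so on; spans longer than about 1.5 years match no branch, so the function implicitly returns None. A minimal usage sketch, under the assumption that the plot_one_* helpers are importable from the same module (they are not shown here), which is why the final call is left commented:

import datetime

import numpy as np

# 48 hourly samples span 47 hours = 169200 seconds <= 86400 * 3,
# so visualize() would dispatch to plot_one_day
x = [datetime.datetime(2024, 1, 1) + datetime.timedelta(hours=i) for i in range(48)]
y = np.sin(np.linspace(0, 4 * np.pi, len(x)))
# visualize(x, y, xlabel='time', ylabel='signal', title='two days of data')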
242,198
andy-esch/sqterritory
sqterritory/territory.py
MinCostFlow._get_demand_graph
def _get_demand_graph(self):
    """Create the demand graph and return the node labels and per-origin demands."""
    # The number of clusters
    K = self.origins.shape[0]

    # Set the number of accounts in each cluster to be the same
    # as for the nearest neighbor solution
    demand = self.nearest_targets.groupby('origin_id')['geometry'].count().to_dict()

    # Set up the graph so we can extract and initialize the node labels.
    # For each iteration, we're going to sort all our data by their origin
    # label assignments in order to properly index our nodes.
    self.targets = self.targets.sort_values('labels').reset_index(drop=True)

    # Add target nodes
    g = nx.DiGraph()
    g.add_nodes_from(self.targets['target_id'], demand=-1)

    # Add origin nodes
    for idx in demand:
        g.add_node(int(idx), demand=demand[idx])

    # Dictionary of labels (corresponding to the sales rep) for
    # each med center node.
    dict_M = {
        i: (
            self.targets[self.targets['target_id'] == i]['labels'].values
            if i in self.targets.target_id.values
            else np.array([demand[i]])
        )
        for i in g.nodes
    }

    logging.info('Graph and demand dictionary created')

    return dict_M, demand
python
def _get_demand_graph(self):
    """Create the demand graph and return the node labels and per-origin demands."""
    # The number of clusters
    K = self.origins.shape[0]

    # Set the number of accounts in each cluster to be the same
    # as for the nearest neighbor solution
    demand = self.nearest_targets.groupby('origin_id')['geometry'].count().to_dict()

    # Set up the graph so we can extract and initialize the node labels.
    # For each iteration, we're going to sort all our data by their origin
    # label assignments in order to properly index our nodes.
    self.targets = self.targets.sort_values('labels').reset_index(drop=True)

    # Add target nodes
    g = nx.DiGraph()
    g.add_nodes_from(self.targets['target_id'], demand=-1)

    # Add origin nodes
    for idx in demand:
        g.add_node(int(idx), demand=demand[idx])

    # Dictionary of labels (corresponding to the sales rep) for
    # each med center node.
    dict_M = {
        i: (
            self.targets[self.targets['target_id'] == i]['labels'].values
            if i in self.targets.target_id.values
            else np.array([demand[i]])
        )
        for i in g.nodes
    }

    logging.info('Graph and demand dictionary created')

    return dict_M, demand
[ "def", "_get_demand_graph", "(", "self", ")", ":", "# The number of clusters", "K", "=", "self", ".", "origins", ".", "shape", "[", "0", "]", "# Set the number of accounts in each cluster to be the same", "# as for the nearest neighbor solution", "demand", "=", "self", ".", "nearest_targets", ".", "groupby", "(", "'origin_id'", ")", "[", "'geometry'", "]", ".", "count", "(", ")", ".", "to_dict", "(", ")", "# Set up the graph so we can extract and initialize the node labels.", "# For each iteration, we're going to sort all our data by their origin", "# label assignments in order to properly index our nodes.", "self", ".", "targets", "=", "self", ".", "targets", ".", "sort_values", "(", "'labels'", ")", ".", "reset_index", "(", "drop", "=", "True", ")", "# Add target nodes", "g", "=", "nx", ".", "DiGraph", "(", ")", "g", ".", "add_nodes_from", "(", "self", ".", "targets", "[", "'target_id'", "]", ",", "demand", "=", "-", "1", ")", "# Add origin nodes", "for", "idx", "in", "demand", ":", "g", ".", "add_node", "(", "int", "(", "idx", ")", ",", "demand", "=", "demand", "[", "idx", "]", ")", "# Dictionary of labels (corresponding to the sales rep) for", "# each med center node.", "dict_M", "=", "{", "i", ":", "(", "self", ".", "targets", "[", "self", ".", "targets", "[", "'target_id'", "]", "==", "i", "]", "[", "'labels'", "]", ".", "values", "if", "i", "in", "self", ".", "targets", ".", "target_id", ".", "values", "else", "np", ".", "array", "(", "[", "demand", "[", "i", "]", "]", ")", ")", "for", "i", "in", "g", ".", "nodes", "}", "logging", ".", "info", "(", "'Graph and demand dictionary created'", ")", "return", "dict_M", ",", "demand" ]
Create the demand graph and return the node labels and per-origin demands.
[ "create", "demand", "graph" ]
53bcf7c8946f5d216d1ceccf55f9f339125b8205
https://github.com/andy-esch/sqterritory/blob/53bcf7c8946f5d216d1ceccf55f9f339125b8205/sqterritory/territory.py#L104-L137
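For context on the demand attributes set in _get_demand_graph: in networkx, a negative node demand marks supply and a positive one marks consumption, and the demands must sum to zero for nx.min_cost_flow to be feasible. A minimal self-contained sketch with toy IDs, capacities, and costs (none of these values come from the class above):

import networkx as nx

g = nx.DiGraph()
# Three targets each supply one unit (demand=-1), mirroring add_nodes_from above
g.add_nodes_from([101, 102, 103], demand=-1)
g.add_node(1, demand=2)  # origin 1 must absorb two targets
g.add_node(2, demand=1)  # origin 2 must absorb one target
# Unit-capacity edges from every target to every origin, weighted by a toy cost
for target in (101, 102, 103):
    for origin, cost in ((1, target % 7), (2, target % 5)):
        g.add_edge(target, origin, weight=cost, capacity=1)

flow = nx.min_cost_flow(g)  # {node: {successor: flow_units}}
assignment = {t: max(flow[t], key=flow[t].get) for t in (101, 102, 103)}
print(assignment)  # each target mapped to exactly one origin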
242,199
andy-esch/sqterritory
sqterritory/territory.py
MinCostFlow.results_to_table
def results_to_table(self):
    """Process self.results and write them to a new CARTO table."""
    # Get Labels
    baseline_labels = self.nearest_targets['origin_id'].values
    mcf_labels = self.results['model_labels']['labels'].values

    # Baseline (nearest-neighbor) outcome; built for reference but not written out
    outcome = pd.DataFrame({
        'the_geom': [
            'SRID=4326;Point({lng} {lat})'.format(lng=v[0], lat=v[1])
            for v in zip(self.results['model_labels']['lng'].values,
                         self.results['model_labels']['lat'].values)],
        'target_lng': self.results['model_labels']['lng'].values,
        'target_lat': self.results['model_labels']['lat'].values,
        'origin_lng': self.origins.reindex(baseline_labels)['lng'].values,
        'origin_lat': self.origins.reindex(baseline_labels)['lat'].values,
        'target_id': self.results['model_labels'].target_id,
        'sales': self.results['model_labels'][self.demand_col].values,
        'labels': baseline_labels
        }
    )

    outcomes2 = pd.DataFrame({
        'the_geom': [
            'SRID=4326;Point({lng} {lat})'.format(lng=v[0], lat=v[1])
            for v in zip(
                self.results['model_labels']['lng'].values,
                self.results['model_labels']['lat'].values
            )
        ],
        'target_lng': self.results['model_labels']['lng'].values,
        'target_lat': self.results['model_labels']['lat'].values,
        'origin_lng': self.origins.reindex(mcf_labels)['lng'].values,
        'origin_lat': self.origins.reindex(mcf_labels)['lat'].values,
        'target_id': self.results['model_labels'].target_id,
        'sales': self.results['model_labels'][self.demand_col].values,
        'labels': mcf_labels
        },
        index=self.results['model_labels'].target_id
    )

    now = datetime.datetime.now()
    out_table = 'mincostflow_{}'.format(now.strftime("%Y_%m_%d_%H_%M_%S"))
    logging.info('Writing output to {}'.format(out_table))
    self.context.write(outcomes2.reset_index(drop=True), out_table)
    logging.info('Table {} written to CARTO'.format(out_table))

    return out_table
python
def results_to_table(self):
    """Process self.results and write them to a new CARTO table."""
    # Get Labels
    baseline_labels = self.nearest_targets['origin_id'].values
    mcf_labels = self.results['model_labels']['labels'].values

    # Baseline (nearest-neighbor) outcome; built for reference but not written out
    outcome = pd.DataFrame({
        'the_geom': [
            'SRID=4326;Point({lng} {lat})'.format(lng=v[0], lat=v[1])
            for v in zip(self.results['model_labels']['lng'].values,
                         self.results['model_labels']['lat'].values)],
        'target_lng': self.results['model_labels']['lng'].values,
        'target_lat': self.results['model_labels']['lat'].values,
        'origin_lng': self.origins.reindex(baseline_labels)['lng'].values,
        'origin_lat': self.origins.reindex(baseline_labels)['lat'].values,
        'target_id': self.results['model_labels'].target_id,
        'sales': self.results['model_labels'][self.demand_col].values,
        'labels': baseline_labels
        }
    )

    outcomes2 = pd.DataFrame({
        'the_geom': [
            'SRID=4326;Point({lng} {lat})'.format(lng=v[0], lat=v[1])
            for v in zip(
                self.results['model_labels']['lng'].values,
                self.results['model_labels']['lat'].values
            )
        ],
        'target_lng': self.results['model_labels']['lng'].values,
        'target_lat': self.results['model_labels']['lat'].values,
        'origin_lng': self.origins.reindex(mcf_labels)['lng'].values,
        'origin_lat': self.origins.reindex(mcf_labels)['lat'].values,
        'target_id': self.results['model_labels'].target_id,
        'sales': self.results['model_labels'][self.demand_col].values,
        'labels': mcf_labels
        },
        index=self.results['model_labels'].target_id
    )

    now = datetime.datetime.now()
    out_table = 'mincostflow_{}'.format(now.strftime("%Y_%m_%d_%H_%M_%S"))
    logging.info('Writing output to {}'.format(out_table))
    self.context.write(outcomes2.reset_index(drop=True), out_table)
    logging.info('Table {} written to CARTO'.format(out_table))

    return out_table
[ "def", "results_to_table", "(", "self", ")", ":", "# Get Labels", "baseline_labels", "=", "self", ".", "nearest_targets", "[", "'origin_id'", "]", ".", "values", "mcf_labels", "=", "self", ".", "results", "[", "'model_labels'", "]", "[", "'labels'", "]", ".", "values", "# Create the outcomes", "outcome", "=", "pd", ".", "DataFrame", "(", "{", "'the_geom'", ":", "[", "'SRID=4326;Point({lng} {lat})'", ".", "format", "(", "lng", "=", "v", "[", "0", "]", ",", "lat", "=", "v", "[", "1", "]", ")", "for", "v", "in", "zip", "(", "self", ".", "results", "[", "'model_labels'", "]", "[", "'lng'", "]", ".", "values", ",", "self", ".", "results", "[", "'model_labels'", "]", "[", "'lat'", "]", ".", "values", ")", "]", ",", "'target_lng'", ":", "self", ".", "results", "[", "'model_labels'", "]", "[", "'lng'", "]", ".", "values", ",", "'target_lng'", ":", "self", ".", "results", "[", "'model_labels'", "]", "[", "'lat'", "]", ".", "values", ",", "'origin_lng'", ":", "self", ".", "origins", ".", "reindex", "(", "baseline_labels", ")", "[", "'lng'", "]", ".", "values", ",", "'origin_lat'", ":", "self", ".", "origins", ".", "reindex", "(", "baseline_labels", ")", "[", "'lat'", "]", ".", "values", ",", "'target_id'", ":", "self", ".", "results", "[", "'model_labels'", "]", ".", "target_id", ",", "'sales'", ":", "self", ".", "results", "[", "'model_labels'", "]", "[", "self", ".", "demand_col", "]", ".", "values", ",", "'labels'", ":", "baseline_labels", "}", ")", "outcomes2", "=", "pd", ".", "DataFrame", "(", "{", "'the_geom'", ":", "[", "'SRID=4326;Point({lng} {lat})'", ".", "format", "(", "lng", "=", "v", "[", "0", "]", ",", "lat", "=", "v", "[", "1", "]", ")", "for", "v", "in", "zip", "(", "self", ".", "results", "[", "'model_labels'", "]", "[", "'lng'", "]", ".", "values", ",", "self", ".", "results", "[", "'model_labels'", "]", "[", "'lat'", "]", ".", "values", ")", "]", ",", "'target_lng'", ":", "self", ".", "results", "[", "'model_labels'", "]", "[", "'lng'", "]", ".", "values", ",", "'target_lat'", ":", "self", ".", "results", "[", "'model_labels'", "]", "[", "'lat'", "]", ".", "values", ",", "'origin_lng'", ":", "self", ".", "origins", ".", "reindex", "(", "mcf_labels", ")", "[", "'lng'", "]", ".", "values", ",", "'origin_lat'", ":", "self", ".", "origins", ".", "reindex", "(", "mcf_labels", ")", "[", "'lat'", "]", ".", "values", ",", "'target_id'", ":", "self", ".", "results", "[", "'model_labels'", "]", ".", "target_id", ",", "'sales'", ":", "self", ".", "results", "[", "'model_labels'", "]", "[", "self", ".", "demand_col", "]", ".", "values", ",", "'labels'", ":", "mcf_labels", "}", ",", "index", "=", "self", ".", "results", "[", "'model_labels'", "]", ".", "target_id", ")", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "out_table", "=", "'mincostflow_{}'", ".", "format", "(", "now", ".", "strftime", "(", "\"%Y_%m_%d_%H_%M_%S\"", ")", ")", "logging", ".", "info", "(", "'Writing output to {}'", ".", "format", "(", "out_table", ")", ")", "self", ".", "context", ".", "write", "(", "outcomes2", ".", "reset_index", "(", "drop", "=", "True", ")", ",", "out_table", ")", "logging", ".", "info", "(", "'Table {} written to CARTO'", ".", "format", "(", "out_table", ")", ")", "return", "out_table" ]
Process self.results and write them to a new CARTO table.
[ "Process", "self", ".", "results", "and", "send", "to", "carto", "table" ]
53bcf7c8946f5d216d1ceccf55f9f339125b8205
https://github.com/andy-esch/sqterritory/blob/53bcf7c8946f5d216d1ceccf55f9f339125b8205/sqterritory/territory.py#L280-L324
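The the_geom strings assembled in results_to_table follow the EWKT form 'SRID=4326;Point(lng lat)' that CARTO accepts for geometry columns, with longitude before latitude. A minimal sketch of that construction in isolation, using made-up coordinates:

import pandas as pd

lngs = [-73.99, -118.24]
lats = [40.73, 34.05]
df = pd.DataFrame({
    'the_geom': [
        'SRID=4326;Point({lng} {lat})'.format(lng=lng, lat=lat)
        for lng, lat in zip(lngs, lats)
    ],
    'target_lng': lngs,
    'target_lat': lats,
})
print(df['the_geom'].tolist())
# ['SRID=4326;Point(-73.99 40.73)', 'SRID=4326;Point(-118.24 34.05)']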