Dataset schema (column name: type, observed size range):

repository_name: string, 7-55 chars
func_path_in_repository: string, 4-223 chars
func_name: string, 1-134 chars
whole_func_string: string, 75-104k chars
language: class, 1 value
func_code_string: string, 75-104k chars
func_code_tokens: list, 19-28.4k items
func_documentation_string: string, 1-46.9k chars
func_documentation_tokens: list, 1-1.97k items
split_name: class, 1 value
func_code_url: string, 87-315 chars
deep-compute/logagg
logagg/formatters.py
elasticsearch
```python
def elasticsearch(line):
    '''
    >>> import pprint
    >>> input_line = '[2017-08-30T06:27:19,158] [WARN ][o.e.m.j.JvmGcMonitorService] [Glsuj_2] [gc][296816] overhead, spent [1.2s] collecting in the last [1.3s]'
    >>> output_line = elasticsearch(input_line)
    >>> pprint.pprint(output_line)
    {'data': {'garbage_collector': 'gc',
              'gc_count': 296816.0,
              'level': 'WARN',
              'message': 'o.e.m.j.JvmGcMonitorService',
              'plugin': 'Glsuj_2',
              'query_time_ms': 1200.0,
              'resp_time_ms': 1300.0,
              'timestamp': '2017-08-30T06:27:19,158'},
     'event': 'o.e.m.j.JvmGcMonitorService',
     'level': 'WARN ',
     'timestamp': '2017-08-30T06:27:19,158',
     'type': 'metric'}

    Case 2:
    [2017-09-13T23:15:00,415][WARN ][o.e.i.e.Engine ] [Glsuj_2] [filebeat-2017.09.09][3] failed engine [index]
    java.nio.file.FileSystemException: /home/user/elasticsearch/data/nodes/0/indices/jsVSO6f3Rl-wwBpQyNRCbQ/3/index/_0.fdx: Too many open files
        at sun.nio.fs.UnixException.translateToIOException(UnixException.java:91) ~[?:?]
    '''
    # TODO we need to handle case2 logs
    elasticsearch_log = line
    actuallog = re.findall(r'(\[\d+\-+\d+\d+\-+\d+\w+\d+:\d+:\d+,+\d\d\d+\].*)', elasticsearch_log)
    if len(actuallog) == 1:
        keys = ['timestamp', 'level', 'message', 'plugin', 'garbage_collector',
                'gc_count', 'query_time_ms', 'resp_time_ms']
        values = re.findall(r'\[(.*?)\]', actuallog[0])
        for index, i in enumerate(values):
            if not isinstance(i, str):
                continue
            if len(re.findall(r'.*ms$', i)) > 0 and 'ms' in re.findall(r'.*ms$', i)[0]:
                num = re.split('ms', i)[0]
                values[index] = float(num)
                continue
            if len(re.findall(r'.*s$', i)) > 0 and 's' in re.findall(r'.*s$', i)[0]:
                num = re.split('s', i)[0]
                values[index] = float(num) * 1000
                continue
        data = dict(zip(keys, values))
        if 'level' in data and data['level'][-1] == ' ':
            data['level'] = data['level'][:-1]
        if 'gc_count' in data:
            data['gc_count'] = float(data['gc_count'])
        event = data['message']
        level = values[1]
        timestamp = values[0]
        return dict(
            timestamp=timestamp,
            level=level,
            type='metric',
            data=data,
            event=event
        )
    else:
        return dict(
            timestamp=datetime.datetime.isoformat(datetime.datetime.now()),
            data={'raw': line}
        )
```
train
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/formatters.py#L306-L370
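The bracket-field parsing and duration normalization that elasticsearch() performs can be illustrated in isolation. The following is a minimal, self-contained sketch of the same idea, not the logagg API itself; the helper name is hypothetical:

```python
import re

def parse_bracket_fields(line):
    """Extract [..] fields and normalize duration suffixes to milliseconds."""
    out = []
    for v in re.findall(r'\[(.*?)\]', line):
        if re.fullmatch(r'[\d.]+ms', v):
            out.append(float(v[:-2]))          # already in milliseconds
        elif re.fullmatch(r'[\d.]+s', v):
            out.append(float(v[:-1]) * 1000)   # seconds -> milliseconds
        else:
            out.append(v)                      # non-duration field, keep as-is
    return out

print(parse_bracket_fields('[gc][296816] overhead, spent [1.2s] collecting in the last [1.3s]'))
# ['gc', '296816', 1200.0, 1300.0]
```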
deep-compute/logagg
logagg/formatters.py
elasticsearch_ispartial_log
```python
def elasticsearch_ispartial_log(line):
    '''
    >>> line1 = ' [2018-04-03T00:22:38,048][DEBUG][o.e.c.u.c.QueueResizingEsThreadPoolExecutor] [search17/search]: there were [2000] tasks in [809ms], avg task time [28.4micros], EWMA task execution [790nanos], [35165.36 tasks/s], optimal queue is [35165], current capacity [1000]'
    >>> line2 = ' org.elasticsearch.ResourceAlreadyExistsException: index [media_corpus_refresh/6_3sRAMsRr2r63J6gbOjQw] already exists'
    >>> line3 = ' at org.elasticsearch.cluster.metadata.MetaDataCreateIndexService.validateIndexName(MetaDataCreateIndexService.java:151) ~[elasticsearch-6.2.0.jar:6.2.0]'
    >>> elasticsearch_ispartial_log(line1)
    False
    >>> elasticsearch_ispartial_log(line2)
    True
    >>> elasticsearch_ispartial_log(line3)
    True
    '''
    for p in LOG_BEGIN_PATTERN:
        if re.match(p, line) is not None:
            return False
    return True
```
train
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/formatters.py#L374-L391
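A predicate like elasticsearch_ispartial_log is typically used to stitch continuation lines (stack traces, wrapped messages) back onto the record that started them. A sketch of that grouping loop, with a hypothetical stand-in for the module-level LOG_BEGIN_PATTERN list that this record does not show:

```python
import re

# Hypothetical stand-in for logagg's LOG_BEGIN_PATTERN, which is defined
# elsewhere in formatters.py and not shown in this record.
LOG_BEGIN_PATTERN = [r'\s*\[\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2},\d{3}\]']

def merge_partial_lines(lines):
    """Yield full records, gluing partial lines onto the previous record."""
    record = []
    for line in lines:
        starts_new = any(re.match(p, line) for p in LOG_BEGIN_PATTERN)
        if record and starts_new:
            yield '\n'.join(record)
            record = []
        record.append(line)
    if record:
        yield '\n'.join(record)
```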
erik/alexandra
alexandra/session.py
Session.get
```python
def get(self, attr, default=None):
    """Get an attribute defined by this session"""
    attrs = self.body.get('attributes') or {}
    return attrs.get(attr, default)
```
train
https://github.com/erik/alexandra/blob/8bea94efa1af465254a553dc4dfea3fa552b18da/alexandra/session.py#L38-L42
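Usage is straightforward; a sketch with a hand-built session body (the dict shape mirrors the Alexa request format, and it assumes alexandra.session.Session is importable):

```python
from alexandra.session import Session

session = Session({'attributes': {'user_name': 'ada'}})
print(session.get('user_name'))        # ada
print(session.get('missing', 'n/a'))   # n/a (default when the key is absent)
print(Session({}).get('anything'))     # None ('attributes' itself may be missing)
```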
zetaops/zengine
zengine/lib/catalog_data.py
CatalogData.get_all
```python
def get_all(self, cat):
    """
    If the data can't be found in the cache, it is fetched from the DB,
    parsed, and stored in the cache for each lang_code.

    :param cat: cat of catalog data
    :return:
    """
    return self._get_from_local_cache(cat) or \
        self._get_from_cache(cat) or \
        self._get_from_db(cat)
```
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/lib/catalog_data.py#L58-L66
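The `or`-chained fallback reads each tier in turn and stops at the first truthy hit. A minimal standalone sketch of the same pattern (class and names hypothetical); note that a falsy cached value such as an empty list falls through to the next tier, which is usually what you want for "not found" markers:

```python
class TieredCatalog:
    """Local dict -> shared cache -> database; first truthy answer wins."""

    def __init__(self, local, shared, db):
        self.local, self.shared, self.db = local, shared, db

    def get_all(self, cat):
        return self.local.get(cat) or self.shared.get(cat) or self.db.get(cat)

catalog = TieredCatalog(local={}, shared={}, db={'countries': ['TR', 'DE']})
print(catalog.get_all('countries'))  # ['TR', 'DE'] -- served from the db tier
```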
zetaops/zengine
zengine/lib/catalog_data.py
CatalogData._fill_get_item_cache
```python
def _fill_get_item_cache(self, catalog, key):
    """
    Get from Redis, cache locally, then return.

    :param catalog: catalog name
    :param key:
    :return:
    """
    lang = self._get_lang()
    keylist = self.get_all(catalog)
    self.ITEM_CACHE[lang][catalog] = dict((i['value'], i['name']) for i in keylist)
    return self.ITEM_CACHE[lang][catalog].get(key)
```
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/lib/catalog_data.py#L88-L99
erik/alexandra
alexandra/app.py
Application.run
```python
def run(self, host, port, debug=True, validate_requests=True):
    """Utility method to quickly get a server up and running.

    :param debug: turns on the Werkzeug debugger, code reloading, and
        full logging.
    :param validate_requests: whether or not to ensure that requests are
        sent by Amazon. Turning this off can be useful for manually
        testing the server.
    """
    if debug:
        # Turn on all alexandra log output
        logging.basicConfig(level=logging.DEBUG)

    app = self.create_wsgi_app(validate_requests)
    run_simple(host, port, app, use_reloader=debug, use_debugger=debug)
```
train
https://github.com/erik/alexandra/blob/8bea94efa1af465254a553dc4dfea3fa552b18da/alexandra/app.py#L26-L40
erik/alexandra
alexandra/app.py
Application.dispatch_request
```python
def dispatch_request(self, body):
    """Given a parsed JSON request object, call the correct Intent,
    Launch, or SessionEnded function.

    This function is called after request parsing and validation and
    will raise a `ValueError` if an unknown request type comes in.

    :param body: JSON object loaded from incoming request's POST data.
    """
    req_type = body.get('request', {}).get('type')
    session_obj = body.get('session')
    session = Session(session_obj) if session_obj else None

    if req_type == 'LaunchRequest':
        return self.launch_fn(session)

    elif req_type == 'IntentRequest':
        intent = body['request']['intent']['name']
        intent_fn = self.intent_map.get(intent, self.unknown_intent_fn)

        slots = {
            slot['name']: slot.get('value')
            for _, slot in body['request']['intent'].get('slots', {}).items()
        }

        arity = intent_fn.__code__.co_argcount
        if arity == 2:
            return intent_fn(slots, session)
        return intent_fn()

    elif req_type == 'SessionEndedRequest':
        return self.session_end_fn()

    log.error('invalid request type: %s', req_type)
    raise ValueError('bad request: %s' % body)
```
train
https://github.com/erik/alexandra/blob/8bea94efa1af465254a553dc4dfea3fa552b18da/alexandra/app.py#L42-L81
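The arity check via `intent_fn.__code__.co_argcount` is what lets handlers declare either zero or two parameters. That dispatch trick in isolation (a sketch, not alexandra's code):

```python
def call_handler(fn, slots, session):
    """Invoke fn with (slots, session) or with no arguments, by arity."""
    if fn.__code__.co_argcount == 2:
        return fn(slots, session)
    return fn()

def greet(slots, session):
    return 'hi ' + slots.get('name', 'there')

def ping():
    return 'pong'

print(call_handler(greet, {'name': 'ada'}, None))  # hi ada
print(call_handler(ping, {}, None))                # pong
```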
erik/alexandra
alexandra/app.py
Application.intent
```python
def intent(self, intent_name):
    """Decorator to register a handler for the given intent.

    The decorated function can either take 0 or 2 arguments. If two are
    specified, it will be provided a dictionary of `{slot_name: value}`
    and a :py:class:`alexandra.session.Session` instance. If no session
    was provided in the request, the session object will be `None`. ::

        @alexa_app.intent('FooBarBaz')
        def foo_bar_baz_intent(slots, session):
            pass

        @alexa_app.intent('NoArgs')
        def noargs_intent():
            pass
    """
    # nested decorator so we can have params.
    def _decorator(func):
        arity = func.__code__.co_argcount
        if arity not in [0, 2]:
            raise ValueError("expected 0 or 2 argument function")

        self.intent_map[intent_name] = func
        return func

    return _decorator
```
train
https://github.com/erik/alexandra/blob/8bea94efa1af465254a553dc4dfea3fa552b18da/alexandra/app.py#L96-L125
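A stripped-down registry decorator of the same shape shows why the nesting is needed: the outer call takes the intent name, the inner one receives the function. Illustrative only, not the library's code:

```python
intent_map = {}

def intent(intent_name):
    def _decorator(func):
        if func.__code__.co_argcount not in (0, 2):
            raise ValueError("expected 0 or 2 argument function")
        intent_map[intent_name] = func
        return func
    return _decorator

@intent('Ping')
def ping():
    return 'pong'

print(intent_map)  # {'Ping': <function ping at ...>}
```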
zetaops/zengine
zengine/messaging/lib.py
BaseUser.set_password
```python
def set_password(self, raw_password):
    """
    Encrypts the raw password and sets it on the user.

    Args:
        raw_password (str)
    """
    self.password = pbkdf2_sha512.encrypt(raw_password, rounds=10000, salt_size=10)
```
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/messaging/lib.py#L56-L64
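Verification is the mirror image of this call. A sketch using the same passlib hasher (in modern passlib, `encrypt` is the legacy alias of `hash`, and settings are passed via `.using()`):

```python
from passlib.hash import pbkdf2_sha512

hasher = pbkdf2_sha512.using(rounds=10000, salt_size=10)
hashed = hasher.hash('s3cret')

assert hashed.startswith('$pbkdf2')            # the prefix encrypt_password checks for
assert pbkdf2_sha512.verify('s3cret', hashed)  # correct password verifies
assert not pbkdf2_sha512.verify('wrong', hashed)
```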
zetaops/zengine
zengine/messaging/lib.py
BaseUser.encrypt_password
```python
def encrypt_password(self):
    """Encrypt the password if it is not already encrypted."""
    if self.password and not self.password.startswith('$pbkdf2'):
        self.set_password(self.password)
```
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/messaging/lib.py#L89-L92
zetaops/zengine
zengine/messaging/lib.py
BaseUser.send_notification
```python
def send_notification(self, title, message, typ=1, url=None, sender=None):
    """
    Sends a message to the user's private MQ exchange.

    Args:
        title:
        message:
        sender:
        url:
        typ:
    """
    self.created_channels.channel.add_message(
        channel_key=self.prv_exchange,
        body=message,
        title=title,
        typ=typ,
        url=url,
        sender=sender,
        receiver=self
    )
```
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/messaging/lib.py#L161-L179
zetaops/zengine
zengine/messaging/lib.py
BaseUser.send_client_cmd
```python
def send_client_cmd(self, data, cmd=None, via_queue=None):
    """
    Send an arbitrary cmd and data to the client.

    If a queue name is passed via the "via_queue" parameter, that queue
    will be used instead of the user's private exchange.

    Args:
        data: dict
        cmd: string
        via_queue: queue name
    """
    mq_channel = self._connect_mq()
    if cmd:
        data['cmd'] = cmd
    if via_queue:
        mq_channel.basic_publish(exchange='',
                                 routing_key=via_queue,
                                 body=json.dumps(data))
    else:
        mq_channel.basic_publish(exchange=self.prv_exchange,
                                 routing_key='',
                                 body=json.dumps(data))
```
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/messaging/lib.py#L181-L202
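The two branches map onto standard AMQP routing: publishing to the default exchange (empty name) delivers directly to the queue named by the routing key, while publishing to a named exchange ignores the per-queue routing. A pika-based sketch of the via_queue branch, assuming a RabbitMQ broker on localhost (queue name hypothetical):

```python
import json
import pika  # assumes a reachable RabbitMQ broker

connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='client_q')

# via_queue branch: the default exchange ('') routes by queue name.
channel.basic_publish(exchange='',
                      routing_key='client_q',
                      body=json.dumps({'cmd': 'refresh'}))
connection.close()
```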
cimm-kzn/CGRtools
CGRtools/files/RDFrw.py
RDFread.seek
```python
def seek(self, offset):
    """
    Shifts to the given record number in the original file.

    :param offset: number of the record
    """
    if self._shifts:
        if 0 <= offset < len(self._shifts):
            current_pos = self._file.tell()
            new_pos = self._shifts[offset]
            if current_pos != new_pos:
                if current_pos == self._shifts[-1]:  # reached the end of the file
                    self._data = self.__reader()
                    self.__file = iter(self._file.readline, '')
                    self._file.seek(0)
                    next(self._data)
                    if offset:  # move not to the beginning of the file
                        self._file.seek(new_pos)
                else:
                    if not self.__already_seeked:
                        if self._shifts[0] < current_pos:  # in the middle of the file
                            self._data.send(True)
                        self.__already_seeked = True
                    self._file.seek(new_pos)
        else:
            raise IndexError('invalid offset')
    else:
        raise self._implement_error
```
train
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/files/RDFrw.py#L68-L94
cimm-kzn/CGRtools
CGRtools/files/RDFrw.py
RDFread.tell
```python
def tell(self):
    """
    :return: number of records processed from the original file
    """
    if self._shifts:
        t = self._file.tell()
        if t == self._shifts[0]:
            return 0
        elif t == self._shifts[-1]:
            return len(self._shifts) - 1
        elif t in self._shifts:
            return bisect_left(self._shifts, t)
        else:
            return bisect_left(self._shifts, t) - 1
    raise self._implement_error
```
train
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/files/RDFrw.py#L96-L110
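The bisect arithmetic is easiest to see with a toy shift table. A standalone sketch of the same mapping from a byte position to a record index (offsets invented for illustration):

```python
from bisect import bisect_left

shifts = [0, 120, 260, 405]  # byte offsets where records 0..3 start

def record_index(byte_pos):
    if byte_pos == shifts[-1]:
        return len(shifts) - 1                 # sitting at the very end
    if byte_pos in shifts:
        return bisect_left(shifts, byte_pos)   # exactly at a record start
    return bisect_left(shifts, byte_pos) - 1   # somewhere inside the record to the left

print(record_index(0))    # 0
print(record_index(130))  # 1 -- inside record 1
print(record_index(260))  # 2 -- exactly at the start of record 2
```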
zetaops/zengine
zengine/views/task_manager_actions.py
TaskManagerActionsView.assign_yourself
```python
def assign_yourself(self):
    """
    Assigns the workflow to the current user.

    The selected job is checked for an already-assigned role. If no role
    is assigned, the user takes the job and a success message is shown.
    If a role is already assigned, nothing is changed and a message to
    that effect is shown.

    .. code-block:: python

        # request:
        {
            'task_inv_key': string,
        }
    """
    task_invitation = TaskInvitation.objects.get(self.task_invitation_key)
    wfi = task_invitation.instance
    if not wfi.current_actor.exist:
        wfi.current_actor = self.current.role
        wfi.save()
        for inv in TaskInvitation.objects.filter(instance=wfi):
            if inv != task_invitation:
                inv.delete()
        title = _(u"Successful")
        msg = _(u"You have successfully assigned the job to yourself.")
    else:
        title = _(u"Unsuccessful")
        msg = _(u"Unfortunately, this job is already taken by someone else.")

    self.current.msg_box(title=title, msg=msg)
```
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/views/task_manager_actions.py#L29-L60
zetaops/zengine
zengine/views/task_manager_actions.py
TaskManagerActionsView.select_role
```python
def select_role(self):
    """
    Form to assign the workflow to a person with the same role and unit
    as the user.

    .. code-block:: python

        # request:
        {
            'task_inv_key': string,
        }
    """
    roles = [(m.key, m.__unicode__()) for m in RoleModel.objects.filter(
        abstract_role=self.current.role.abstract_role,
        unit=self.current.role.unit) if m != self.current.role]
    if roles:
        _form = forms.JsonForm(title=_(u'Assign to workflow'))
        _form.select_role = fields.Integer(_(u"Choose Role"), choices=roles)
        _form.explain_text = fields.String(_(u"Explain Text"), required=False)
        _form.send_button = fields.Button(_(u"Send"))
        self.form_out(_form)
    else:
        title = _(u"Unsuccessful")
        msg = _(u"Assign role not found")
        self.current.msg_box(title=title, msg=msg)
```
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/views/task_manager_actions.py#L65-L90
zetaops/zengine
zengine/views/task_manager_actions.py
TaskManagerActionsView.send_workflow
```python
def send_workflow(self):
    """
    Assigns the workflow instance and the task invitation to the
    selected role.
    """
    task_invitation = TaskInvitation.objects.get(self.task_invitation_key)
    wfi = task_invitation.instance
    select_role = self.input['form']['select_role']
    if wfi.current_actor == self.current.role:
        task_invitation.role = RoleModel.objects.get(select_role)
        wfi.current_actor = RoleModel.objects.get(select_role)
        wfi.save()
        task_invitation.save()
        for inv in TaskInvitation.objects.filter(instance=wfi):
            if inv != task_invitation:
                inv.delete()
        title = _(u"Successful")
        msg = _(u"The workflow was assigned to someone else with success.")
    else:
        title = _(u"Unsuccessful")
        msg = _(u"This workflow does not belong to you, you cannot assign it to someone else.")

    self.current.msg_box(title=title, msg=msg)
```
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/views/task_manager_actions.py#L92-L112
zetaops/zengine
zengine/views/task_manager_actions.py
TaskManagerActionsView.select_postponed_date
```python
def select_postponed_date(self):
    """
    Form to pick the start and finish dates to which the workflow is to
    be postponed.

    .. code-block:: python

        # request:
        {
            'task_inv_key': string,
        }
    """
    _form = forms.JsonForm(title="Postponed Workflow")
    _form.start_date = fields.DateTime("Start Date")
    _form.finish_date = fields.DateTime("Finish Date")
    _form.save_button = fields.Button("Save")
    self.form_out(_form)
```
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/views/task_manager_actions.py#L117-L133
zetaops/zengine
zengine/views/task_manager_actions.py
TaskManagerActionsView.save_date
```python
def save_date(self):
    """
    Saves the postponed start and finish dates on both the task
    invitation and the workflow instance, provided the workflow is
    assigned to the current user's role.
    """
    task_invitation = TaskInvitation.objects.get(self.task_invitation_key)
    wfi = task_invitation.instance
    if wfi.current_actor.exist and wfi.current_actor == self.current.role:
        dt_start = datetime.strptime(self.input['form']['start_date'], "%d.%m.%Y")
        dt_finish = datetime.strptime(self.input['form']['finish_date'], "%d.%m.%Y")

        task_invitation.start_date = dt_start
        task_invitation.finish_date = dt_finish
        task_invitation.save()

        wfi.start_date = dt_start
        wfi.finish_date = dt_finish
        wfi.save()

        title = _(u"Successful")
        msg = _(u"You've extended the workflow time.")
    else:
        title = _(u"Unsuccessful")
        msg = _(u"This workflow does not belong to you.")

    self.current.msg_box(title=title, msg=msg)
```
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/views/task_manager_actions.py#L135-L162
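The view expects dates in day.month.year form, as the "%d.%m.%Y" format string shows. A quick check of that round trip:

```python
from datetime import datetime

dt = datetime.strptime('03.04.2018', '%d.%m.%Y')
print(dt)                        # 2018-04-03 00:00:00
print(dt.strftime('%d.%m.%Y'))   # 03.04.2018
```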
zetaops/zengine
zengine/views/task_manager_actions.py
TaskManagerActionsView.suspend
```python
def suspend(self):
    """
    If the workflow is assigned to a role and that role is the current
    user's, the user can drop the workflow; otherwise nothing is changed.

    .. code-block:: python

        # request:
        {
            'task_inv_key': string,
        }
    """
    task_invitation = TaskInvitation.objects.get(self.task_invitation_key)
    wfi = task_invitation.instance
    if wfi.current_actor.exist and wfi.current_actor == self.current.role:
        for m in RoleModel.objects.filter(abstract_role=self.current.role.abstract_role,
                                          unit=self.current.role.unit):
            if m != self.current.role:
                task_invitation.key = ''
                task_invitation.role = m
                task_invitation.save()

        wfi.current_actor = RoleModel()
        wfi.save()
        title = _(u"Successful")
        msg = _(u"You left the workflow.")
    else:
        title = _(u"Unsuccessful")
        msg = _(u"Unfortunately, this workflow does not belong to you or is already idle.")

    self.current.msg_box(title=title, msg=msg)
```
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/views/task_manager_actions.py#L167-L200
LordDarkula/chess_py
chess_py/pieces/pawn.py
Pawn.on_home_row
```python
def on_home_row(self, location=None):
    """
    Finds out if the piece is on the home row.

    :return: bool for whether piece is on home row or not
    """
    location = location or self.location

    return (self.color == color.white and location.rank == 1) or \
           (self.color == color.black and location.rank == 6)
```
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/pieces/pawn.py#L51-L59
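Note that ranks here are 0-indexed: rank 1 is the board's second rank (white pawns' home row) and rank 6 is the seventh (black pawns' home row). A standalone restatement of the predicate, with plain strings standing in for chess_py's color and Location types:

```python
def on_home_row(piece_color, rank):
    # 0-indexed ranks: white pawns start on rank 1, black pawns on rank 6
    return (piece_color == 'white' and rank == 1) or \
           (piece_color == 'black' and rank == 6)

assert on_home_row('white', 1)
assert on_home_row('black', 6)
assert not on_home_row('white', 6)
```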
LordDarkula/chess_py
chess_py/pieces/pawn.py
Pawn.would_move_be_promotion
```python
def would_move_be_promotion(self, location=None):
    """
    Finds if a move from the current location would result in promotion.

    :type: location: Location
    :rtype: bool
    """
    location = location or self.location

    return (location.rank == 1 and self.color == color.black) or \
           (location.rank == 6 and self.color == color.white)
```
def would_move_be_promotion(self, location=None):
    """
    Finds if move from current get_location would result in promotion

    :type: location: Location
    :rtype: bool
    """
    location = location or self.location
    return (location.rank == 1 and self.color == color.black) or \
           (location.rank == 6 and self.color == color.white)
[ "def", "would_move_be_promotion", "(", "self", ",", "location", "=", "None", ")", ":", "location", "=", "location", "or", "self", ".", "location", "return", "(", "location", ".", "rank", "==", "1", "and", "self", ".", "color", "==", "color", ".", "black", ")", "or", "(", "location", ".", "rank", "==", "6", "and", "self", ".", "color", "==", "color", ".", "white", ")" ]
Finds if move from current get_location would result in promotion :type: location: Location :rtype: bool
[ "Finds", "if", "move", "from", "current", "get_location", "would", "result", "in", "promotion" ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/pieces/pawn.py#L61-L70
LordDarkula/chess_py
chess_py/pieces/pawn.py
Pawn.square_in_front
def square_in_front(self, location=None):
    """
    Finds square directly in front of Pawn

    :type: location: Location
    :rtype: Location
    """
    location = location or self.location
    return location.shift_up() if self.color == color.white else location.shift_down()
python
def square_in_front(self, location=None):
    """
    Finds square directly in front of Pawn

    :type: location: Location
    :rtype: Location
    """
    location = location or self.location
    return location.shift_up() if self.color == color.white else location.shift_down()
[ "def", "square_in_front", "(", "self", ",", "location", "=", "None", ")", ":", "location", "=", "location", "or", "self", ".", "location", "return", "location", ".", "shift_up", "(", ")", "if", "self", ".", "color", "==", "color", ".", "white", "else", "location", ".", "shift_down", "(", ")" ]
Finds square directly in front of Pawn :type: location: Location :rtype: Location
[ "Finds", "square", "directly", "in", "front", "of", "Pawn" ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/pieces/pawn.py#L72-L80
LordDarkula/chess_py
chess_py/pieces/pawn.py
Pawn.forward_moves
def forward_moves(self, position):
    """
    Finds possible moves one step and two steps in front of Pawn.

    :type: position: Board
    :rtype: list
    """
    if position.is_square_empty(self.square_in_front(self.location)):
        """
        If square in front is empty add the move
        """
        if self.would_move_be_promotion():
            for move in self.create_promotion_moves(notation_const.PROMOTE):
                yield move
        else:
            yield self.create_move(end_loc=self.square_in_front(self.location),
                                   status=notation_const.MOVEMENT)

        if self.on_home_row() and \
                position.is_square_empty(self.two_squares_in_front(self.location)):
            """
            If pawn is on home row and two squares in front of the pawn is empty
            add the move
            """
            yield self.create_move(
                end_loc=self.square_in_front(self.square_in_front(self.location)),
                status=notation_const.MOVEMENT
            )
python
def forward_moves(self, position):
    """
    Finds possible moves one step and two steps in front of Pawn.

    :type: position: Board
    :rtype: list
    """
    if position.is_square_empty(self.square_in_front(self.location)):
        """
        If square in front is empty add the move
        """
        if self.would_move_be_promotion():
            for move in self.create_promotion_moves(notation_const.PROMOTE):
                yield move
        else:
            yield self.create_move(end_loc=self.square_in_front(self.location),
                                   status=notation_const.MOVEMENT)

        if self.on_home_row() and \
                position.is_square_empty(self.two_squares_in_front(self.location)):
            """
            If pawn is on home row and two squares in front of the pawn is empty
            add the move
            """
            yield self.create_move(
                end_loc=self.square_in_front(self.square_in_front(self.location)),
                status=notation_const.MOVEMENT
            )
[ "def", "forward_moves", "(", "self", ",", "position", ")", ":", "if", "position", ".", "is_square_empty", "(", "self", ".", "square_in_front", "(", "self", ".", "location", ")", ")", ":", "\"\"\"\n If square in front is empty add the move\n \"\"\"", "if", "self", ".", "would_move_be_promotion", "(", ")", ":", "for", "move", "in", "self", ".", "create_promotion_moves", "(", "notation_const", ".", "PROMOTE", ")", ":", "yield", "move", "else", ":", "yield", "self", ".", "create_move", "(", "end_loc", "=", "self", ".", "square_in_front", "(", "self", ".", "location", ")", ",", "status", "=", "notation_const", ".", "MOVEMENT", ")", "if", "self", ".", "on_home_row", "(", ")", "and", "position", ".", "is_square_empty", "(", "self", ".", "two_squares_in_front", "(", "self", ".", "location", ")", ")", ":", "\"\"\"\n If pawn is on home row and two squares in front of the pawn is empty\n add the move\n \"\"\"", "yield", "self", ".", "create_move", "(", "end_loc", "=", "self", ".", "square_in_front", "(", "self", ".", "square_in_front", "(", "self", ".", "location", ")", ")", ",", "status", "=", "notation_const", ".", "MOVEMENT", ")" ]
Finds possible moves one step and two steps in front of Pawn. :type: position: Board :rtype: list
[ "Finds", "possible", "moves", "one", "step", "and", "two", "steps", "in", "front", "of", "Pawn", "." ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/pieces/pawn.py#L105-L133
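Note that the two-step advance is only examined inside the branch where the square directly ahead is already known to be empty, so a blocked pawn can never jump over a piece. A toy generator, unrelated to the chess_py API, mirrors that nesting:

def toy_forward_moves(one_ahead_empty, two_ahead_empty, on_home_row):
    if one_ahead_empty:
        yield 'one step'
        # The double step is only reachable when the single step was clear.
        if on_home_row and two_ahead_empty:
            yield 'two steps'

assert list(toy_forward_moves(True, True, True)) == ['one step', 'two steps']
assert list(toy_forward_moves(False, True, True)) == []  # blocked pawns cannot jump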
LordDarkula/chess_py
chess_py/pieces/pawn.py
Pawn._one_diagonal_capture_square
def _one_diagonal_capture_square(self, capture_square, position):
    """
    Adds specified diagonal as a capture move if it is one
    """
    if self.contains_opposite_color_piece(capture_square, position):
        if self.would_move_be_promotion():
            for move in self.create_promotion_moves(status=notation_const.CAPTURE_AND_PROMOTE,
                                                    location=capture_square):
                yield move
        else:
            yield self.create_move(end_loc=capture_square,
                                   status=notation_const.CAPTURE)
python
def _one_diagonal_capture_square(self, capture_square, position):
    """
    Adds specified diagonal as a capture move if it is one
    """
    if self.contains_opposite_color_piece(capture_square, position):
        if self.would_move_be_promotion():
            for move in self.create_promotion_moves(status=notation_const.CAPTURE_AND_PROMOTE,
                                                    location=capture_square):
                yield move
        else:
            yield self.create_move(end_loc=capture_square,
                                   status=notation_const.CAPTURE)
[ "def", "_one_diagonal_capture_square", "(", "self", ",", "capture_square", ",", "position", ")", ":", "if", "self", ".", "contains_opposite_color_piece", "(", "capture_square", ",", "position", ")", ":", "if", "self", ".", "would_move_be_promotion", "(", ")", ":", "for", "move", "in", "self", ".", "create_promotion_moves", "(", "status", "=", "notation_const", ".", "CAPTURE_AND_PROMOTE", ",", "location", "=", "capture_square", ")", ":", "yield", "move", "else", ":", "yield", "self", ".", "create_move", "(", "end_loc", "=", "capture_square", ",", "status", "=", "notation_const", ".", "CAPTURE", ")" ]
Adds specified diagonal as a capture move if it is one
[ "Adds", "specified", "diagonal", "as", "a", "capture", "move", "if", "it", "is", "one" ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/pieces/pawn.py#L135-L148
LordDarkula/chess_py
chess_py/pieces/pawn.py
Pawn.capture_moves
def capture_moves(self, position):
    """
    Finds out all possible capture moves

    :rtype: list
    """
    try:
        right_diagonal = self.square_in_front(self.location.shift_right())
        for move in self._one_diagonal_capture_square(right_diagonal, position):
            yield move
    except IndexError:
        pass

    try:
        left_diagonal = self.square_in_front(self.location.shift_left())
        for move in self._one_diagonal_capture_square(left_diagonal, position):
            yield move
    except IndexError:
        pass
python
def capture_moves(self, position):
    """
    Finds out all possible capture moves

    :rtype: list
    """
    try:
        right_diagonal = self.square_in_front(self.location.shift_right())
        for move in self._one_diagonal_capture_square(right_diagonal, position):
            yield move
    except IndexError:
        pass

    try:
        left_diagonal = self.square_in_front(self.location.shift_left())
        for move in self._one_diagonal_capture_square(left_diagonal, position):
            yield move
    except IndexError:
        pass
[ "def", "capture_moves", "(", "self", ",", "position", ")", ":", "try", ":", "right_diagonal", "=", "self", ".", "square_in_front", "(", "self", ".", "location", ".", "shift_right", "(", ")", ")", "for", "move", "in", "self", ".", "_one_diagonal_capture_square", "(", "right_diagonal", ",", "position", ")", ":", "yield", "move", "except", "IndexError", ":", "pass", "try", ":", "left_diagonal", "=", "self", ".", "square_in_front", "(", "self", ".", "location", ".", "shift_left", "(", ")", ")", "for", "move", "in", "self", ".", "_one_diagonal_capture_square", "(", "left_diagonal", ",", "position", ")", ":", "yield", "move", "except", "IndexError", ":", "pass" ]
Finds out all possible capture moves :rtype: list
[ "Finds", "out", "all", "possible", "capture", "moves" ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/pieces/pawn.py#L150-L168
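The try/except IndexError blocks rely on the Location shift methods raising when a square falls off the board. A minimal stand-alone illustration of that edge-guard pattern (the shift_right below is a stand-in, not chess_py code):

def shift_right(file):
    # Stand-in for Location.shift_right: raise when leaving the 8-wide board.
    if file + 1 > 7:
        raise IndexError('off the board')
    return file + 1

def capture_files(file):
    try:
        yield shift_right(file)
    except IndexError:
        pass  # a pawn on the h-file simply has no right-diagonal capture

assert list(capture_files(3)) == [4]
assert list(capture_files(7)) == []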
LordDarkula/chess_py
chess_py/pieces/pawn.py
Pawn.on_en_passant_valid_location
def on_en_passant_valid_location(self):
    """
    Finds out if pawn is on enemy center rank.

    :rtype: bool
    """
    return (self.color == color.white and self.location.rank == 4) or \
           (self.color == color.black and self.location.rank == 3)
python
def on_en_passant_valid_location(self):
    """
    Finds out if pawn is on enemy center rank.

    :rtype: bool
    """
    return (self.color == color.white and self.location.rank == 4) or \
           (self.color == color.black and self.location.rank == 3)
[ "def", "on_en_passant_valid_location", "(", "self", ")", ":", "return", "(", "self", ".", "color", "==", "color", ".", "white", "and", "self", ".", "location", ".", "rank", "==", "4", ")", "or", "(", "self", ".", "color", "==", "color", ".", "black", "and", "self", ".", "location", ".", "rank", "==", "3", ")" ]
Finds out if pawn is on enemy center rank. :rtype: bool
[ "Finds", "out", "if", "pawn", "is", "on", "enemy", "center", "rank", "." ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/pieces/pawn.py#L170-L177
LordDarkula/chess_py
chess_py/pieces/pawn.py
Pawn._is_en_passant_valid
def _is_en_passant_valid(self, opponent_pawn_location, position):
    """
    Finds if their opponent's pawn is next to this pawn

    :rtype: bool
    """
    try:
        pawn = position.piece_at_square(opponent_pawn_location)
        return pawn is not None and \
            isinstance(pawn, Pawn) and \
            pawn.color != self.color and \
            position.piece_at_square(opponent_pawn_location).just_moved_two_steps
    except IndexError:
        return False
python
def _is_en_passant_valid(self, opponent_pawn_location, position):
    """
    Finds if their opponent's pawn is next to this pawn

    :rtype: bool
    """
    try:
        pawn = position.piece_at_square(opponent_pawn_location)
        return pawn is not None and \
            isinstance(pawn, Pawn) and \
            pawn.color != self.color and \
            position.piece_at_square(opponent_pawn_location).just_moved_two_steps
    except IndexError:
        return False
[ "def", "_is_en_passant_valid", "(", "self", ",", "opponent_pawn_location", ",", "position", ")", ":", "try", ":", "pawn", "=", "position", ".", "piece_at_square", "(", "opponent_pawn_location", ")", "return", "pawn", "is", "not", "None", "and", "isinstance", "(", "pawn", ",", "Pawn", ")", "and", "pawn", ".", "color", "!=", "self", ".", "color", "and", "position", ".", "piece_at_square", "(", "opponent_pawn_location", ")", ".", "just_moved_two_steps", "except", "IndexError", ":", "return", "False" ]
Finds if their opponent's pawn is next to this pawn :rtype: bool
[ "Finds", "if", "their", "opponent", "s", "pawn", "is", "next", "to", "this", "pawn" ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/pieces/pawn.py#L179-L192
LordDarkula/chess_py
chess_py/pieces/pawn.py
Pawn.add_one_en_passant_move
def add_one_en_passant_move(self, direction, position):
    """
    Yields en_passant moves in given direction if it is legal.

    :type: direction: function
    :type: position: Board
    :rtype: gen
    """
    try:
        if self._is_en_passant_valid(direction(self.location), position):
            yield self.create_move(
                end_loc=self.square_in_front(direction(self.location)),
                status=notation_const.EN_PASSANT
            )
    except IndexError:
        pass
python
def add_one_en_passant_move(self, direction, position):
    """
    Yields en_passant moves in given direction if it is legal.

    :type: direction: function
    :type: position: Board
    :rtype: gen
    """
    try:
        if self._is_en_passant_valid(direction(self.location), position):
            yield self.create_move(
                end_loc=self.square_in_front(direction(self.location)),
                status=notation_const.EN_PASSANT
            )
    except IndexError:
        pass
[ "def", "add_one_en_passant_move", "(", "self", ",", "direction", ",", "position", ")", ":", "try", ":", "if", "self", ".", "_is_en_passant_valid", "(", "direction", "(", "self", ".", "location", ")", ",", "position", ")", ":", "yield", "self", ".", "create_move", "(", "end_loc", "=", "self", ".", "square_in_front", "(", "direction", "(", "self", ".", "location", ")", ")", ",", "status", "=", "notation_const", ".", "EN_PASSANT", ")", "except", "IndexError", ":", "pass" ]
Yields en_passant moves in given direction if it is legal. :type: direction: function :type: position: Board :rtype: gen
[ "Yields", "en_passant", "moves", "in", "given", "direction", "if", "it", "is", "legal", "." ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/pieces/pawn.py#L194-L209
LordDarkula/chess_py
chess_py/pieces/pawn.py
Pawn.en_passant_moves
def en_passant_moves(self, position):
    """
    Finds possible en passant moves.

    :rtype: list
    """
    # if pawn is not on a valid en passant get_location then return None
    if self.on_en_passant_valid_location():
        for move in itertools.chain(self.add_one_en_passant_move(lambda x: x.shift_right(), position),
                                    self.add_one_en_passant_move(lambda x: x.shift_left(), position)):
            yield move
python
def en_passant_moves(self, position):
    """
    Finds possible en passant moves.

    :rtype: list
    """
    # if pawn is not on a valid en passant get_location then return None
    if self.on_en_passant_valid_location():
        for move in itertools.chain(self.add_one_en_passant_move(lambda x: x.shift_right(), position),
                                    self.add_one_en_passant_move(lambda x: x.shift_left(), position)):
            yield move
[ "def", "en_passant_moves", "(", "self", ",", "position", ")", ":", "# if pawn is not on a valid en passant get_location then return None", "if", "self", ".", "on_en_passant_valid_location", "(", ")", ":", "for", "move", "in", "itertools", ".", "chain", "(", "self", ".", "add_one_en_passant_move", "(", "lambda", "x", ":", "x", ".", "shift_right", "(", ")", ",", "position", ")", ",", "self", ".", "add_one_en_passant_move", "(", "lambda", "x", ":", "x", ".", "shift_left", "(", ")", ",", "position", ")", ")", ":", "yield", "move" ]
Finds possible en passant moves. :rtype: list
[ "Finds", "possible", "en", "passant", "moves", "." ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/pieces/pawn.py#L211-L222
LordDarkula/chess_py
chess_py/pieces/pawn.py
Pawn.possible_moves
def possible_moves(self, position):
    """
    Finds out the locations of possible moves given board.Board position.
    :pre get_location is on board and piece at specified get_location on position

    :type: position: Board
    :rtype: list
    """
    for move in itertools.chain(self.forward_moves(position),
                                self.capture_moves(position),
                                self.en_passant_moves(position)):
        yield move
python
def possible_moves(self, position):
    """
    Finds out the locations of possible moves given board.Board position.
    :pre get_location is on board and piece at specified get_location on position

    :type: position: Board
    :rtype: list
    """
    for move in itertools.chain(self.forward_moves(position),
                                self.capture_moves(position),
                                self.en_passant_moves(position)):
        yield move
[ "def", "possible_moves", "(", "self", ",", "position", ")", ":", "for", "move", "in", "itertools", ".", "chain", "(", "self", ".", "forward_moves", "(", "position", ")", ",", "self", ".", "capture_moves", "(", "position", ")", ",", "self", ".", "en_passant_moves", "(", "position", ")", ")", ":", "yield", "move" ]
Finds out the locations of possible moves given board.Board position. :pre get_location is on board and piece at specified get_location on position :type: position: Board :rtype: list
[ "Finds", "out", "the", "locations", "of", "possible", "moves", "given", "board", ".", "Board", "position", ".", ":", "pre", "get_location", "is", "on", "board", "and", "piece", "at", "specified", "get_location", "on", "position" ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/pieces/pawn.py#L224-L235
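possible_moves simply concatenates three lazy generators; itertools.chain evaluates them in order and on demand, which this small stand-alone sketch demonstrates with toy move generators:

import itertools

def forward():
    yield 'e3'
    yield 'e4'

def captures():
    yield 'd3'

def en_passant():
    return iter(())  # no en passant available in this toy position

moves = itertools.chain(forward(), captures(), en_passant())
assert list(moves) == ['e3', 'e4', 'd3']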
LordDarkula/chess_py
main.py
main
def main():
    """
    Main method
    """
    print("Creating a new game...")

    new_game = Game(Human(color.white), Human(color.black))
    result = new_game.play()

    print("Result is ", result)
python
def main():
    """
    Main method
    """
    print("Creating a new game...")

    new_game = Game(Human(color.white), Human(color.black))
    result = new_game.play()

    print("Result is ", result)
[ "def", "main", "(", ")", ":", "print", "(", "\"Creating a new game...\"", ")", "new_game", "=", "Game", "(", "Human", "(", "color", ".", "white", ")", ",", "Human", "(", "color", ".", "black", ")", ")", "result", "=", "new_game", ".", "play", "(", ")", "print", "(", "\"Result is \"", ",", "result", ")" ]
Main method
[ "Main", "method" ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/main.py#L25-L34
erik/alexandra
alexandra/util.py
respond
def respond(text=None, ssml=None, attributes=None, reprompt_text=None,
            reprompt_ssml=None, end_session=True):
    """ Build a dict containing a valid response to an Alexa request.

    If speech output is desired, either of `text` or `ssml` should
    be specified.

    :param text: Plain text speech output to be said by Alexa device.
    :param ssml: Speech output in SSML form.
    :param attributes: Dictionary of attributes to store in the session.
    :param end_session: Should the session be terminated after this response?
    :param reprompt_text, reprompt_ssml: Works the same as
        `text`/`ssml`, but instead sets the reprompting speech output.
    """

    obj = {
        'version': '1.0',
        'response': {
            'outputSpeech': {'type': 'PlainText', 'text': ''},
            'shouldEndSession': end_session
        },
        'sessionAttributes': attributes or {}
    }

    if text:
        obj['response']['outputSpeech'] = {'type': 'PlainText', 'text': text}
    elif ssml:
        obj['response']['outputSpeech'] = {'type': 'SSML', 'ssml': ssml}

    reprompt_output = None
    if reprompt_text:
        reprompt_output = {'type': 'PlainText', 'text': reprompt_text}
    elif reprompt_ssml:
        reprompt_output = {'type': 'SSML', 'ssml': reprompt_ssml}

    if reprompt_output:
        obj['response']['reprompt'] = {'outputSpeech': reprompt_output}

    return obj
python
def respond(text=None, ssml=None, attributes=None, reprompt_text=None,
            reprompt_ssml=None, end_session=True):
    """ Build a dict containing a valid response to an Alexa request.

    If speech output is desired, either of `text` or `ssml` should
    be specified.

    :param text: Plain text speech output to be said by Alexa device.
    :param ssml: Speech output in SSML form.
    :param attributes: Dictionary of attributes to store in the session.
    :param end_session: Should the session be terminated after this response?
    :param reprompt_text, reprompt_ssml: Works the same as
        `text`/`ssml`, but instead sets the reprompting speech output.
    """

    obj = {
        'version': '1.0',
        'response': {
            'outputSpeech': {'type': 'PlainText', 'text': ''},
            'shouldEndSession': end_session
        },
        'sessionAttributes': attributes or {}
    }

    if text:
        obj['response']['outputSpeech'] = {'type': 'PlainText', 'text': text}
    elif ssml:
        obj['response']['outputSpeech'] = {'type': 'SSML', 'ssml': ssml}

    reprompt_output = None
    if reprompt_text:
        reprompt_output = {'type': 'PlainText', 'text': reprompt_text}
    elif reprompt_ssml:
        reprompt_output = {'type': 'SSML', 'ssml': reprompt_ssml}

    if reprompt_output:
        obj['response']['reprompt'] = {'outputSpeech': reprompt_output}

    return obj
[ "def", "respond", "(", "text", "=", "None", ",", "ssml", "=", "None", ",", "attributes", "=", "None", ",", "reprompt_text", "=", "None", ",", "reprompt_ssml", "=", "None", ",", "end_session", "=", "True", ")", ":", "obj", "=", "{", "'version'", ":", "'1.0'", ",", "'response'", ":", "{", "'outputSpeech'", ":", "{", "'type'", ":", "'PlainText'", ",", "'text'", ":", "''", "}", ",", "'shouldEndSession'", ":", "end_session", "}", ",", "'sessionAttributes'", ":", "attributes", "or", "{", "}", "}", "if", "text", ":", "obj", "[", "'response'", "]", "[", "'outputSpeech'", "]", "=", "{", "'type'", ":", "'PlainText'", ",", "'text'", ":", "text", "}", "elif", "ssml", ":", "obj", "[", "'response'", "]", "[", "'outputSpeech'", "]", "=", "{", "'type'", ":", "'SSML'", ",", "'ssml'", ":", "ssml", "}", "reprompt_output", "=", "None", "if", "reprompt_text", ":", "reprompt_output", "=", "{", "'type'", ":", "'PlainText'", ",", "'text'", ":", "reprompt_text", "}", "elif", "reprompt_ssml", ":", "reprompt_output", "=", "{", "'type'", ":", "'SSML'", ",", "'ssml'", ":", "reprompt_ssml", "}", "if", "reprompt_output", ":", "obj", "[", "'response'", "]", "[", "'reprompt'", "]", "=", "{", "'outputSpeech'", ":", "reprompt_output", "}", "return", "obj" ]
Build a dict containing a valid response to an Alexa request.

If speech output is desired, either of `text` or `ssml` should be specified.

:param text: Plain text speech output to be said by Alexa device.
:param ssml: Speech output in SSML form.
:param attributes: Dictionary of attributes to store in the session.
:param end_session: Should the session be terminated after this response?
:param reprompt_text, reprompt_ssml: Works the same as `text`/`ssml`, but instead sets the reprompting speech output.
[ "Build", "a", "dict", "containing", "a", "valid", "response", "to", "an", "Alexa", "request", "." ]
train
https://github.com/erik/alexandra/blob/8bea94efa1af465254a553dc4dfea3fa552b18da/alexandra/util.py#L25-L63
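A short usage sketch for the helper above, assuming the alexandra package is importable; the speech text and attribute values are invented for illustration:

from alexandra.util import respond

resp = respond(text='Hello, world', attributes={'counter': 1}, end_session=False)
assert resp['response']['outputSpeech'] == {'type': 'PlainText', 'text': 'Hello, world'}
assert resp['sessionAttributes'] == {'counter': 1}
assert resp['response']['shouldEndSession'] is False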
erik/alexandra
alexandra/util.py
reprompt
def reprompt(text=None, ssml=None, attributes=None):
    """Convenience method to save a little bit of typing for the common
    case of reprompting the user. Simply calls
    :py:func:`alexandra.util.respond` with the given arguments and holds
    the session open.

    One of either the `text` or `ssml` should be provided if any speech
    output is desired.

    :param text: Plain text speech output
    :param ssml: Speech output in SSML format
    :param attributes: Dictionary of attributes to store in the current
        session
    """
    return respond(
        reprompt_text=text,
        reprompt_ssml=ssml,
        attributes=attributes,
        end_session=False
    )
python
def reprompt(text=None, ssml=None, attributes=None):
    """Convenience method to save a little bit of typing for the common
    case of reprompting the user. Simply calls
    :py:func:`alexandra.util.respond` with the given arguments and holds
    the session open.

    One of either the `text` or `ssml` should be provided if any speech
    output is desired.

    :param text: Plain text speech output
    :param ssml: Speech output in SSML format
    :param attributes: Dictionary of attributes to store in the current
        session
    """
    return respond(
        reprompt_text=text,
        reprompt_ssml=ssml,
        attributes=attributes,
        end_session=False
    )
[ "def", "reprompt", "(", "text", "=", "None", ",", "ssml", "=", "None", ",", "attributes", "=", "None", ")", ":", "return", "respond", "(", "reprompt_text", "=", "text", ",", "reprompt_ssml", "=", "ssml", ",", "attributes", "=", "attributes", ",", "end_session", "=", "False", ")" ]
Convenience method to save a little bit of typing for the common case of reprompting the user. Simply calls :py:func:`alexandra.util.respond` with the given arguments and holds the session open.

One of either the `text` or `ssml` should be provided if any speech output is desired.

:param text: Plain text speech output
:param ssml: Speech output in SSML format
:param attributes: Dictionary of attributes to store in the current session
[ "Convenience", "method", "to", "save", "a", "little", "bit", "of", "typing", "for", "the", "common", "case", "of", "reprompting", "the", "user", ".", "Simply", "calls", ":", "py", ":", "func", ":", "alexandra", ".", "util", ".", "respond", "with", "the", "given", "arguments", "and", "holds", "the", "session", "open", "." ]
train
https://github.com/erik/alexandra/blob/8bea94efa1af465254a553dc4dfea3fa552b18da/alexandra/util.py#L66-L84
erik/alexandra
alexandra/util.py
validate_request_timestamp
def validate_request_timestamp(req_body, max_diff=150):
    """Ensure the request's timestamp doesn't fall outside of the app's
    specified tolerance.

    Returns True if this request is valid, False otherwise.

    :param req_body: JSON object parsed out of the raw POST data of a request.
    :param max_diff: Maximum allowable difference in seconds between
        request timestamp and system clock. Amazon requires <= 150
        seconds for published skills.
    """

    time_str = req_body.get('request', {}).get('timestamp')

    if not time_str:
        log.error('timestamp not present %s', req_body)
        return False

    req_ts = datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%SZ")
    diff = (datetime.utcnow() - req_ts).total_seconds()

    if abs(diff) > max_diff:
        log.error('timestamp difference too high: %d sec', diff)
        return False

    return True
python
def validate_request_timestamp(req_body, max_diff=150):
    """Ensure the request's timestamp doesn't fall outside of the app's
    specified tolerance.

    Returns True if this request is valid, False otherwise.

    :param req_body: JSON object parsed out of the raw POST data of a request.
    :param max_diff: Maximum allowable difference in seconds between
        request timestamp and system clock. Amazon requires <= 150
        seconds for published skills.
    """

    time_str = req_body.get('request', {}).get('timestamp')

    if not time_str:
        log.error('timestamp not present %s', req_body)
        return False

    req_ts = datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%SZ")
    diff = (datetime.utcnow() - req_ts).total_seconds()

    if abs(diff) > max_diff:
        log.error('timestamp difference too high: %d sec', diff)
        return False

    return True
[ "def", "validate_request_timestamp", "(", "req_body", ",", "max_diff", "=", "150", ")", ":", "time_str", "=", "req_body", ".", "get", "(", "'request'", ",", "{", "}", ")", ".", "get", "(", "'timestamp'", ")", "if", "not", "time_str", ":", "log", ".", "error", "(", "'timestamp not present %s'", ",", "req_body", ")", "return", "False", "req_ts", "=", "datetime", ".", "strptime", "(", "time_str", ",", "\"%Y-%m-%dT%H:%M:%SZ\"", ")", "diff", "=", "(", "datetime", ".", "utcnow", "(", ")", "-", "req_ts", ")", ".", "total_seconds", "(", ")", "if", "abs", "(", "diff", ")", ">", "max_diff", ":", "log", ".", "error", "(", "'timestamp difference too high: %d sec'", ",", "diff", ")", "return", "False", "return", "True" ]
Ensure the request's timestamp doesn't fall outside of the app's specified tolerance. Returns True if this request is valid, False otherwise.

:param req_body: JSON object parsed out of the raw POST data of a request.
:param max_diff: Maximum allowable difference in seconds between request timestamp and system clock. Amazon requires <= 150 seconds for published skills.
[ "Ensure", "the", "request", "s", "timestamp", "doesn", "t", "fall", "outside", "of", "the", "app", "s", "specified", "tolerance", "." ]
train
https://github.com/erik/alexandra/blob/8bea94efa1af465254a553dc4dfea3fa552b18da/alexandra/util.py#L87-L112
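A quick check of the tolerance logic, again assuming the module is importable; the request bodies below are hand-built stubs in the shape the function expects:

from datetime import datetime
from alexandra.util import validate_request_timestamp

fresh = {'request': {'timestamp': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')}}
stale = {'request': {'timestamp': '2000-01-01T00:00:00Z'}}

assert validate_request_timestamp(fresh)       # within the 150 s window
assert not validate_request_timestamp(stale)   # decades out of tolerance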
erik/alexandra
alexandra/util.py
validate_request_certificate
def validate_request_certificate(headers, data):
    """Ensure that the certificate and signature specified in the request
    headers are truly from Amazon and correctly verify.

    Returns True if certificate verification succeeds, False otherwise.

    :param headers: Dictionary (or sufficiently dictionary-like) map of request
        headers.
    :param data: Raw POST data attached to this request.
    """

    # Make sure we have the appropriate headers.
    if 'SignatureCertChainUrl' not in headers or \
       'Signature' not in headers:
        log.error('invalid request headers')
        return False

    cert_url = headers['SignatureCertChainUrl']
    sig = base64.b64decode(headers['Signature'])

    cert = _get_certificate(cert_url)
    if not cert:
        return False

    try:
        # ... wtf kind of API decision is this
        crypto.verify(cert, sig, data, 'sha1')
        return True
    except:
        log.error('invalid request signature')
        return False
python
def validate_request_certificate(headers, data):
    """Ensure that the certificate and signature specified in the request
    headers are truly from Amazon and correctly verify.

    Returns True if certificate verification succeeds, False otherwise.

    :param headers: Dictionary (or sufficiently dictionary-like) map of request
        headers.
    :param data: Raw POST data attached to this request.
    """

    # Make sure we have the appropriate headers.
    if 'SignatureCertChainUrl' not in headers or \
       'Signature' not in headers:
        log.error('invalid request headers')
        return False

    cert_url = headers['SignatureCertChainUrl']
    sig = base64.b64decode(headers['Signature'])

    cert = _get_certificate(cert_url)
    if not cert:
        return False

    try:
        # ... wtf kind of API decision is this
        crypto.verify(cert, sig, data, 'sha1')
        return True
    except:
        log.error('invalid request signature')
        return False
[ "def", "validate_request_certificate", "(", "headers", ",", "data", ")", ":", "# Make sure we have the appropriate headers.", "if", "'SignatureCertChainUrl'", "not", "in", "headers", "or", "'Signature'", "not", "in", "headers", ":", "log", ".", "error", "(", "'invalid request headers'", ")", "return", "False", "cert_url", "=", "headers", "[", "'SignatureCertChainUrl'", "]", "sig", "=", "base64", ".", "b64decode", "(", "headers", "[", "'Signature'", "]", ")", "cert", "=", "_get_certificate", "(", "cert_url", ")", "if", "not", "cert", ":", "return", "False", "try", ":", "# ... wtf kind of API decision is this", "crypto", ".", "verify", "(", "cert", ",", "sig", ",", "data", ",", "'sha1'", ")", "return", "True", "except", ":", "log", ".", "error", "(", "'invalid request signature'", ")", "return", "False" ]
Ensure that the certificate and signature specified in the request headers are truly from Amazon and correctly verify. Returns True if certificate verification succeeds, False otherwise.

:param headers: Dictionary (or sufficiently dictionary-like) map of request headers.
:param data: Raw POST data attached to this request.
[ "Ensure", "that", "the", "certificate", "and", "signature", "specified", "in", "the", "request", "headers", "are", "truely", "from", "Amazon", "and", "correctly", "verify", "." ]
train
https://github.com/erik/alexandra/blob/8bea94efa1af465254a553dc4dfea3fa552b18da/alexandra/util.py#L115-L146
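In a web handler the two arguments map directly onto the incoming request. A hedged sketch of how this might be wired into a Flask view; Flask and the view name are assumptions for illustration, not something the module requires:

from flask import request, abort
from alexandra.util import validate_request_certificate

def alexa_webhook():
    # Reject any request whose certificate chain or signature is not Amazon's.
    if not validate_request_certificate(request.headers, request.data):
        abort(403)
    ...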
erik/alexandra
alexandra/util.py
_get_certificate
def _get_certificate(cert_url):
    """Download and validate a specified Amazon PEM file."""
    global _cache

    if cert_url in _cache:
        cert = _cache[cert_url]
        if cert.has_expired():
            _cache = {}
        else:
            return cert

    url = urlparse(cert_url)
    host = url.netloc.lower()
    path = posixpath.normpath(url.path)

    # Sanity check location so we don't get some random person's cert.
    if url.scheme != 'https' or \
       host not in ['s3.amazonaws.com', 's3.amazonaws.com:443'] or \
       not path.startswith('/echo.api/'):
        log.error('invalid cert location %s', cert_url)
        return

    resp = urlopen(cert_url)
    if resp.getcode() != 200:
        log.error('failed to download certificate')
        return

    cert = crypto.load_certificate(crypto.FILETYPE_PEM, resp.read())
    if cert.has_expired() or cert.get_subject().CN != 'echo-api.amazon.com':
        log.error('certificate expired or invalid')
        return

    _cache[cert_url] = cert
    return cert
python
def _get_certificate(cert_url):
    """Download and validate a specified Amazon PEM file."""
    global _cache

    if cert_url in _cache:
        cert = _cache[cert_url]
        if cert.has_expired():
            _cache = {}
        else:
            return cert

    url = urlparse(cert_url)
    host = url.netloc.lower()
    path = posixpath.normpath(url.path)

    # Sanity check location so we don't get some random person's cert.
    if url.scheme != 'https' or \
       host not in ['s3.amazonaws.com', 's3.amazonaws.com:443'] or \
       not path.startswith('/echo.api/'):
        log.error('invalid cert location %s', cert_url)
        return

    resp = urlopen(cert_url)
    if resp.getcode() != 200:
        log.error('failed to download certificate')
        return

    cert = crypto.load_certificate(crypto.FILETYPE_PEM, resp.read())
    if cert.has_expired() or cert.get_subject().CN != 'echo-api.amazon.com':
        log.error('certificate expired or invalid')
        return

    _cache[cert_url] = cert
    return cert
[ "def", "_get_certificate", "(", "cert_url", ")", ":", "global", "_cache", "if", "cert_url", "in", "_cache", ":", "cert", "=", "_cache", "[", "cert_url", "]", "if", "cert", ".", "has_expired", "(", ")", ":", "_cache", "=", "{", "}", "else", ":", "return", "cert", "url", "=", "urlparse", "(", "cert_url", ")", "host", "=", "url", ".", "netloc", ".", "lower", "(", ")", "path", "=", "posixpath", ".", "normpath", "(", "url", ".", "path", ")", "# Sanity check location so we don't get some random person's cert.", "if", "url", ".", "scheme", "!=", "'https'", "or", "host", "not", "in", "[", "'s3.amazonaws.com'", ",", "'s3.amazonaws.com:443'", "]", "or", "not", "path", ".", "startswith", "(", "'/echo.api/'", ")", ":", "log", ".", "error", "(", "'invalid cert location %s'", ",", "cert_url", ")", "return", "resp", "=", "urlopen", "(", "cert_url", ")", "if", "resp", ".", "getcode", "(", ")", "!=", "200", ":", "log", ".", "error", "(", "'failed to download certificate'", ")", "return", "cert", "=", "crypto", ".", "load_certificate", "(", "crypto", ".", "FILETYPE_PEM", ",", "resp", ".", "read", "(", ")", ")", "if", "cert", ".", "has_expired", "(", ")", "or", "cert", ".", "get_subject", "(", ")", ".", "CN", "!=", "'echo-api.amazon.com'", ":", "log", ".", "error", "(", "'certificate expired or invalid'", ")", "return", "_cache", "[", "cert_url", "]", "=", "cert", "return", "cert" ]
Download and validate a specified Amazon PEM file.
[ "Download", "and", "validate", "a", "specified", "Amazon", "PEM", "file", "." ]
train
https://github.com/erik/alexandra/blob/8bea94efa1af465254a553dc4dfea3fa552b18da/alexandra/util.py#L149-L183
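_get_certificate memoizes downloads in a module-level dict and flushes the whole cache when a stored certificate has expired. The same pattern in miniature, with a caller-supplied predicate standing in for the expiry check (all names here are illustrative, not part of alexandra):

_cache = {}

def get_cached(key, expired, build):
    global _cache
    if key in _cache:
        if expired(_cache[key]):
            _cache = {}   # drop everything, as the function above does
        else:
            return _cache[key]
    _cache[key] = value = build(key)
    return value

value = get_cached('k', expired=lambda v: False, build=lambda k: 42)
assert value == 42 and _cache == {'k': 42}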
camptocamp/marabunta
marabunta/model.py
Version.is_processed
def is_processed(self, db_versions):
    """Check if version is already applied in the database.

    :param db_versions:
    """
    return self.number in (v.number for v in db_versions if v.date_done)
python
def is_processed(self, db_versions):
    """Check if version is already applied in the database.

    :param db_versions:
    """
    return self.number in (v.number for v in db_versions if v.date_done)
[ "def", "is_processed", "(", "self", ",", "db_versions", ")", ":", "return", "self", ".", "number", "in", "(", "v", ".", "number", "for", "v", "in", "db_versions", "if", "v", ".", "date_done", ")" ]
Check if version is already applied in the database. :param db_versions:
[ "Check", "if", "version", "is", "already", "applied", "in", "the", "database", "." ]
train
https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/model.py#L123-L128
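The membership test drives a generator, so the scan stops at the first matching version number and versions without a date_done are never counted as applied. A stand-alone sketch with namedtuples in place of the real version records:

from collections import namedtuple

DbVersion = namedtuple('DbVersion', ['number', 'date_done'])

db_versions = [DbVersion('1.0.0', '2020-01-01'), DbVersion('1.1.0', None)]

def is_processed(number):
    # only versions with a date_done count as applied
    return number in (v.number for v in db_versions if v.date_done)

assert is_processed('1.0.0')
assert not is_processed('1.1.0')  # started but never finished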
camptocamp/marabunta
marabunta/model.py
Version.is_noop
def is_noop(self):
    """Check if version is a no operation version."""
    has_operations = [mode.pre_operations or mode.post_operations
                      for mode in self._version_modes.values()]
    has_upgrade_addons = [mode.upgrade_addons or mode.remove_addons
                          for mode in self._version_modes.values()]
    noop = not any((has_upgrade_addons, has_operations))
    return noop
python
def is_noop(self):
    """Check if version is a no operation version."""
    has_operations = [mode.pre_operations or mode.post_operations
                      for mode in self._version_modes.values()]
    has_upgrade_addons = [mode.upgrade_addons or mode.remove_addons
                          for mode in self._version_modes.values()]
    noop = not any((has_upgrade_addons, has_operations))
    return noop
[ "def", "is_noop", "(", "self", ")", ":", "has_operations", "=", "[", "mode", ".", "pre_operations", "or", "mode", ".", "post_operations", "for", "mode", "in", "self", ".", "_version_modes", ".", "values", "(", ")", "]", "has_upgrade_addons", "=", "[", "mode", ".", "upgrade_addons", "or", "mode", ".", "remove_addons", "for", "mode", "in", "self", ".", "_version_modes", ".", "values", "(", ")", "]", "noop", "=", "not", "any", "(", "(", "has_upgrade_addons", ",", "has_operations", ")", ")", "return", "noop" ]
Check if version is a no operation version.
[ "Check", "if", "version", "is", "a", "no", "operation", "version", "." ]
train
https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/model.py#L130-L138
camptocamp/marabunta
marabunta/model.py
Version._get_version_mode
def _get_version_mode(self, mode=None):
    """Return a VersionMode for a mode name.

    When the mode is None, we are working with the 'base' mode.
    """
    version_mode = self._version_modes.get(mode)
    if not version_mode:
        version_mode = self._version_modes[mode] = VersionMode(name=mode)
    return version_mode
python
def _get_version_mode(self, mode=None):
    """Return a VersionMode for a mode name.

    When the mode is None, we are working with the 'base' mode.
    """
    version_mode = self._version_modes.get(mode)
    if not version_mode:
        version_mode = self._version_modes[mode] = VersionMode(name=mode)
    return version_mode
[ "def", "_get_version_mode", "(", "self", ",", "mode", "=", "None", ")", ":", "version_mode", "=", "self", ".", "_version_modes", ".", "get", "(", "mode", ")", "if", "not", "version_mode", ":", "version_mode", "=", "self", ".", "_version_modes", "[", "mode", "]", "=", "VersionMode", "(", "name", "=", "mode", ")", "return", "version_mode" ]
Return a VersionMode for a mode name. When the mode is None, we are working with the 'base' mode.
[ "Return", "a", "VersionMode", "for", "a", "mode", "name", "." ]
train
https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/model.py#L145-L153
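The body is a get-or-create over a plain dict. With a cheap constructor the same thing can be written with dict.setdefault, as in this stand-alone sketch (VersionMode here is a minimal stand-in, not marabunta's class):

class VersionMode:
    def __init__(self, name=None):
        self.name = name

_version_modes = {}

def get_version_mode(mode=None):
    # Note: setdefault constructs the default eagerly on every call,
    # unlike the lazy if-check above; fine only when construction is cheap.
    return _version_modes.setdefault(mode, VersionMode(name=mode))

assert get_version_mode('demo') is get_version_mode('demo')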
camptocamp/marabunta
marabunta/model.py
Version.add_operation
def add_operation(self, operation_type, operation, mode=None):
    """Add an operation to the version

    :param mode: Name of the mode in which the operation is executed
    :type mode: str
    :param operation_type: one of 'pre', 'post'
    :type operation_type: str
    :param operation: the operation to add
    :type operation: :class:`marabunta.model.Operation`
    """
    version_mode = self._get_version_mode(mode=mode)
    if operation_type == 'pre':
        version_mode.add_pre(operation)
    elif operation_type == 'post':
        version_mode.add_post(operation)
    else:
        raise ConfigurationError(
            u"Type of operation must be 'pre' or 'post', got %s" %
            (operation_type,)
        )
python
def add_operation(self, operation_type, operation, mode=None):
    """Add an operation to the version

    :param mode: Name of the mode in which the operation is executed
    :type mode: str
    :param operation_type: one of 'pre', 'post'
    :type operation_type: str
    :param operation: the operation to add
    :type operation: :class:`marabunta.model.Operation`
    """
    version_mode = self._get_version_mode(mode=mode)
    if operation_type == 'pre':
        version_mode.add_pre(operation)
    elif operation_type == 'post':
        version_mode.add_post(operation)
    else:
        raise ConfigurationError(
            u"Type of operation must be 'pre' or 'post', got %s" %
            (operation_type,)
        )
[ "def", "add_operation", "(", "self", ",", "operation_type", ",", "operation", ",", "mode", "=", "None", ")", ":", "version_mode", "=", "self", ".", "_get_version_mode", "(", "mode", "=", "mode", ")", "if", "operation_type", "==", "'pre'", ":", "version_mode", ".", "add_pre", "(", "operation", ")", "elif", "operation_type", "==", "'post'", ":", "version_mode", ".", "add_post", "(", "operation", ")", "else", ":", "raise", "ConfigurationError", "(", "u\"Type of operation must be 'pre' or 'post', got %s\"", "%", "(", "operation_type", ",", ")", ")" ]
Add an operation to the version :param mode: Name of the mode in which the operation is executed :type mode: str :param operation_type: one of 'pre', 'post' :type operation_type: str :param operation: the operation to add :type operation: :class:`marabunta.model.Operation`
[ "Add", "an", "operation", "to", "the", "version" ]
train
https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/model.py#L155-L174
camptocamp/marabunta
marabunta/model.py
Version.add_backup_operation
def add_backup_operation(self, backup, mode=None):
    """Add a backup operation to the version.

    :param backup: To either add or skip the backup
    :type backup: Boolean
    :param mode: Name of the mode in which the operation is executed.
        For now, backups are mode-independent
    :type mode: String
    """
    try:
        if self.options.backup:
            self.options.backup.ignore_if_operation().execute()
    except OperationError:
        self.backup = backup
python
def add_backup_operation(self, backup, mode=None):
    """Add a backup operation to the version.

    :param backup: To either add or skip the backup
    :type backup: Boolean
    :param mode: Name of the mode in which the operation is executed.
        For now, backups are mode-independent
    :type mode: String
    """
    try:
        if self.options.backup:
            self.options.backup.ignore_if_operation().execute()
    except OperationError:
        self.backup = backup
[ "def", "add_backup_operation", "(", "self", ",", "backup", ",", "mode", "=", "None", ")", ":", "try", ":", "if", "self", ".", "options", ".", "backup", ":", "self", ".", "options", ".", "backup", ".", "ignore_if_operation", "(", ")", ".", "execute", "(", ")", "except", "OperationError", ":", "self", ".", "backup", "=", "backup" ]
Add a backup operation to the version. :param backup: To either add or skip the backup :type backup: Boolean :param mode: Name of the mode in which the operation is executed For now, backups are mode-independent :type mode: String
[ "Add", "a", "backup", "operation", "to", "the", "version", "." ]
train
https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/model.py#L176-L189
camptocamp/marabunta
marabunta/model.py
Version.pre_operations
def pre_operations(self, mode=None):
    """ Return pre-operations only for the mode asked """
    version_mode = self._get_version_mode(mode=mode)
    return version_mode.pre_operations
python
def pre_operations(self, mode=None):
    """ Return pre-operations only for the mode asked """
    version_mode = self._get_version_mode(mode=mode)
    return version_mode.pre_operations
[ "def", "pre_operations", "(", "self", ",", "mode", "=", "None", ")", ":", "version_mode", "=", "self", ".", "_get_version_mode", "(", "mode", "=", "mode", ")", "return", "version_mode", ".", "pre_operations" ]
Return pre-operations only for the mode asked
[ "Return", "pre", "-", "operations", "only", "for", "the", "mode", "asked" ]
train
https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/model.py#L204-L207
camptocamp/marabunta
marabunta/model.py
Version.post_operations
def post_operations(self, mode=None):
    """ Return post-operations only for the mode asked """
    version_mode = self._get_version_mode(mode=mode)
    return version_mode.post_operations
python
def post_operations(self, mode=None):
    """ Return post-operations only for the mode asked """
    version_mode = self._get_version_mode(mode=mode)
    return version_mode.post_operations
[ "def", "post_operations", "(", "self", ",", "mode", "=", "None", ")", ":", "version_mode", "=", "self", ".", "_get_version_mode", "(", "mode", "=", "mode", ")", "return", "version_mode", ".", "post_operations" ]
Return post-operations only for the mode asked
[ "Return", "post", "-", "operations", "only", "for", "the", "mode", "asked" ]
train
https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/model.py#L209-L212
camptocamp/marabunta
marabunta/model.py
Version.upgrade_addons_operation
def upgrade_addons_operation(self, addons_state, mode=None):
    """ Return merged set of main addons and mode's addons """
    installed = set(a.name for a in addons_state
                    if a.state in ('installed', 'to upgrade'))

    base_mode = self._get_version_mode()
    addons_list = base_mode.upgrade_addons.copy()
    if mode:
        add_mode = self._get_version_mode(mode=mode)
        addons_list |= add_mode.upgrade_addons

    to_install = addons_list - installed
    to_upgrade = installed & addons_list

    return UpgradeAddonsOperation(self.options, to_install, to_upgrade)
python
def upgrade_addons_operation(self, addons_state, mode=None):
    """ Return merged set of main addons and mode's addons """
    installed = set(a.name for a in addons_state
                    if a.state in ('installed', 'to upgrade'))

    base_mode = self._get_version_mode()
    addons_list = base_mode.upgrade_addons.copy()
    if mode:
        add_mode = self._get_version_mode(mode=mode)
        addons_list |= add_mode.upgrade_addons

    to_install = addons_list - installed
    to_upgrade = installed & addons_list

    return UpgradeAddonsOperation(self.options, to_install, to_upgrade)
[ "def", "upgrade_addons_operation", "(", "self", ",", "addons_state", ",", "mode", "=", "None", ")", ":", "installed", "=", "set", "(", "a", ".", "name", "for", "a", "in", "addons_state", "if", "a", ".", "state", "in", "(", "'installed'", ",", "'to upgrade'", ")", ")", "base_mode", "=", "self", ".", "_get_version_mode", "(", ")", "addons_list", "=", "base_mode", ".", "upgrade_addons", ".", "copy", "(", ")", "if", "mode", ":", "add_mode", "=", "self", ".", "_get_version_mode", "(", "mode", "=", "mode", ")", "addons_list", "|=", "add_mode", ".", "upgrade_addons", "to_install", "=", "addons_list", "-", "installed", "to_upgrade", "=", "installed", "&", "addons_list", "return", "UpgradeAddonsOperation", "(", "self", ".", "options", ",", "to_install", ",", "to_upgrade", ")" ]
Return merged set of main addons and mode's addons
[ "Return", "merged", "set", "of", "main", "addons", "and", "mode", "s", "addons" ]
train
https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/model.py#L214-L228
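The install/upgrade split is plain set algebra: anything requested but not yet installed gets installed, anything requested and already installed gets upgraded. A stand-alone sketch with made-up addon names:

installed = {'base', 'sale'}
requested = {'sale', 'stock', 'purchase'}

to_install = requested - installed   # requested, not yet present
to_upgrade = installed & requested   # requested and already present

assert to_install == {'stock', 'purchase'}
assert to_upgrade == {'sale'}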
cimm-kzn/CGRtools
CGRtools/containers/reaction.py
ReactionContainer.copy
def copy(self):
    """
    get copy of object

    :return: ReactionContainer
    """
    return type(self)(reagents=[x.copy() for x in self.__reagents],
                      meta=self.__meta.copy(),
                      products=[x.copy() for x in self.__products],
                      reactants=[x.copy() for x in self.__reactants])
python
def copy(self):
    """
    get copy of object

    :return: ReactionContainer
    """
    return type(self)(reagents=[x.copy() for x in self.__reagents],
                      meta=self.__meta.copy(),
                      products=[x.copy() for x in self.__products],
                      reactants=[x.copy() for x in self.__reactants])
[ "def", "copy", "(", "self", ")", ":", "return", "type", "(", "self", ")", "(", "reagents", "=", "[", "x", ".", "copy", "(", ")", "for", "x", "in", "self", ".", "__reagents", "]", ",", "meta", "=", "self", ".", "__meta", ".", "copy", "(", ")", ",", "products", "=", "[", "x", ".", "copy", "(", ")", "for", "x", "in", "self", ".", "__products", "]", ",", "reactants", "=", "[", "x", ".", "copy", "(", ")", "for", "x", "in", "self", ".", "__reactants", "]", ")" ]
get copy of object :return: ReactionContainer
[ "get", "copy", "of", "object" ]
train
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/containers/reaction.py#L135-L143
cimm-kzn/CGRtools
CGRtools/containers/reaction.py
ReactionContainer.implicify_hydrogens
def implicify_hydrogens(self):
    """
    remove explicit hydrogens if possible

    :return: number of removed hydrogens
    """
    total = 0
    for ml in (self.__reagents, self.__reactants, self.__products):
        for m in ml:
            if hasattr(m, 'implicify_hydrogens'):
                total += m.implicify_hydrogens()
    if total:
        self.flush_cache()
    return total
python
def implicify_hydrogens(self):
    """
    remove explicit hydrogens if possible

    :return: number of removed hydrogens
    """
    total = 0
    for ml in (self.__reagents, self.__reactants, self.__products):
        for m in ml:
            if hasattr(m, 'implicify_hydrogens'):
                total += m.implicify_hydrogens()
    if total:
        self.flush_cache()
    return total
[ "def", "implicify_hydrogens", "(", "self", ")", ":", "total", "=", "0", "for", "ml", "in", "(", "self", ".", "__reagents", ",", "self", ".", "__reactants", ",", "self", ".", "__products", ")", ":", "for", "m", "in", "ml", ":", "if", "hasattr", "(", "m", ",", "'implicify_hydrogens'", ")", ":", "total", "+=", "m", ".", "implicify_hydrogens", "(", ")", "if", "total", ":", "self", ".", "flush_cache", "(", ")", "return", "total" ]
remove explicit hydrogens if possible :return: number of removed hydrogens
[ "remove", "explicit", "hydrogens", "if", "possible" ]
train
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/containers/reaction.py#L145-L158
cimm-kzn/CGRtools
CGRtools/containers/reaction.py
ReactionContainer.reset_query_marks
def reset_query_marks(self):
    """
    set or reset hyb and neighbors marks to atoms.
    """
    for ml in (self.__reagents, self.__reactants, self.__products):
        for m in ml:
            if hasattr(m, 'reset_query_marks'):
                m.reset_query_marks()
    self.flush_cache()
python
def reset_query_marks(self):
    """
    set or reset hyb and neighbors marks to atoms.
    """
    for ml in (self.__reagents, self.__reactants, self.__products):
        for m in ml:
            if hasattr(m, 'reset_query_marks'):
                m.reset_query_marks()
    self.flush_cache()
[ "def", "reset_query_marks", "(", "self", ")", ":", "for", "ml", "in", "(", "self", ".", "__reagents", ",", "self", ".", "__reactants", ",", "self", ".", "__products", ")", ":", "for", "m", "in", "ml", ":", "if", "hasattr", "(", "m", ",", "'reset_query_marks'", ")", ":", "m", ".", "reset_query_marks", "(", ")", "self", ".", "flush_cache", "(", ")" ]
set or reset hyb and neighbors marks to atoms.
[ "set", "or", "reset", "hyb", "and", "neighbors", "marks", "to", "atoms", "." ]
train
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/containers/reaction.py#L207-L215
cimm-kzn/CGRtools
CGRtools/containers/reaction.py
ReactionContainer.compose
def compose(self):
    """
    get CGR of reaction

    reagents will be presented as unchanged molecules

    :return: CGRContainer
    """
    rr = self.__reagents + self.__reactants
    if rr:
        if not all(isinstance(x, (MoleculeContainer, CGRContainer)) for x in rr):
            raise TypeError('Queries not composable')
        r = reduce(or_, rr)
    else:
        r = MoleculeContainer()
    if self.__products:
        if not all(isinstance(x, (MoleculeContainer, CGRContainer)) for x in self.__products):
            raise TypeError('Queries not composable')
        p = reduce(or_, self.__products)
    else:
        p = MoleculeContainer()
    return r ^ p
python
def compose(self):
    """
    get CGR of reaction

    reagents will be presented as unchanged molecules

    :return: CGRContainer
    """
    rr = self.__reagents + self.__reactants
    if rr:
        if not all(isinstance(x, (MoleculeContainer, CGRContainer)) for x in rr):
            raise TypeError('Queries not composable')
        r = reduce(or_, rr)
    else:
        r = MoleculeContainer()
    if self.__products:
        if not all(isinstance(x, (MoleculeContainer, CGRContainer)) for x in self.__products):
            raise TypeError('Queries not composable')
        p = reduce(or_, self.__products)
    else:
        p = MoleculeContainer()
    return r ^ p
[ "def", "compose", "(", "self", ")", ":", "rr", "=", "self", ".", "__reagents", "+", "self", ".", "__reactants", "if", "rr", ":", "if", "not", "all", "(", "isinstance", "(", "x", ",", "(", "MoleculeContainer", ",", "CGRContainer", ")", ")", "for", "x", "in", "rr", ")", ":", "raise", "TypeError", "(", "'Queries not composable'", ")", "r", "=", "reduce", "(", "or_", ",", "rr", ")", "else", ":", "r", "=", "MoleculeContainer", "(", ")", "if", "self", ".", "__products", ":", "if", "not", "all", "(", "isinstance", "(", "x", ",", "(", "MoleculeContainer", ",", "CGRContainer", ")", ")", "for", "x", "in", "self", ".", "__products", ")", ":", "raise", "TypeError", "(", "'Queries not composable'", ")", "p", "=", "reduce", "(", "or_", ",", "self", ".", "__products", ")", "else", ":", "p", "=", "MoleculeContainer", "(", ")", "return", "r", "^", "p" ]
get CGR of reaction reagents will be presented as unchanged molecules :return: CGRContainer
[ "get", "CGR", "of", "reaction" ]
train
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/containers/reaction.py#L218-L238
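reduce(or_, ...) folds the container union operator over the list, exactly as it would over any objects implementing __or__; a stand-alone demonstration with Python sets, which share that operator:

from functools import reduce
from operator import or_

parts = [{1, 2}, {2, 3}, {4}]
merged = reduce(or_, parts)   # evaluates as ({1, 2} | {2, 3}) | {4}
assert merged == {1, 2, 3, 4}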
cimm-kzn/CGRtools
CGRtools/containers/reaction.py
ReactionContainer.calculate2d
def calculate2d(self, force=True):
    """
    recalculate 2d coordinates. currently rings can be calculated badly.

    :param force: ignore existing coordinates of atoms
    """
    for ml in (self.__reagents, self.__reactants, self.__products):
        for m in ml:
            m.calculate2d(force)
    self.fix_positions()
python
def calculate2d(self, force=True):
    """
    recalculate 2d coordinates. currently rings can be calculated badly.

    :param force: ignore existing coordinates of atoms
    """
    for ml in (self.__reagents, self.__reactants, self.__products):
        for m in ml:
            m.calculate2d(force)
    self.fix_positions()
[ "def", "calculate2d", "(", "self", ",", "force", "=", "True", ")", ":", "for", "ml", "in", "(", "self", ".", "__reagents", ",", "self", ".", "__reactants", ",", "self", ".", "__products", ")", ":", "for", "m", "in", "ml", ":", "m", ".", "calculate2d", "(", "force", ")", "self", ".", "fix_positions", "(", ")" ]
recalculate 2d coordinates. currently rings can be calculated badly. :param force: ignore existing coordinates of atoms
[ "recalculate", "2d", "coordinates", ".", "currently", "rings", "can", "be", "calculated", "badly", "." ]
train
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/containers/reaction.py#L246-L255
cimm-kzn/CGRtools
CGRtools/containers/reaction.py
ReactionContainer.fix_positions
def fix_positions(self):
    """
    fix coordinates of molecules in reaction
    """
    shift_x = 0
    for m in self.__reactants:
        max_x = self.__fix_positions(m, shift_x, 0)
        shift_x = max_x + 1
    arrow_min = shift_x

    if self.__reagents:
        for m in self.__reagents:
            max_x = self.__fix_positions(m, shift_x, 1.5)
            shift_x = max_x + 1
    else:
        shift_x += 3
    arrow_max = shift_x - 1

    for m in self.__products:
        max_x = self.__fix_positions(m, shift_x, 0)
        shift_x = max_x + 1

    self._arrow = (arrow_min, arrow_max)
    self.flush_cache()
python
def fix_positions(self):
    """
    fix coordinates of molecules in reaction
    """
    shift_x = 0
    for m in self.__reactants:
        max_x = self.__fix_positions(m, shift_x, 0)
        shift_x = max_x + 1
    arrow_min = shift_x

    if self.__reagents:
        for m in self.__reagents:
            max_x = self.__fix_positions(m, shift_x, 1.5)
            shift_x = max_x + 1
    else:
        shift_x += 3
    arrow_max = shift_x - 1

    for m in self.__products:
        max_x = self.__fix_positions(m, shift_x, 0)
        shift_x = max_x + 1

    self._arrow = (arrow_min, arrow_max)
    self.flush_cache()
[ "def", "fix_positions", "(", "self", ")", ":", "shift_x", "=", "0", "for", "m", "in", "self", ".", "__reactants", ":", "max_x", "=", "self", ".", "__fix_positions", "(", "m", ",", "shift_x", ",", "0", ")", "shift_x", "=", "max_x", "+", "1", "arrow_min", "=", "shift_x", "if", "self", ".", "__reagents", ":", "for", "m", "in", "self", ".", "__reagents", ":", "max_x", "=", "self", ".", "__fix_positions", "(", "m", ",", "shift_x", ",", "1.5", ")", "shift_x", "=", "max_x", "+", "1", "else", ":", "shift_x", "+=", "3", "arrow_max", "=", "shift_x", "-", "1", "for", "m", "in", "self", ".", "__products", ":", "max_x", "=", "self", ".", "__fix_positions", "(", "m", ",", "shift_x", ",", "0", ")", "shift_x", "=", "max_x", "+", "1", "self", ".", "_arrow", "=", "(", "arrow_min", ",", "arrow_max", ")", "self", ".", "flush_cache", "(", ")" ]
fix coordinates of molecules in reaction
[ "fix", "coordinates", "of", "molecules", "in", "reaction" ]
train
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/containers/reaction.py#L257-L279
zetaops/zengine
zengine/models/auth.py
Unit.get_role_keys
def get_role_keys(cls, unit_key):
    """
    :param unit_key: Parent unit key
    :return: role keys of subunits
    """
    stack = Role.objects.filter(unit_id=unit_key).values_list('key', flatten=True)
    for unit_key in cls.objects.filter(parent_id=unit_key).values_list('key', flatten=True):
        stack.extend(cls.get_role_keys(unit_key))
    return stack
python
def get_role_keys(cls, unit_key):
    """
    :param unit_key: Parent unit key
    :return: role keys of subunits
    """
    stack = Role.objects.filter(unit_id=unit_key).values_list('key', flatten=True)
    for unit_key in cls.objects.filter(parent_id=unit_key).values_list('key', flatten=True):
        stack.extend(cls.get_role_keys(unit_key))
    return stack
[ "def", "get_role_keys", "(", "cls", ",", "unit_key", ")", ":", "stack", "=", "Role", ".", "objects", ".", "filter", "(", "unit_id", "=", "unit_key", ")", ".", "values_list", "(", "'key'", ",", "flatten", "=", "True", ")", "for", "unit_key", "in", "cls", ".", "objects", ".", "filter", "(", "parent_id", "=", "unit_key", ")", ".", "values_list", "(", "'key'", ",", "flatten", "=", "True", ")", ":", "stack", ".", "extend", "(", "cls", ".", "get_role_keys", "(", "unit_key", ")", ")", "return", "stack" ]
:param unit_key: Parent unit key :return: role keys of subunits
[ ":", "param", "unit_key", ":", "Parent", "unit", "key", ":", "return", ":", "role", "keys", "of", "subunits" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/models/auth.py#L44-L52
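get_role_keys recurses depth-first through the unit tree, collecting role keys at every level. The same shape with plain dicts standing in for the ORM queries (unit and role names are invented):

children = {'root': ['a', 'b'], 'a': [], 'b': ['c'], 'c': []}
roles = {'root': ['r1'], 'a': ['r2'], 'b': [], 'c': ['r3']}

def get_role_keys(unit_key):
    stack = list(roles[unit_key])           # roles attached to this unit
    for child in children[unit_key]:
        stack.extend(get_role_keys(child))  # recurse into each subunit
    return stack

assert get_role_keys('root') == ['r1', 'r2', 'r3']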
zetaops/zengine
zengine/models/auth.py
User.get_permissions
def get_permissions(self):
    """
    Permissions of the user.

    Returns:
        List of Permission objects.
    """
    user_role = self.last_login_role() if self.last_login_role_key else self.role_set[0].role
    return user_role.get_permissions()
python
def get_permissions(self): """ Permissions of the user. Returns: List of Permission objects. """ user_role = self.last_login_role() if self.last_login_role_key else self.role_set[0].role return user_role.get_permissions()
[ "def", "get_permissions", "(", "self", ")", ":", "user_role", "=", "self", ".", "last_login_role", "(", ")", "if", "self", ".", "last_login_role_key", "else", "self", ".", "role_set", "[", "0", "]", ".", "role", "return", "user_role", ".", "get_permissions", "(", ")" ]
Permissions of the user. Returns: List of Permission objects.
[ "Permissions", "of", "the", "user", "." ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/models/auth.py#L119-L127
zetaops/zengine
zengine/models/auth.py
AbstractRole.get_permissions
def get_permissions(self):
    """
    Finds the Permission objects belonging to the abstract role and
    returns their code values.

    Returns:
        list: Permission code values
    """
    return [p.permission.code for p in self.Permissions if p.permission.code]
python
def get_permissions(self):
    """
    Finds the Permission objects belonging to the abstract role and
    returns their code values.

    Returns:
        list: Permission code values
    """
    return [p.permission.code for p in self.Permissions if p.permission.code]
[ "def", "get_permissions", "(", "self", ")", ":", "return", "[", "p", ".", "permission", ".", "code", "for", "p", "in", "self", ".", "Permissions", "if", "p", ".", "permission", ".", "code", "]" ]
Finds the Permission objects belonging to the abstract role and returns their code values. Returns: list: Permission code values
[ "Finds", "the", "Permission", "objects", "belonging", "to", "the", "abstract", "role", "and", "returns", "their", "code", "values", "." ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/models/auth.py#L156-L165
zetaops/zengine
zengine/models/auth.py
AbstractRole.add_permission
def add_permission(self, perm):
    """
    Assigns a Permission object to the abstract Role.

    Args:
        perm (object):
    """
    self.Permissions(permission=perm)
    PermissionCache.flush()
    self.save()
python
def add_permission(self, perm):
    """
    Assigns a Permission object to the abstract Role.

    Args:
        perm (object):
    """
    self.Permissions(permission=perm)
    PermissionCache.flush()
    self.save()
[ "def", "add_permission", "(", "self", ",", "perm", ")", ":", "self", ".", "Permissions", "(", "permission", "=", "perm", ")", "PermissionCache", ".", "flush", "(", ")", "self", ".", "save", "(", ")" ]
Assigns a Permission object to the abstract Role. Args: perm (object):
[ "Assigns", "a", "Permission", "object", "to", "the", "abstract", "Role", "." ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/models/auth.py#L167-L177
zetaops/zengine
zengine/models/auth.py
Role.add_permission_by_name
def add_permission_by_name(self, code, save=False):
    """
    Adds a permission with given name.

    Args:
        code (str): Code name of the permission.
        save (bool): If False, only lists the matching permissions
            without saving them to the role.
    """
    if not save:
        return ["%s | %s" % (p.name, p.code)
                for p in Permission.objects.filter(code__contains=code)]
    p = None  # guard: p would be undefined below if no permission matched
    for p in Permission.objects.filter(code__contains=code):
        if p not in self.Permissions:
            self.Permissions(permission=p)
    if p is not None:
        self.save()
python
def add_permission_by_name(self, code, save=False):
    """
    Adds a permission with given name.

    Args:
        code (str): Code name of the permission.
        save (bool): If False, only lists the matching permissions
            without saving them to the role.
    """
    if not save:
        return ["%s | %s" % (p.name, p.code)
                for p in Permission.objects.filter(code__contains=code)]
    p = None  # guard: p would be undefined below if no permission matched
    for p in Permission.objects.filter(code__contains=code):
        if p not in self.Permissions:
            self.Permissions(permission=p)
    if p is not None:
        self.save()
[ "def", "add_permission_by_name", "(", "self", ",", "code", ",", "save", "=", "False", ")", ":", "if", "not", "save", ":", "return", "[", "\"%s | %s\"", "%", "(", "p", ".", "name", ",", "p", ".", "code", ")", "for", "p", "in", "Permission", ".", "objects", ".", "filter", "(", "code__contains", "=", "code", ")", "]", "for", "p", "in", "Permission", ".", "objects", ".", "filter", "(", "code__contains", "=", "code", ")", ":", "if", "p", "not", "in", "self", ".", "Permissions", ":", "self", ".", "Permissions", "(", "permission", "=", "p", ")", "if", "p", ":", "self", ".", "save", "(", ")" ]
Adds a permission with given name. Args: code (str): Code name of the permission. save (bool): If False, only lists the matching permissions without saving them to the role.
[ "Adds", "a", "permission", "with", "given", "name", "." ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/models/auth.py#L267-L282
zetaops/zengine
zengine/models/auth.py
Role.send_notification
def send_notification(self, title, message, typ=1, url=None, sender=None):
    """
    sends a message to user of this role's private mq exchange
    """
    self.user.send_notification(title=title, message=message, typ=typ,
                                url=url, sender=sender)
python
def send_notification(self, title, message, typ=1, url=None, sender=None):
    """
    sends a message to user of this role's private mq exchange
    """
    self.user.send_notification(title=title, message=message, typ=typ,
                                url=url, sender=sender)
[ "def", "send_notification", "(", "self", ",", "title", ",", "message", ",", "typ", "=", "1", ",", "url", "=", "None", ",", "sender", "=", "None", ")", ":", "self", ".", "user", ".", "send_notification", "(", "title", "=", "title", ",", "message", "=", "message", ",", "typ", "=", "typ", ",", "url", "=", "url", ",", "sender", "=", "sender", ")" ]
sends a message to user of this role's private mq exchange
[ "sends", "a", "message", "to", "user", "of", "this", "role", "s", "private", "mq", "exchange" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/models/auth.py#L284-L290
LordDarkula/chess_py
chess_py/core/algebraic/move.py
Move.would_move_be_promotion
def would_move_be_promotion(self):
    """
    Finds if move from current location would be a promotion
    """
    return (self._end_loc.rank == 0 and not self.color) or \
           (self._end_loc.rank == 7 and self.color)
python
def would_move_be_promotion(self):
    """
    Finds if move from current location would be a promotion
    """
    return (self._end_loc.rank == 0 and not self.color) or \
           (self._end_loc.rank == 7 and self.color)
[ "def", "would_move_be_promotion", "(", "self", ")", ":", "return", "(", "self", ".", "_end_loc", ".", "rank", "==", "0", "and", "not", "self", ".", "color", ")", "or", "(", "self", ".", "_end_loc", ".", "rank", "==", "7", "and", "self", ".", "color", ")" ]
Finds if move from current location would be a promotion
[ "Finds", "if", "move", "from", "current", "location", "would", "be", "a", "promotion" ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/core/algebraic/move.py#L120-L125
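The promotion test reduces to two rank checks. A standalone version of the same predicate (assuming, as the rank arithmetic suggests, that a truthy color denotes White and ranks are 0-indexed):

def would_promote(end_rank, is_white):
    # White pawns promote on the 8th rank (index 7),
    # Black pawns on the 1st rank (index 0)
    return (end_rank == 0 and not is_white) or (end_rank == 7 and is_white)

assert would_promote(7, True)        # e7-e8 as White
assert would_promote(0, False)       # d2-d1 as Black
assert not would_promote(6, True)    # e6-e7 is not yet a promotion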
zetaops/zengine
zengine/dispatch/dispatcher.py
Signal.connect
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
    """
    Connect receiver to sender for signal.

    Arguments:

        receiver
            A function or an instance method which is to receive signals.
            Receivers must be hashable objects.

            If weak is True, then receiver must be weak referenceable.

            Receivers must be able to accept keyword arguments.

            If a receiver is connected with a dispatch_uid argument, it
            will not be added if another receiver was already connected
            with that dispatch_uid.

        sender
            The sender to which the receiver should respond. Must either be
            of type Signal, or None to receive events from any sender.

        weak
            Whether to use weak references to the receiver. By default, the
            module will attempt to use weak references to the receiver
            objects. If this parameter is false, then strong references will
            be used.

        dispatch_uid
            An identifier used to uniquely identify a particular instance of
            a receiver. This will usually be a string, though it may be
            anything hashable.
    """
    if dispatch_uid:
        lookup_key = (dispatch_uid, _make_id(sender))
    else:
        lookup_key = (_make_id(receiver), _make_id(sender))

    if weak:
        ref = weakref.ref
        receiver_object = receiver
        # Check for bound methods
        if hasattr(receiver, '__self__') and hasattr(receiver, '__func__'):
            ref = WeakMethod
            receiver_object = receiver.__self__
        if six.PY3:
            receiver = ref(receiver)
            weakref.finalize(receiver_object, self._remove_receiver)
        else:
            receiver = ref(receiver, self._remove_receiver)

    with self.lock:
        self._clear_dead_receivers()
        for r_key, _ in self.receivers:
            if r_key == lookup_key:
                break
        else:
            self.receivers.append((lookup_key, receiver))
        self.sender_receivers_cache.clear()
python
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
    """
    Connect receiver to sender for signal.

    Arguments:

        receiver
            A function or an instance method which is to receive signals.
            Receivers must be hashable objects.

            If weak is True, then receiver must be weak referenceable.

            Receivers must be able to accept keyword arguments.

            If a receiver is connected with a dispatch_uid argument, it
            will not be added if another receiver was already connected
            with that dispatch_uid.

        sender
            The sender to which the receiver should respond. Must either be
            of type Signal, or None to receive events from any sender.

        weak
            Whether to use weak references to the receiver. By default, the
            module will attempt to use weak references to the receiver
            objects. If this parameter is false, then strong references will
            be used.

        dispatch_uid
            An identifier used to uniquely identify a particular instance of
            a receiver. This will usually be a string, though it may be
            anything hashable.
    """
    if dispatch_uid:
        lookup_key = (dispatch_uid, _make_id(sender))
    else:
        lookup_key = (_make_id(receiver), _make_id(sender))

    if weak:
        ref = weakref.ref
        receiver_object = receiver
        # Check for bound methods
        if hasattr(receiver, '__self__') and hasattr(receiver, '__func__'):
            ref = WeakMethod
            receiver_object = receiver.__self__
        if six.PY3:
            receiver = ref(receiver)
            weakref.finalize(receiver_object, self._remove_receiver)
        else:
            receiver = ref(receiver, self._remove_receiver)

    with self.lock:
        self._clear_dead_receivers()
        for r_key, _ in self.receivers:
            if r_key == lookup_key:
                break
        else:
            self.receivers.append((lookup_key, receiver))
        self.sender_receivers_cache.clear()
[ "def", "connect", "(", "self", ",", "receiver", ",", "sender", "=", "None", ",", "weak", "=", "True", ",", "dispatch_uid", "=", "None", ")", ":", "if", "dispatch_uid", ":", "lookup_key", "=", "(", "dispatch_uid", ",", "_make_id", "(", "sender", ")", ")", "else", ":", "lookup_key", "=", "(", "_make_id", "(", "receiver", ")", ",", "_make_id", "(", "sender", ")", ")", "if", "weak", ":", "ref", "=", "weakref", ".", "ref", "receiver_object", "=", "receiver", "# Check for bound methods", "if", "hasattr", "(", "receiver", ",", "'__self__'", ")", "and", "hasattr", "(", "receiver", ",", "'__func__'", ")", ":", "ref", "=", "WeakMethod", "receiver_object", "=", "receiver", ".", "__self__", "if", "six", ".", "PY3", ":", "receiver", "=", "ref", "(", "receiver", ")", "weakref", ".", "finalize", "(", "receiver_object", ",", "self", ".", "_remove_receiver", ")", "else", ":", "receiver", "=", "ref", "(", "receiver", ",", "self", ".", "_remove_receiver", ")", "with", "self", ".", "lock", ":", "self", ".", "_clear_dead_receivers", "(", ")", "for", "r_key", ",", "_", "in", "self", ".", "receivers", ":", "if", "r_key", "==", "lookup_key", ":", "break", "else", ":", "self", ".", "receivers", ".", "append", "(", "(", "lookup_key", ",", "receiver", ")", ")", "self", ".", "sender_receivers_cache", ".", "clear", "(", ")" ]
Connect receiver to sender for signal. Arguments: receiver A function or an instance method which is to receive signals. Receivers must be hashable objects. If weak is True, then receiver must be weak referenceable. Receivers must be able to accept keyword arguments. If a receiver is connected with a dispatch_uid argument, it will not be added if another receiver was already connected with that dispatch_uid. sender The sender to which the receiver should respond. Must either be of type Signal, or None to receive events from any sender. weak Whether to use weak references to the receiver. By default, the module will attempt to use weak references to the receiver objects. If this parameter is false, then strong references will be used. dispatch_uid An identifier used to uniquely identify a particular instance of a receiver. This will usually be a string, though it may be anything hashable.
[ "Connect", "receiver", "to", "sender", "for", "signal", "." ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/dispatch/dispatcher.py#L56-L114
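This dispatcher is adapted from Django's, so typical usage should follow the same shape; the providing_args constructor argument and the send() call below are assumptions carried over from Django's API rather than signatures shown in this record:

from zengine.dispatch.dispatcher import Signal

crud_post_save = Signal(providing_args=['instance'])  # hypothetical signal

def log_save(sender, **kwargs):
    print('saved:', kwargs.get('instance'))

crud_post_save.connect(log_save, dispatch_uid='log-saves')
crud_post_save.send(sender=None, instance='some_object')  # invokes log_save once
crud_post_save.disconnect(dispatch_uid='log-saves')

The dispatch_uid keeps a second connect() with the same uid from registering a duplicate receiver, per the loop over self.receivers above.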
zetaops/zengine
zengine/dispatch/dispatcher.py
Signal.disconnect
def disconnect(self, receiver=None, sender=None, dispatch_uid=None):
    """
    Disconnect receiver from sender for signal.

    If weak references are used, disconnect need not be called. The
    receiver will be removed from dispatch automatically.

    Arguments:

        receiver
            The registered receiver to disconnect. May be none if
            dispatch_uid is specified.

        sender
            The registered sender to disconnect

        dispatch_uid
            the unique identifier of the receiver to disconnect
    """
    if dispatch_uid:
        lookup_key = (dispatch_uid, _make_id(sender))
    else:
        lookup_key = (_make_id(receiver), _make_id(sender))

    disconnected = False
    with self.lock:
        self._clear_dead_receivers()
        for index in range(len(self.receivers)):
            (r_key, _) = self.receivers[index]
            if r_key == lookup_key:
                disconnected = True
                del self.receivers[index]
                break
        self.sender_receivers_cache.clear()
    return disconnected
python
def disconnect(self, receiver=None, sender=None, dispatch_uid=None):
    """
    Disconnect receiver from sender for signal.

    If weak references are used, disconnect need not be called. The
    receiver will be removed from dispatch automatically.

    Arguments:

        receiver
            The registered receiver to disconnect. May be none if
            dispatch_uid is specified.

        sender
            The registered sender to disconnect

        dispatch_uid
            the unique identifier of the receiver to disconnect
    """
    if dispatch_uid:
        lookup_key = (dispatch_uid, _make_id(sender))
    else:
        lookup_key = (_make_id(receiver), _make_id(sender))

    disconnected = False
    with self.lock:
        self._clear_dead_receivers()
        for index in range(len(self.receivers)):
            (r_key, _) = self.receivers[index]
            if r_key == lookup_key:
                disconnected = True
                del self.receivers[index]
                break
        self.sender_receivers_cache.clear()
    return disconnected
[ "def", "disconnect", "(", "self", ",", "receiver", "=", "None", ",", "sender", "=", "None", ",", "dispatch_uid", "=", "None", ")", ":", "if", "dispatch_uid", ":", "lookup_key", "=", "(", "dispatch_uid", ",", "_make_id", "(", "sender", ")", ")", "else", ":", "lookup_key", "=", "(", "_make_id", "(", "receiver", ")", ",", "_make_id", "(", "sender", ")", ")", "disconnected", "=", "False", "with", "self", ".", "lock", ":", "self", ".", "_clear_dead_receivers", "(", ")", "for", "index", "in", "range", "(", "len", "(", "self", ".", "receivers", ")", ")", ":", "(", "r_key", ",", "_", ")", "=", "self", ".", "receivers", "[", "index", "]", "if", "r_key", "==", "lookup_key", ":", "disconnected", "=", "True", "del", "self", ".", "receivers", "[", "index", "]", "break", "self", ".", "sender_receivers_cache", ".", "clear", "(", ")", "return", "disconnected" ]
Disconnect receiver from sender for signal. If weak references are used, disconnect need not be called. The receiver will be removed from dispatch automatically. Arguments: receiver The registered receiver to disconnect. May be none if dispatch_uid is specified. sender The registered sender to disconnect dispatch_uid the unique identifier of the receiver to disconnect
[ "Disconnect", "receiver", "from", "sender", "for", "signal", "." ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/dispatch/dispatcher.py#L116-L151
camptocamp/marabunta
marabunta/core.py
migrate
def migrate(config):
    """Perform a migration according to config.

    :param config: The configuration to be applied
    :type config: Config
    """
    webapp = WebApp(config.web_host,
                    config.web_port,
                    custom_maintenance_file=config.web_custom_html)
    webserver = WebServer(webapp)
    webserver.daemon = True
    webserver.start()

    migration_parser = YamlParser.parse_from_file(config.migration_file)
    migration = migration_parser.parse()
    database = Database(config)

    with database.connect() as lock_connection:
        application_lock = ApplicationLock(lock_connection)
        application_lock.start()

        while not application_lock.acquired:
            time.sleep(0.5)
        else:
            if application_lock.replica:
                # when a replica could finally acquire a lock, it
                # means that the concurrent process has finished the
                # migration or that it failed to run it.
                # In both cases after the lock is released, this process will
                # verify if it has still to do something (if the other process
                # failed mainly).
                application_lock.stop = True
                application_lock.join()

        # we are not in the replica or the lock is released: go on for the
        # migration
        try:
            table = MigrationTable(database)
            runner = Runner(config, migration, database, table)
            runner.perform()
        finally:
            application_lock.stop = True
            application_lock.join()
python
def migrate(config):
    """Perform a migration according to config.

    :param config: The configuration to be applied
    :type config: Config
    """
    webapp = WebApp(config.web_host,
                    config.web_port,
                    custom_maintenance_file=config.web_custom_html)
    webserver = WebServer(webapp)
    webserver.daemon = True
    webserver.start()

    migration_parser = YamlParser.parse_from_file(config.migration_file)
    migration = migration_parser.parse()
    database = Database(config)

    with database.connect() as lock_connection:
        application_lock = ApplicationLock(lock_connection)
        application_lock.start()

        while not application_lock.acquired:
            time.sleep(0.5)
        else:
            if application_lock.replica:
                # when a replica could finally acquire a lock, it
                # means that the concurrent process has finished the
                # migration or that it failed to run it.
                # In both cases after the lock is released, this process will
                # verify if it has still to do something (if the other process
                # failed mainly).
                application_lock.stop = True
                application_lock.join()

        # we are not in the replica or the lock is released: go on for the
        # migration
        try:
            table = MigrationTable(database)
            runner = Runner(config, migration, database, table)
            runner.perform()
        finally:
            application_lock.stop = True
            application_lock.join()
[ "def", "migrate", "(", "config", ")", ":", "webapp", "=", "WebApp", "(", "config", ".", "web_host", ",", "config", ".", "web_port", ",", "custom_maintenance_file", "=", "config", ".", "web_custom_html", ")", "webserver", "=", "WebServer", "(", "webapp", ")", "webserver", ".", "daemon", "=", "True", "webserver", ".", "start", "(", ")", "migration_parser", "=", "YamlParser", ".", "parse_from_file", "(", "config", ".", "migration_file", ")", "migration", "=", "migration_parser", ".", "parse", "(", ")", "database", "=", "Database", "(", "config", ")", "with", "database", ".", "connect", "(", ")", "as", "lock_connection", ":", "application_lock", "=", "ApplicationLock", "(", "lock_connection", ")", "application_lock", ".", "start", "(", ")", "while", "not", "application_lock", ".", "acquired", ":", "time", ".", "sleep", "(", "0.5", ")", "else", ":", "if", "application_lock", ".", "replica", ":", "# when a replica could finally acquire a lock, it", "# means that the concurrent process has finished the", "# migration or that it failed to run it.", "# In both cases after the lock is released, this process will", "# verify if it has still to do something (if the other process", "# failed mainly).", "application_lock", ".", "stop", "=", "True", "application_lock", ".", "join", "(", ")", "# we are not in the replica or the lock is released: go on for the", "# migration", "try", ":", "table", "=", "MigrationTable", "(", "database", ")", "runner", "=", "Runner", "(", "config", ",", "migration", ",", "database", ",", "table", ")", "runner", ".", "perform", "(", ")", "finally", ":", "application_lock", ".", "stop", "=", "True", "application_lock", ".", "join", "(", ")" ]
Perform a migration according to config. :param config: The configuration to be applied :type config: Config
[ "Perform", "a", "migration", "according", "to", "config", "." ]
train
https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/core.py#L103-L146
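The lock choreography above implements "one migrator, many replicas": every node competes for a shared lock, losers wait, and whichever node eventually acquires it still runs the Runner, which skips versions already recorded in the migration table. A minimal, marabunta-independent sketch of that idempotent pattern, using threads in place of processes:

import threading

lock = threading.Lock()
applied = set()  # stands in for the migration table

def migrate_once(versions):
    with lock:                 # only one migrator runs at a time
        for v in versions:
            if v in applied:   # replica path: already done, skip
                continue
            applied.add(v)     # the real migration steps would run here

threads = [threading.Thread(target=migrate_once, args=(['1.0', '1.1'],))
           for _ in range(3)]
for t in threads:
    t.start()
for t in threads:
    t.join()
assert applied == {'1.0', '1.1'}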
camptocamp/marabunta
marabunta/core.py
main
def main():
    """Parse the command line and run :func:`migrate`."""
    parser = get_args_parser()
    args = parser.parse_args()
    config = Config.from_parse_args(args)
    migrate(config)
python
def main():
    """Parse the command line and run :func:`migrate`."""
    parser = get_args_parser()
    args = parser.parse_args()
    config = Config.from_parse_args(args)
    migrate(config)
[ "def", "main", "(", ")", ":", "parser", "=", "get_args_parser", "(", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "config", "=", "Config", ".", "from_parse_args", "(", "args", ")", "migrate", "(", "config", ")" ]
Parse the command line and run :func:`migrate`.
[ "Parse", "the", "command", "line", "and", "run", ":", "func", ":", "migrate", "." ]
train
https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/core.py#L149-L154
zetaops/zengine
zengine/views/crud.py
CrudMeta.get_permissions
def get_permissions(cls):
    """
    Generates permissions for all CrudView based class methods.

    Returns:
        List of Permission objects.
    """
    perms = []
    for kls_name, kls in cls.registry.items():
        for method_name in cls.__dict__.keys():
            if method_name.endswith('_view'):
                perms.append("%s.%s" % (kls_name, method_name))
    return perms
python
def get_permissions(cls):
    """
    Generates permissions for all CrudView based class methods.

    Returns:
        List of Permission objects.
    """
    perms = []
    for kls_name, kls in cls.registry.items():
        for method_name in cls.__dict__.keys():
            if method_name.endswith('_view'):
                perms.append("%s.%s" % (kls_name, method_name))
    return perms
[ "def", "get_permissions", "(", "cls", ")", ":", "perms", "=", "[", "]", "for", "kls_name", ",", "kls", "in", "cls", ".", "registry", ".", "items", "(", ")", ":", "for", "method_name", "in", "cls", ".", "__dict__", ".", "keys", "(", ")", ":", "if", "method_name", ".", "endswith", "(", "'_view'", ")", ":", "perms", ".", "append", "(", "\"%s.%s\"", "%", "(", "kls_name", ",", "method_name", ")", ")", "return", "perms" ]
Generates permissions for all CrudView based class methods. Returns: List of Permission objects.
[ "Generates", "permissions", "for", "all", "CrudView", "based", "class", "methods", "." ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/views/crud.py#L93-L105
zetaops/zengine
zengine/auth/permissions.py
_get_object_menu_models
def _get_object_menu_models():
    """
    We need to create basic permissions only for CRUD-enabled models.
    """
    from pyoko.conf import settings
    enabled_models = []
    for entry in settings.OBJECT_MENU.values():
        for mdl in entry:
            if 'wf' not in mdl:
                enabled_models.append(mdl['name'])
    return enabled_models
python
def _get_object_menu_models():
    """
    We need to create basic permissions only for CRUD-enabled models.
    """
    from pyoko.conf import settings
    enabled_models = []
    for entry in settings.OBJECT_MENU.values():
        for mdl in entry:
            if 'wf' not in mdl:
                enabled_models.append(mdl['name'])
    return enabled_models
[ "def", "_get_object_menu_models", "(", ")", ":", "from", "pyoko", ".", "conf", "import", "settings", "enabled_models", "=", "[", "]", "for", "entry", "in", "settings", ".", "OBJECT_MENU", ".", "values", "(", ")", ":", "for", "mdl", "in", "entry", ":", "if", "'wf'", "not", "in", "mdl", ":", "enabled_models", ".", "append", "(", "mdl", "[", "'name'", "]", ")", "return", "enabled_models" ]
We need to create basic permissions only for CRUD-enabled models.
[ "We", "need", "to", "create", "basic", "permissions", "only", "for", "CRUD-enabled", "models" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/auth/permissions.py#L95-L106
zetaops/zengine
zengine/auth/permissions.py
CustomPermission.add
def add(cls, code_name, name='', description=''):
    """
    create a custom permission
    """
    if code_name not in cls.registry:
        cls.registry[code_name] = (code_name, name or code_name, description)
    return code_name
python
def add(cls, code_name, name='', description=''):
    """
    create a custom permission
    """
    if code_name not in cls.registry:
        cls.registry[code_name] = (code_name, name or code_name, description)
    return code_name
[ "def", "add", "(", "cls", ",", "code_name", ",", "name", "=", "''", ",", "description", "=", "''", ")", ":", "if", "code_name", "not", "in", "cls", ".", "registry", ":", "cls", ".", "registry", "[", "code_name", "]", "=", "(", "code_name", ",", "name", "or", "code_name", ",", "description", ")", "return", "code_name" ]
create a custom permission
[ "create", "a", "custom", "permission" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/auth/permissions.py#L27-L33
cimm-kzn/CGRtools
CGRtools/algorithms/isomorphism.py
Isomorphism.get_mapping
def get_mapping(self, other):
    """
    get self to other mapping
    """
    m = next(self._matcher(other).isomorphisms_iter(), None)
    if m:
        return {v: k for k, v in m.items()}
python
def get_mapping(self, other):
    """
    get self to other mapping
    """
    m = next(self._matcher(other).isomorphisms_iter(), None)
    if m:
        return {v: k for k, v in m.items()}
[ "def", "get_mapping", "(", "self", ",", "other", ")", ":", "m", "=", "next", "(", "self", ".", "_matcher", "(", "other", ")", ".", "isomorphisms_iter", "(", ")", ",", "None", ")", "if", "m", ":", "return", "{", "v", ":", "k", "for", "k", ",", "v", "in", "m", ".", "items", "(", ")", "}" ]
get self to other mapping
[ "get", "self", "to", "other", "mapping" ]
train
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/algorithms/isomorphism.py#L56-L62
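The dict comprehension inverts the direction of the mapping the matcher yields. A standalone illustration with networkx (assuming, as the docstring's "self to other" wording plus the inversion suggests, that the matcher emits other-to-self pairs):

import networkx as nx
from networkx.algorithms.isomorphism import GraphMatcher

g = nx.path_graph(3)                                    # nodes 0-1-2
h = nx.relabel_nodes(g, {0: 'a', 1: 'b', 2: 'c'})
m = next(GraphMatcher(h, g).isomorphisms_iter(), None)  # maps h-nodes -> g-nodes
if m:
    inverted = {v: k for k, v in m.items()}             # g-nodes -> h-nodes
    print(inverted)  # e.g. {0: 'a', 1: 'b', 2: 'c'} (or the reversed path)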
cimm-kzn/CGRtools
CGRtools/algorithms/isomorphism.py
Isomorphism.get_substructure_mapping
def get_substructure_mapping(self, other, limit=1):
    """
    get self to other substructure mapping

    :param limit: number of matches. if 0 return iterator for all possible;
        if 1 return dict or None; if > 1 return list of dicts
    """
    i = self._matcher(other).subgraph_isomorphisms_iter()
    if limit == 1:
        m = next(i, None)
        if m:
            return {v: k for k, v in m.items()}
        return
    elif limit == 0:
        return ({v: k for k, v in m.items()} for m in i)
    return [{v: k for k, v in m.items()} for m in islice(i, limit)]
python
def get_substructure_mapping(self, other, limit=1):
    """
    get self to other substructure mapping

    :param limit: number of matches. if 0 return iterator for all possible;
        if 1 return dict or None; if > 1 return list of dicts
    """
    i = self._matcher(other).subgraph_isomorphisms_iter()
    if limit == 1:
        m = next(i, None)
        if m:
            return {v: k for k, v in m.items()}
        return
    elif limit == 0:
        return ({v: k for k, v in m.items()} for m in i)
    return [{v: k for k, v in m.items()} for m in islice(i, limit)]
[ "def", "get_substructure_mapping", "(", "self", ",", "other", ",", "limit", "=", "1", ")", ":", "i", "=", "self", ".", "_matcher", "(", "other", ")", ".", "subgraph_isomorphisms_iter", "(", ")", "if", "limit", "==", "1", ":", "m", "=", "next", "(", "i", ",", "None", ")", "if", "m", ":", "return", "{", "v", ":", "k", "for", "k", ",", "v", "in", "m", ".", "items", "(", ")", "}", "return", "elif", "limit", "==", "0", ":", "return", "(", "{", "v", ":", "k", "for", "k", ",", "v", "in", "m", ".", "items", "(", ")", "}", "for", "m", "in", "i", ")", "return", "[", "{", "v", ":", "k", "for", "k", ",", "v", "in", "m", ".", "items", "(", ")", "}", "for", "m", "in", "islice", "(", "i", ",", "limit", ")", "]" ]
get self to other substructure mapping :param limit: number of matches. if 0 return iterator for all possible; if 1 return dict or None; if > 1 return list of dicts
[ "get", "self", "to", "other", "substructure", "mapping" ]
train
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/algorithms/isomorphism.py#L64-L79
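The limit convention here (0 = lazy generator over all matches, 1 = a single dict or None, n > 1 = a list of at most n dicts) is a reusable islice idiom; a generic sketch of just that dispatch:

from itertools import islice

def take(iterable, limit=1):
    it = iter(iterable)
    if limit == 1:
        return next(it, None)       # first match or None
    if limit == 0:
        return it                   # lazy: caller iterates on demand
    return list(islice(it, limit))  # at most `limit` matches

assert take([1, 2, 3]) == 1
assert take([1, 2, 3], limit=2) == [1, 2]
assert list(take([1, 2, 3], limit=0)) == [1, 2, 3]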
LordDarkula/chess_py
chess_py/core/algebraic/location.py
Location.from_string
def from_string(cls, alg_str):
    """
    Creates a location from a two character string consisting of
    the file then rank written in algebraic notation.

    Examples: e4, b5, a7

    :type: alg_str: str
    :rtype: Location
    """
    try:
        return cls(int(alg_str[1]) - 1, ord(alg_str[0]) - 97)
    except ValueError as e:
        raise ValueError("Location.from_string {} invalid: {}".format(alg_str, e))
python
def from_string(cls, alg_str):
    """
    Creates a location from a two character string consisting of
    the file then rank written in algebraic notation.

    Examples: e4, b5, a7

    :type: alg_str: str
    :rtype: Location
    """
    try:
        return cls(int(alg_str[1]) - 1, ord(alg_str[0]) - 97)
    except ValueError as e:
        raise ValueError("Location.from_string {} invalid: {}".format(alg_str, e))
[ "def", "from_string", "(", "cls", ",", "alg_str", ")", ":", "try", ":", "return", "cls", "(", "int", "(", "alg_str", "[", "1", "]", ")", "-", "1", ",", "ord", "(", "alg_str", "[", "0", "]", ")", "-", "97", ")", "except", "ValueError", "as", "e", ":", "raise", "ValueError", "(", "\"Location.from_string {} invalid: {}\"", ".", "format", "(", "alg_str", ",", "e", ")", ")" ]
Creates a location from a two character string consisting of the file then rank written in algebraic notation. Examples: e4, b5, a7 :type: alg_str: str :rtype: Location
[ "Creates", "a", "location", "from", "a", "two", "character", "string", "consisting", "of", "the", "file", "then", "rank", "written", "in", "algebraic", "notation", ".", "Examples", ":", "e4", "b5", "a7" ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/core/algebraic/location.py#L63-L76
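Worked example of the conversion arithmetic: for 'e4', ord('e') - 97 == 4 gives the 0-indexed file and int('4') - 1 == 3 gives the 0-indexed rank. A class-free check of the same formula:

def square_to_indices(alg_str):
    # same arithmetic as Location.from_string: (rank, file), both 0-indexed
    return int(alg_str[1]) - 1, ord(alg_str[0]) - 97

assert square_to_indices('a1') == (0, 0)
assert square_to_indices('e4') == (3, 4)
assert square_to_indices('h8') == (7, 7)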
LordDarkula/chess_py
chess_py/core/algebraic/location.py
Location.shift
def shift(self, direction):
    """
    Shifts in direction provided by ``Direction`` enum.

    :type: direction: Direction
    :rtype: Location
    """
    try:
        if direction == Direction.UP:
            return self.shift_up()
        elif direction == Direction.DOWN:
            return self.shift_down()
        elif direction == Direction.RIGHT:
            return self.shift_right()
        elif direction == Direction.LEFT:
            return self.shift_left()
        else:
            raise IndexError("Invalid direction {}".format(direction))
    except IndexError as e:
        raise IndexError(e)
python
def shift(self, direction):
    """
    Shifts in direction provided by ``Direction`` enum.

    :type: direction: Direction
    :rtype: Location
    """
    try:
        if direction == Direction.UP:
            return self.shift_up()
        elif direction == Direction.DOWN:
            return self.shift_down()
        elif direction == Direction.RIGHT:
            return self.shift_right()
        elif direction == Direction.LEFT:
            return self.shift_left()
        else:
            raise IndexError("Invalid direction {}".format(direction))
    except IndexError as e:
        raise IndexError(e)
[ "def", "shift", "(", "self", ",", "direction", ")", ":", "try", ":", "if", "direction", "==", "Direction", ".", "UP", ":", "return", "self", ".", "shift_up", "(", ")", "elif", "direction", "==", "Direction", ".", "DOWN", ":", "return", "self", ".", "shift_down", "(", ")", "elif", "direction", "==", "Direction", ".", "RIGHT", ":", "return", "self", ".", "shift_right", "(", ")", "elif", "direction", "==", "Direction", ".", "LEFT", ":", "return", "self", ".", "shift_left", "(", ")", "else", ":", "raise", "IndexError", "(", "\"Invalid direction {}\"", ".", "format", "(", "direction", ")", ")", "except", "IndexError", "as", "e", ":", "raise", "IndexError", "(", "e", ")" ]
Shifts in direction provided by ``Direction`` enum. :type: direction: Direction :rtype: Location
[ "Shifts", "in", "direction", "provided", "by", "Direction", "enum", "." ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/core/algebraic/location.py#L144-L163
LordDarkula/chess_py
chess_py/core/algebraic/location.py
Location.shift_up
def shift_up(self, times=1):
    """
    Finds Location shifted up by 1

    :rtype: Location
    """
    try:
        return Location(self._rank + times, self._file)
    except IndexError as e:
        raise IndexError(e)
python
def shift_up(self, times=1):
    """
    Finds Location shifted up by 1

    :rtype: Location
    """
    try:
        return Location(self._rank + times, self._file)
    except IndexError as e:
        raise IndexError(e)
[ "def", "shift_up", "(", "self", ",", "times", "=", "1", ")", ":", "try", ":", "return", "Location", "(", "self", ".", "_rank", "+", "times", ",", "self", ".", "_file", ")", "except", "IndexError", "as", "e", ":", "raise", "IndexError", "(", "e", ")" ]
Finds Location shifted up by 1 :rtype: Location
[ "Finds", "Location", "shifted", "up", "by", "1" ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/core/algebraic/location.py#L165-L174
LordDarkula/chess_py
chess_py/core/algebraic/location.py
Location.shift_down
def shift_down(self, times=1):
    """
    Finds Location shifted down by 1

    :rtype: Location
    """
    try:
        return Location(self._rank - times, self._file)
    except IndexError as e:
        raise IndexError(e)
python
def shift_down(self, times=1):
    """
    Finds Location shifted down by 1

    :rtype: Location
    """
    try:
        return Location(self._rank - times, self._file)
    except IndexError as e:
        raise IndexError(e)
[ "def", "shift_down", "(", "self", ",", "times", "=", "1", ")", ":", "try", ":", "return", "Location", "(", "self", ".", "_rank", "-", "times", ",", "self", ".", "_file", ")", "except", "IndexError", "as", "e", ":", "raise", "IndexError", "(", "e", ")" ]
Finds Location shifted down by 1 :rtype: Location
[ "Finds", "Location", "shifted", "down", "by", "1" ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/core/algebraic/location.py#L176-L185
LordDarkula/chess_py
chess_py/core/algebraic/location.py
Location.shift_right
def shift_right(self, times=1):
    """
    Finds Location shifted right by 1

    :rtype: Location
    """
    try:
        return Location(self._rank, self._file + times)
    except IndexError as e:
        raise IndexError(e)
python
def shift_right(self, times=1):
    """
    Finds Location shifted right by 1

    :rtype: Location
    """
    try:
        return Location(self._rank, self._file + times)
    except IndexError as e:
        raise IndexError(e)
[ "def", "shift_right", "(", "self", ",", "times", "=", "1", ")", ":", "try", ":", "return", "Location", "(", "self", ".", "_rank", ",", "self", ".", "_file", "+", "times", ")", "except", "IndexError", "as", "e", ":", "raise", "IndexError", "(", "e", ")" ]
Finds Location shifted right by 1 :rtype: Location
[ "Finds", "Location", "shifted", "right", "by", "1" ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/core/algebraic/location.py#L201-L210
LordDarkula/chess_py
chess_py/core/algebraic/location.py
Location.shift_left
def shift_left(self, times=1):
    """
    Finds Location shifted left by 1

    :rtype: Location
    """
    try:
        return Location(self._rank, self._file - times)
    except IndexError as e:
        raise IndexError(e)
python
def shift_left(self, times=1):
    """
    Finds Location shifted left by 1

    :rtype: Location
    """
    try:
        return Location(self._rank, self._file - times)
    except IndexError as e:
        raise IndexError(e)
[ "def", "shift_left", "(", "self", ",", "times", "=", "1", ")", ":", "try", ":", "return", "Location", "(", "self", ".", "_rank", ",", "self", ".", "_file", "-", "times", ")", "except", "IndexError", "as", "e", ":", "raise", "IndexError", "(", "e", ")" ]
Finds Location shifted left by 1 :rtype: Location
[ "Finds", "Location", "shifted", "left", "by", "1" ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/core/algebraic/location.py#L212-L221
LordDarkula/chess_py
chess_py/core/algebraic/location.py
Location.shift_up_right
def shift_up_right(self, times=1):
    """
    Finds Location shifted up right by 1

    :rtype: Location
    """
    try:
        return Location(self._rank + times, self._file + times)
    except IndexError as e:
        raise IndexError(e)
python
def shift_up_right(self, times=1):
    """
    Finds Location shifted up right by 1

    :rtype: Location
    """
    try:
        return Location(self._rank + times, self._file + times)
    except IndexError as e:
        raise IndexError(e)
[ "def", "shift_up_right", "(", "self", ",", "times", "=", "1", ")", ":", "try", ":", "return", "Location", "(", "self", ".", "_rank", "+", "times", ",", "self", ".", "_file", "+", "times", ")", "except", "IndexError", "as", "e", ":", "raise", "IndexError", "(", "e", ")" ]
Finds Location shifted up right by 1 :rtype: Location
[ "Finds", "Location", "shifted", "up", "right", "by", "1" ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/core/algebraic/location.py#L223-L232
LordDarkula/chess_py
chess_py/core/algebraic/location.py
Location.shift_up_left
def shift_up_left(self, times=1):
    """
    Finds Location shifted up left by 1

    :rtype: Location
    """
    try:
        return Location(self._rank + times, self._file - times)
    except IndexError as e:
        raise IndexError(e)
python
def shift_up_left(self, times=1):
    """
    Finds Location shifted up left by 1

    :rtype: Location
    """
    try:
        return Location(self._rank + times, self._file - times)
    except IndexError as e:
        raise IndexError(e)
[ "def", "shift_up_left", "(", "self", ",", "times", "=", "1", ")", ":", "try", ":", "return", "Location", "(", "self", ".", "_rank", "+", "times", ",", "self", ".", "_file", "-", "times", ")", "except", "IndexError", "as", "e", ":", "raise", "IndexError", "(", "e", ")" ]
Finds Location shifted up left by 1 :rtype: Location
[ "Finds", "Location", "shifted", "up", "left", "by", "1" ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/core/algebraic/location.py#L234-L243
LordDarkula/chess_py
chess_py/core/algebraic/location.py
Location.shift_down_right
def shift_down_right(self, times=1):
    """
    Finds Location shifted down right by 1

    :rtype: Location
    """
    try:
        return Location(self._rank - times, self._file + times)
    except IndexError as e:
        raise IndexError(e)
python
def shift_down_right(self, times=1):
    """
    Finds Location shifted down right by 1

    :rtype: Location
    """
    try:
        return Location(self._rank - times, self._file + times)
    except IndexError as e:
        raise IndexError(e)
[ "def", "shift_down_right", "(", "self", ",", "times", "=", "1", ")", ":", "try", ":", "return", "Location", "(", "self", ".", "_rank", "-", "times", ",", "self", ".", "_file", "+", "times", ")", "except", "IndexError", "as", "e", ":", "raise", "IndexError", "(", "e", ")" ]
Finds Location shifted down right by 1 :rtype: Location
[ "Finds", "Location", "shifted", "down", "right", "by", "1" ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/core/algebraic/location.py#L245-L254
LordDarkula/chess_py
chess_py/core/algebraic/location.py
Location.shift_down_left
def shift_down_left(self, times=1):
    """
    Finds Location shifted down left by 1

    :rtype: Location
    """
    try:
        return Location(self._rank - times, self._file - times)
    except IndexError as e:
        raise IndexError(e)
python
def shift_down_left(self, times=1):
    """
    Finds Location shifted down left by 1

    :rtype: Location
    """
    try:
        return Location(self._rank - times, self._file - times)
    except IndexError as e:
        raise IndexError(e)
[ "def", "shift_down_left", "(", "self", ",", "times", "=", "1", ")", ":", "try", ":", "return", "Location", "(", "self", ".", "_rank", "-", "times", ",", "self", ".", "_file", "-", "times", ")", "except", "IndexError", "as", "e", ":", "raise", "IndexError", "(", "e", ")" ]
Finds Location shifted down left by 1 :rtype: Location
[ "Finds", "Location", "shifted", "down", "left", "by", "1" ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/core/algebraic/location.py#L256-L265
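The eight shift_* methods above are all fixed offsets of the same (rank, file) pair; a compact table-driven equivalent, shown as an illustration of the pattern rather than chess_py API:

OFFSETS = {
    'up': (1, 0), 'down': (-1, 0), 'right': (0, 1), 'left': (0, -1),
    'up_right': (1, 1), 'up_left': (1, -1),
    'down_right': (-1, 1), 'down_left': (-1, -1),
}

def shift(rank, file, direction, times=1):
    dr, df = OFFSETS[direction]
    return rank + dr * times, file + df * times

assert shift(3, 4, 'up') == (4, 4)                   # e4 -> e5
assert shift(3, 4, 'down_left', times=2) == (1, 2)   # e4 -> c2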
cimm-kzn/CGRtools
CGRtools/algorithms/standardize.py
Standardize.standardize
def standardize(self):
    """
    standardize functional groups

    :return: number of found groups
    """
    self.reset_query_marks()
    seen = set()
    total = 0
    for n, atom in self.atoms():
        if n in seen:
            continue
        for k, center in central.items():
            if center != atom:
                continue
            shell = tuple((bond, self._node[m]) for m, bond in self._adj[n].items())
            for shell_query, shell_patch, atom_patch in query_patch[k]:
                if shell_query != shell:
                    continue
                total += 1
                for attr_name, attr_value in atom_patch.items():
                    setattr(atom, attr_name, attr_value)
                for (bond_patch, atom_patch), (bond, atom) in zip(shell_patch, shell):
                    bond.update(bond_patch)
                    for attr_name, attr_value in atom_patch.items():
                        setattr(atom, attr_name, attr_value)
                seen.add(n)
                seen.update(self._adj[n])
                break
            else:
                continue
            break
    if total:
        self.flush_cache()
    return total
python
def standardize(self):
    """
    standardize functional groups

    :return: number of found groups
    """
    self.reset_query_marks()
    seen = set()
    total = 0
    for n, atom in self.atoms():
        if n in seen:
            continue
        for k, center in central.items():
            if center != atom:
                continue
            shell = tuple((bond, self._node[m]) for m, bond in self._adj[n].items())
            for shell_query, shell_patch, atom_patch in query_patch[k]:
                if shell_query != shell:
                    continue
                total += 1
                for attr_name, attr_value in atom_patch.items():
                    setattr(atom, attr_name, attr_value)
                for (bond_patch, atom_patch), (bond, atom) in zip(shell_patch, shell):
                    bond.update(bond_patch)
                    for attr_name, attr_value in atom_patch.items():
                        setattr(atom, attr_name, attr_value)
                seen.add(n)
                seen.update(self._adj[n])
                break
            else:
                continue
            break
    if total:
        self.flush_cache()
    return total
[ "def", "standardize", "(", "self", ")", ":", "self", ".", "reset_query_marks", "(", ")", "seen", "=", "set", "(", ")", "total", "=", "0", "for", "n", ",", "atom", "in", "self", ".", "atoms", "(", ")", ":", "if", "n", "in", "seen", ":", "continue", "for", "k", ",", "center", "in", "central", ".", "items", "(", ")", ":", "if", "center", "!=", "atom", ":", "continue", "shell", "=", "tuple", "(", "(", "bond", ",", "self", ".", "_node", "[", "m", "]", ")", "for", "m", ",", "bond", "in", "self", ".", "_adj", "[", "n", "]", ".", "items", "(", ")", ")", "for", "shell_query", ",", "shell_patch", ",", "atom_patch", "in", "query_patch", "[", "k", "]", ":", "if", "shell_query", "!=", "shell", ":", "continue", "total", "+=", "1", "for", "attr_name", ",", "attr_value", "in", "atom_patch", ".", "items", "(", ")", ":", "setattr", "(", "atom", ",", "attr_name", ",", "attr_value", ")", "for", "(", "bond_patch", ",", "atom_patch", ")", ",", "(", "bond", ",", "atom", ")", "in", "zip", "(", "shell_patch", ",", "shell", ")", ":", "bond", ".", "update", "(", "bond_patch", ")", "for", "attr_name", ",", "attr_value", "in", "atom_patch", ".", "items", "(", ")", ":", "setattr", "(", "atom", ",", "attr_name", ",", "attr_value", ")", "seen", ".", "add", "(", "n", ")", "seen", ".", "update", "(", "self", ".", "_adj", "[", "n", "]", ")", "break", "else", ":", "continue", "break", "if", "total", ":", "self", ".", "flush_cache", "(", ")", "return", "total" ]
standardize functional groups :return: number of found groups
[ "standardize", "functional", "groups" ]
train
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/algorithms/standardize.py#L26-L60
zetaops/zengine
git-hooks/pre-commit.py
get_staged_files
def get_staged_files():
    """Get all files staged for the current commit.
    """
    proc = subprocess.Popen(('git', 'status', '--porcelain'),
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, _ = proc.communicate()
    staged_files = modified_re.findall(out)
    return staged_files
python
def get_staged_files():
    """Get all files staged for the current commit.
    """
    proc = subprocess.Popen(('git', 'status', '--porcelain'),
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, _ = proc.communicate()
    staged_files = modified_re.findall(out)
    return staged_files
[ "def", "get_staged_files", "(", ")", ":", "proc", "=", "subprocess", ".", "Popen", "(", "(", "'git'", ",", "'status'", ",", "'--porcelain'", ")", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "out", ",", "_", "=", "proc", ".", "communicate", "(", ")", "staged_files", "=", "modified_re", ".", "findall", "(", "out", ")", "return", "staged_files" ]
Get all files staged for the current commit.
[ "Get", "all", "files", "staged", "for", "the", "current", "commit", "." ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/git-hooks/pre-commit.py#L23-L31
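`git status --porcelain` emits one "XY path" line per file, where X is the staged (index) status. modified_re is defined elsewhere in the hook; a plausible pattern for staged adds and modifications (an assumption, not the hook's actual regex) would be:

import re

modified_re = re.compile(r'^[MA].\s+(.*)$', re.MULTILINE)  # hypothetical pattern

sample = "M  zengine/models/auth.py\n?? notes.txt\n"
assert modified_re.findall(sample) == ['zengine/models/auth.py']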
zetaops/zengine
zengine/tornado_server/server.py
runserver
def runserver(host=None, port=None): """ Run Tornado server """ host = host or os.getenv('HTTP_HOST', '0.0.0.0') port = port or os.getenv('HTTP_PORT', '9001') zioloop = ioloop.IOLoop.instance() # setup pika client: pc = QueueManager(zioloop) app.pc = pc pc.connect() app.listen(port, host) zioloop.start()
python
def runserver(host=None, port=None):
    """
    Run Tornado server
    """
    host = host or os.getenv('HTTP_HOST', '0.0.0.0')
    port = port or os.getenv('HTTP_PORT', '9001')
    zioloop = ioloop.IOLoop.instance()

    # setup pika client:
    pc = QueueManager(zioloop)
    app.pc = pc
    pc.connect()

    app.listen(port, host)
    zioloop.start()
python
def runserver(host=None, port=None):
    """
    Run Tornado server
    """
    host = host or os.getenv('HTTP_HOST', '0.0.0.0')
    port = port or os.getenv('HTTP_PORT', '9001')
    zioloop = ioloop.IOLoop.instance()

    # setup pika client:
    pc = QueueManager(zioloop)
    app.pc = pc
    pc.connect()

    app.listen(port, host)
    zioloop.start()
[ "Run", "Tornado", "server" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/tornado_server/server.py#L160-L173
zetaops/zengine
zengine/tornado_server/server.py
SocketHandler.open
def open(self): """ Called on new websocket connection. """ sess_id = self._get_sess_id() if sess_id: self.application.pc.websockets[self._get_sess_id()] = self self.write_message(json.dumps({"cmd": "status", "status": "open"})) else: self.write_message(json.dumps({"cmd": "error", "error": "Please login", "code": 401}))
python
def open(self):
    """
    Called on new websocket connection.
    """
    sess_id = self._get_sess_id()
    if sess_id:
        self.application.pc.websockets[self._get_sess_id()] = self
        self.write_message(json.dumps({"cmd": "status", "status": "open"}))
    else:
        self.write_message(json.dumps({"cmd": "error", "error": "Please login", "code": 401}))
python
def open(self):
    """
    Called on new websocket connection.
    """
    sess_id = self._get_sess_id()
    if sess_id:
        self.application.pc.websockets[self._get_sess_id()] = self
        self.write_message(json.dumps({"cmd": "status", "status": "open"}))
    else:
        self.write_message(json.dumps({"cmd": "error", "error": "Please login", "code": 401}))
[ "Called", "on", "new", "websocket", "connection", "." ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/tornado_server/server.py#L48-L57
zetaops/zengine
zengine/tornado_server/server.py
SocketHandler.on_message
def on_message(self, message):
    """
    Called on new websocket message.
    """
    log.debug("WS MSG for %s: %s" % (self._get_sess_id(), message))
    self.application.pc.redirect_incoming_message(self._get_sess_id(), message, self.request)
python
def on_message(self, message):
    """
    Called on new websocket message.
    """
    log.debug("WS MSG for %s: %s" % (self._get_sess_id(), message))
    self.application.pc.redirect_incoming_message(self._get_sess_id(), message, self.request)
[ "def", "on_message", "(", "self", ",", "message", ")", ":", "log", ".", "debug", "(", "\"WS MSG for %s: %s\"", "%", "(", "self", ".", "_get_sess_id", "(", ")", ",", "message", ")", ")", "self", ".", "application", ".", "pc", ".", "redirect_incoming_message", "(", "self", ".", "_get_sess_id", "(", ")", ",", "message", ",", "self", ".", "request", ")" ]
Called on new websocket message.
[ "called", "on", "new", "websocket", "message" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/tornado_server/server.py#L59-L64
zetaops/zengine
zengine/tornado_server/server.py
HttpHandler._handle_headers
def _handle_headers(self):
    """
    Do response processing
    """
    origin = self.request.headers.get('Origin')
    if not settings.DEBUG:
        if origin in settings.ALLOWED_ORIGINS or not origin:
            self.set_header('Access-Control-Allow-Origin', origin)
        else:
            log.debug("CORS ERROR: %s not allowed, allowed hosts: %s" % (
                origin, settings.ALLOWED_ORIGINS))
            raise HTTPError(403, "Origin not in ALLOWED_ORIGINS: %s" % origin)
    else:
        self.set_header('Access-Control-Allow-Origin', origin or '*')
    self.set_header('Access-Control-Allow-Credentials', "true")
    self.set_header('Access-Control-Allow-Headers', 'Content-Type')
    self.set_header('Access-Control-Allow-Methods', 'OPTIONS')
    self.set_header('Content-Type', 'application/json')
python
def _handle_headers(self):
    """
    Do response processing
    """
    origin = self.request.headers.get('Origin')
    if not settings.DEBUG:
        if origin in settings.ALLOWED_ORIGINS or not origin:
            self.set_header('Access-Control-Allow-Origin', origin)
        else:
            log.debug("CORS ERROR: %s not allowed, allowed hosts: %s" % (
                origin, settings.ALLOWED_ORIGINS))
            raise HTTPError(403, "Origin not in ALLOWED_ORIGINS: %s" % origin)
    else:
        self.set_header('Access-Control-Allow-Origin', origin or '*')
    self.set_header('Access-Control-Allow-Credentials', "true")
    self.set_header('Access-Control-Allow-Headers', 'Content-Type')
    self.set_header('Access-Control-Allow-Methods', 'OPTIONS')
    self.set_header('Content-Type', 'application/json')
[ "def", "_handle_headers", "(", "self", ")", ":", "origin", "=", "self", ".", "request", ".", "headers", ".", "get", "(", "'Origin'", ")", "if", "not", "settings", ".", "DEBUG", ":", "if", "origin", "in", "settings", ".", "ALLOWED_ORIGINS", "or", "not", "origin", ":", "self", ".", "set_header", "(", "'Access-Control-Allow-Origin'", ",", "origin", ")", "else", ":", "log", ".", "debug", "(", "\"CORS ERROR: %s not allowed, allowed hosts: %s\"", "%", "(", "origin", ",", "settings", ".", "ALLOWED_ORIGINS", ")", ")", "raise", "HTTPError", "(", "403", ",", "\"Origin not in ALLOWED_ORIGINS: %s\"", "%", "origin", ")", "else", ":", "self", ".", "set_header", "(", "'Access-Control-Allow-Origin'", ",", "origin", "or", "'*'", ")", "self", ".", "set_header", "(", "'Access-Control-Allow-Credentials'", ",", "\"true\"", ")", "self", ".", "set_header", "(", "'Access-Control-Allow-Headers'", ",", "'Content-Type'", ")", "self", ".", "set_header", "(", "'Access-Control-Allow-Methods'", ",", "'OPTIONS'", ")", "self", ".", "set_header", "(", "'Content-Type'", ",", "'application/json'", ")" ]
Do response processing
[ "Do", "response", "processing" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/tornado_server/server.py#L79-L96
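One way to exercise the production branch above is to POST with a disallowed Origin header and check for the 403; the endpoint path and the use of the requests library below are illustrative, not part of zengine:

import requests

r = requests.post('http://localhost:9001/login',  # hypothetical route
                  json={'username': 'u', 'password': 'p'},
                  headers={'Origin': 'http://evil.example'})
print(r.status_code)  # 403 when DEBUG is off and the origin is not in ALLOWED_ORIGINS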
zetaops/zengine
zengine/tornado_server/server.py
HttpHandler.post
def post(self, view_name):
    """
    login handler
    """
    sess_id = None
    input_data = {}
    # try:
    self._handle_headers()

    # handle input
    input_data = json_decode(self.request.body) if self.request.body else {}
    input_data['path'] = view_name

    # set or get session cookie
    if not self.get_cookie(COOKIE_NAME) or 'username' in input_data:
        sess_id = uuid4().hex
        self.set_cookie(COOKIE_NAME, sess_id)  # , domain='127.0.0.1'
    else:
        sess_id = self.get_cookie(COOKIE_NAME)
    # h_sess_id = "HTTP_%s" % sess_id
    input_data = {'data': input_data,
                  '_zops_remote_ip': self.request.remote_ip}

    log.info("New Request for %s: %s" % (sess_id, input_data))
    self.application.pc.register_websocket(sess_id, self)
    self.application.pc.redirect_incoming_message(sess_id, json_encode(input_data), self.request)
python
def post(self, view_name):
    """
    login handler
    """
    sess_id = None
    input_data = {}
    # try:
    self._handle_headers()

    # handle input
    input_data = json_decode(self.request.body) if self.request.body else {}
    input_data['path'] = view_name

    # set or get session cookie
    if not self.get_cookie(COOKIE_NAME) or 'username' in input_data:
        sess_id = uuid4().hex
        self.set_cookie(COOKIE_NAME, sess_id)  # , domain='127.0.0.1'
    else:
        sess_id = self.get_cookie(COOKIE_NAME)
    # h_sess_id = "HTTP_%s" % sess_id
    input_data = {'data': input_data,
                  '_zops_remote_ip': self.request.remote_ip}

    log.info("New Request for %s: %s" % (sess_id, input_data))
    self.application.pc.register_websocket(sess_id, self)
    self.application.pc.redirect_incoming_message(sess_id, json_encode(input_data), self.request)
[ "def", "post", "(", "self", ",", "view_name", ")", ":", "sess_id", "=", "None", "input_data", "=", "{", "}", "# try:", "self", ".", "_handle_headers", "(", ")", "# handle input", "input_data", "=", "json_decode", "(", "self", ".", "request", ".", "body", ")", "if", "self", ".", "request", ".", "body", "else", "{", "}", "input_data", "[", "'path'", "]", "=", "view_name", "# set or get session cookie", "if", "not", "self", ".", "get_cookie", "(", "COOKIE_NAME", ")", "or", "'username'", "in", "input_data", ":", "sess_id", "=", "uuid4", "(", ")", ".", "hex", "self", ".", "set_cookie", "(", "COOKIE_NAME", ",", "sess_id", ")", "# , domain='127.0.0.1'", "else", ":", "sess_id", "=", "self", ".", "get_cookie", "(", "COOKIE_NAME", ")", "# h_sess_id = \"HTTP_%s\" % sess_id", "input_data", "=", "{", "'data'", ":", "input_data", ",", "'_zops_remote_ip'", ":", "self", ".", "request", ".", "remote_ip", "}", "log", ".", "info", "(", "\"New Request for %s: %s\"", "%", "(", "sess_id", ",", "input_data", ")", ")", "self", ".", "application", ".", "pc", ".", "register_websocket", "(", "sess_id", ",", "self", ")", "self", ".", "application", ".", "pc", ".", "redirect_incoming_message", "(", "sess_id", ",", "json_encode", "(", "input_data", ")", ",", "self", ".", "request", ")" ]
login handler
[ "login", "handler" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/tornado_server/server.py#L110-L137
deep-compute/logagg
logagg/collector.py
load_formatter_fn
def load_formatter_fn(formatter):
    '''
    >>> load_formatter_fn('logagg.formatters.basescript') #doctest: +ELLIPSIS
    <function basescript at 0x...>
    '''
    obj = util.load_object(formatter)
    if not hasattr(obj, 'ispartial'):
        obj.ispartial = util.ispartial
    return obj
python
def load_formatter_fn(formatter):
    '''
    >>> load_formatter_fn('logagg.formatters.basescript') #doctest: +ELLIPSIS
    <function basescript at 0x...>
    '''
    obj = util.load_object(formatter)
    if not hasattr(obj, 'ispartial'):
        obj.ispartial = util.ispartial
    return obj
[ "def", "load_formatter_fn", "(", "formatter", ")", ":", "obj", "=", "util", ".", "load_object", "(", "formatter", ")", "if", "not", "hasattr", "(", "obj", ",", "'ispartial'", ")", ":", "obj", ".", "ispartial", "=", "util", ".", "ispartial", "return", "obj" ]
>>> load_formatter_fn('logagg.formatters.basescript') #doctest: +ELLIPSIS <function basescript at 0x...>
[ ">>>", "load_formatter_fn", "(", "logagg", ".", "formatters", ".", "basescript", ")", "#doctest", ":", "+", "ELLIPSIS", "<function", "basescript", "at", "0x", "...", ">" ]
train
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/collector.py#L23-L31
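util.load_object resolves a dotted path like 'logagg.formatters.basescript' to the object it names; a common implementation of that idiom (an assumption about logagg's util module, shown for context):

import importlib

def load_object(path):
    # split 'pkg.module.attr' into a module path and an attribute name
    module_path, _, attr = path.rpartition('.')
    return getattr(importlib.import_module(module_path), attr)

json_dumps = load_object('json.dumps')
assert json_dumps({'a': 1}) == '{"a": 1}'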
deep-compute/logagg
logagg/collector.py
LogCollector._remove_redundancy
def _remove_redundancy(self, log):
    """Removes duplicate data from 'data' inside log dict and brings it out.

    >>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)

    >>> log = {'id' : 46846876, 'type' : 'log',
    ...        'data' : {'a' : 1, 'b' : 2, 'type' : 'metric'}}
    >>> lc._remove_redundancy(log)
    {'data': {'a': 1, 'b': 2}, 'type': 'metric', 'id': 46846876}
    """
    for key in log:
        if key in log and key in log['data']:
            log[key] = log['data'].pop(key)
    return log
python
def _remove_redundancy(self, log):
    """Removes duplicate data from 'data' inside log dict and brings it out.

    >>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)

    >>> log = {'id' : 46846876, 'type' : 'log',
    ...        'data' : {'a' : 1, 'b' : 2, 'type' : 'metric'}}
    >>> lc._remove_redundancy(log)
    {'data': {'a': 1, 'b': 2}, 'type': 'metric', 'id': 46846876}
    """
    for key in log:
        if key in log and key in log['data']:
            log[key] = log['data'].pop(key)
    return log
[ "def", "_remove_redundancy", "(", "self", ",", "log", ")", ":", "for", "key", "in", "log", ":", "if", "key", "in", "log", "and", "key", "in", "log", "[", "'data'", "]", ":", "log", "[", "key", "]", "=", "log", "[", "'data'", "]", ".", "pop", "(", "key", ")", "return", "log" ]
Removes duplicate data from 'data' inside log dict and brings it out. >>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30) >>> log = {'id' : 46846876, 'type' : 'log', ... 'data' : {'a' : 1, 'b' : 2, 'type' : 'metric'}} >>> lc._remove_redundancy(log) {'data': {'a': 1, 'b': 2}, 'type': 'metric', 'id': 46846876}
[ "Removes", "duplicate", "data", "from", "data", "inside", "log", "dict", "and", "brings", "it", "out", "." ]
train
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/collector.py#L78-L92
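The move-out logic is easy to exercise outside the class; this standalone sketch repeats the loop body so the before/after shapes are visible:

def remove_redundancy(log):
    # Keys that appear both at the top level and inside log['data']
    # are popped out of 'data', and the 'data' copy wins at the top level.
    for key in log:
        if key in log['data']:
            log[key] = log['data'].pop(key)
    return log

log = {'id': 46846876, 'type': 'log',
       'data': {'a': 1, 'b': 2, 'type': 'metric'}}
print(remove_redundancy(log))
# {'id': 46846876, 'type': 'metric', 'data': {'a': 1, 'b': 2}}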
deep-compute/logagg
logagg/collector.py
LogCollector.validate_log_format
def validate_log_format(self, log): ''' >>> lc = LogCollector('file=/path/to/file.log:formatter=logagg.formatters.basescript', 30) >>> incomplete_log = {'data' : {'x' : 1, 'y' : 2}, ... 'raw' : 'Not all keys present'} >>> lc.validate_log_format(incomplete_log) 'failed' >>> redundant_log = {'one_invalid_key' : 'Extra information', ... 'data': {'x' : 1, 'y' : 2}, ... 'error': False, ... 'error_tb': '', ... 'event': 'event', ... 'file': '/path/to/file.log', ... 'formatter': 'logagg.formatters.mongodb', ... 'host': 'deepcompute-ThinkPad-E470', ... 'id': '0112358', ... 'level': 'debug', ... 'raw': 'some log line here', ... 'timestamp': '2018-04-07T14:06:17.404818', ... 'type': 'log'} >>> lc.validate_log_format(redundant_log) 'failed' >>> correct_log = {'data': {'x' : 1, 'y' : 2}, ... 'error': False, ... 'error_tb': '', ... 'event': 'event', ... 'file': '/path/to/file.log', ... 'formatter': 'logagg.formatters.mongodb', ... 'host': 'deepcompute-ThinkPad-E470', ... 'id': '0112358', ... 'level': 'debug', ... 'raw': 'some log line here', ... 'timestamp': '2018-04-07T14:06:17.404818', ... 'type': 'log'} >>> lc.validate_log_format(correct_log) 'passed' ''' keys_in_log = set(log) keys_in_log_structure = set(self.LOG_STRUCTURE) try: assert (keys_in_log == keys_in_log_structure) except AssertionError as e: self.log.warning('formatted_log_structure_rejected' , key_not_found = list(keys_in_log_structure-keys_in_log), extra_keys_found = list(keys_in_log-keys_in_log_structure), num_logs=1, type='metric') return 'failed' for key in log: try: assert isinstance(log[key], self.LOG_STRUCTURE[key]) except AssertionError as e: self.log.warning('formatted_log_structure_rejected' , key_datatype_not_matched = key, datatype_expected = type(self.LOG_STRUCTURE[key]), datatype_got = type(log[key]), num_logs=1, type='metric') return 'failed' return 'passed'
python
def validate_log_format(self, log): ''' >>> lc = LogCollector('file=/path/to/file.log:formatter=logagg.formatters.basescript', 30) >>> incomplete_log = {'data' : {'x' : 1, 'y' : 2}, ... 'raw' : 'Not all keys present'} >>> lc.validate_log_format(incomplete_log) 'failed' >>> redundant_log = {'one_invalid_key' : 'Extra information', ... 'data': {'x' : 1, 'y' : 2}, ... 'error': False, ... 'error_tb': '', ... 'event': 'event', ... 'file': '/path/to/file.log', ... 'formatter': 'logagg.formatters.mongodb', ... 'host': 'deepcompute-ThinkPad-E470', ... 'id': '0112358', ... 'level': 'debug', ... 'raw': 'some log line here', ... 'timestamp': '2018-04-07T14:06:17.404818', ... 'type': 'log'} >>> lc.validate_log_format(redundant_log) 'failed' >>> correct_log = {'data': {'x' : 1, 'y' : 2}, ... 'error': False, ... 'error_tb': '', ... 'event': 'event', ... 'file': '/path/to/file.log', ... 'formatter': 'logagg.formatters.mongodb', ... 'host': 'deepcompute-ThinkPad-E470', ... 'id': '0112358', ... 'level': 'debug', ... 'raw': 'some log line here', ... 'timestamp': '2018-04-07T14:06:17.404818', ... 'type': 'log'} >>> lc.validate_log_format(correct_log) 'passed' ''' keys_in_log = set(log) keys_in_log_structure = set(self.LOG_STRUCTURE) try: assert (keys_in_log == keys_in_log_structure) except AssertionError as e: self.log.warning('formatted_log_structure_rejected' , key_not_found = list(keys_in_log_structure-keys_in_log), extra_keys_found = list(keys_in_log-keys_in_log_structure), num_logs=1, type='metric') return 'failed' for key in log: try: assert isinstance(log[key], self.LOG_STRUCTURE[key]) except AssertionError as e: self.log.warning('formatted_log_structure_rejected' , key_datatype_not_matched = key, datatype_expected = type(self.LOG_STRUCTURE[key]), datatype_got = type(log[key]), num_logs=1, type='metric') return 'failed' return 'passed'
[ "def", "validate_log_format", "(", "self", ",", "log", ")", ":", "keys_in_log", "=", "set", "(", "log", ")", "keys_in_log_structure", "=", "set", "(", "self", ".", "LOG_STRUCTURE", ")", "try", ":", "assert", "(", "keys_in_log", "==", "keys_in_log_structure", ")", "except", "AssertionError", "as", "e", ":", "self", ".", "log", ".", "warning", "(", "'formatted_log_structure_rejected'", ",", "key_not_found", "=", "list", "(", "keys_in_log_structure", "-", "keys_in_log", ")", ",", "extra_keys_found", "=", "list", "(", "keys_in_log", "-", "keys_in_log_structure", ")", ",", "num_logs", "=", "1", ",", "type", "=", "'metric'", ")", "return", "'failed'", "for", "key", "in", "log", ":", "try", ":", "assert", "isinstance", "(", "log", "[", "key", "]", ",", "self", ".", "LOG_STRUCTURE", "[", "key", "]", ")", "except", "AssertionError", "as", "e", ":", "self", ".", "log", ".", "warning", "(", "'formatted_log_structure_rejected'", ",", "key_datatype_not_matched", "=", "key", ",", "datatype_expected", "=", "type", "(", "self", ".", "LOG_STRUCTURE", "[", "key", "]", ")", ",", "datatype_got", "=", "type", "(", "log", "[", "key", "]", ")", ",", "num_logs", "=", "1", ",", "type", "=", "'metric'", ")", "return", "'failed'", "return", "'passed'" ]
>>> lc = LogCollector('file=/path/to/file.log:formatter=logagg.formatters.basescript', 30) >>> incomplete_log = {'data' : {'x' : 1, 'y' : 2}, ... 'raw' : 'Not all keys present'} >>> lc.validate_log_format(incomplete_log) 'failed' >>> redundant_log = {'one_invalid_key' : 'Extra information', ... 'data': {'x' : 1, 'y' : 2}, ... 'error': False, ... 'error_tb': '', ... 'event': 'event', ... 'file': '/path/to/file.log', ... 'formatter': 'logagg.formatters.mongodb', ... 'host': 'deepcompute-ThinkPad-E470', ... 'id': '0112358', ... 'level': 'debug', ... 'raw': 'some log line here', ... 'timestamp': '2018-04-07T14:06:17.404818', ... 'type': 'log'} >>> lc.validate_log_format(redundant_log) 'failed' >>> correct_log = {'data': {'x' : 1, 'y' : 2}, ... 'error': False, ... 'error_tb': '', ... 'event': 'event', ... 'file': '/path/to/file.log', ... 'formatter': 'logagg.formatters.mongodb', ... 'host': 'deepcompute-ThinkPad-E470', ... 'id': '0112358', ... 'level': 'debug', ... 'raw': 'some log line here', ... 'timestamp': '2018-04-07T14:06:17.404818', ... 'type': 'log'} >>> lc.validate_log_format(correct_log) 'passed'
[ ">>>", "lc", "=", "LogCollector", "(", "file", "=", "/", "path", "/", "to", "/", "file", ".", "log", ":", "formatter", "=", "logagg", ".", "formatters", ".", "basescript", "30", ")" ]
train
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/collector.py#L94-L159
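Both checks reduce to a set comparison and per-key isinstance tests; a trimmed-down sketch with a three-key schema (the real LOG_STRUCTURE has twelve keys, as the doctest above shows):

LOG_STRUCTURE = {'id': str, 'data': dict, 'raw': str}  # illustrative subset

def validate(log, structure=LOG_STRUCTURE):
    # Check 1: the key sets must match exactly (no missing, no extra).
    if set(log) != set(structure):
        return 'failed'
    # Check 2: every value must be an instance of its declared type.
    if not all(isinstance(log[k], structure[k]) for k in log):
        return 'failed'
    return 'passed'

print(validate({'id': 'x', 'data': {}, 'raw': 'line'}))  # passed
print(validate({'id': 'x', 'data': {}}))                 # failed (missing key)
print(validate({'id': 1, 'data': {}, 'raw': 'line'}))    # failed (bad type)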
deep-compute/logagg
logagg/collector.py
LogCollector.assign_default_log_values
def assign_default_log_values(self, fpath, line, formatter): ''' >>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30) >>> from pprint import pprint >>> formatter = 'logagg.formatters.mongodb' >>> fpath = '/var/log/mongodb/mongodb.log' >>> line = 'some log line here' >>> default_log = lc.assign_default_log_values(fpath, line, formatter) >>> pprint(default_log) #doctest: +ELLIPSIS {'data': {}, 'error': False, 'error_tb': '', 'event': 'event', 'file': '/var/log/mongodb/mongodb.log', 'formatter': 'logagg.formatters.mongodb', 'host': '...', 'id': None, 'level': 'debug', 'raw': 'some log line here', 'timestamp': '...', 'type': 'log'} ''' return dict( id=None, file=fpath, host=self.HOST, formatter=formatter, event='event', data={}, raw=line, timestamp=datetime.datetime.utcnow().isoformat(), type='log', level='debug', error= False, error_tb='', )
python
def assign_default_log_values(self, fpath, line, formatter): ''' >>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30) >>> from pprint import pprint >>> formatter = 'logagg.formatters.mongodb' >>> fpath = '/var/log/mongodb/mongodb.log' >>> line = 'some log line here' >>> default_log = lc.assign_default_log_values(fpath, line, formatter) >>> pprint(default_log) #doctest: +ELLIPSIS {'data': {}, 'error': False, 'error_tb': '', 'event': 'event', 'file': '/var/log/mongodb/mongodb.log', 'formatter': 'logagg.formatters.mongodb', 'host': '...', 'id': None, 'level': 'debug', 'raw': 'some log line here', 'timestamp': '...', 'type': 'log'} ''' return dict( id=None, file=fpath, host=self.HOST, formatter=formatter, event='event', data={}, raw=line, timestamp=datetime.datetime.utcnow().isoformat(), type='log', level='debug', error= False, error_tb='', )
[ "def", "assign_default_log_values", "(", "self", ",", "fpath", ",", "line", ",", "formatter", ")", ":", "return", "dict", "(", "id", "=", "None", ",", "file", "=", "fpath", ",", "host", "=", "self", ".", "HOST", ",", "formatter", "=", "formatter", ",", "event", "=", "'event'", ",", "data", "=", "{", "}", ",", "raw", "=", "line", ",", "timestamp", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "isoformat", "(", ")", ",", "type", "=", "'log'", ",", "level", "=", "'debug'", ",", "error", "=", "False", ",", "error_tb", "=", "''", ",", ")" ]
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30) >>> from pprint import pprint >>> formatter = 'logagg.formatters.mongodb' >>> fpath = '/var/log/mongodb/mongodb.log' >>> line = 'some log line here' >>> default_log = lc.assign_default_log_values(fpath, line, formatter) >>> pprint(default_log) #doctest: +ELLIPSIS {'data': {}, 'error': False, 'error_tb': '', 'event': 'event', 'file': '/var/log/mongodb/mongodb.log', 'formatter': 'logagg.formatters.mongodb', 'host': '...', 'id': None, 'level': 'debug', 'raw': 'some log line here', 'timestamp': '...', 'type': 'log'}
[ ">>>", "lc", "=", "LogCollector", "(", "file", "=", "/", "path", "/", "to", "/", "log_file", ".", "log", ":", "formatter", "=", "logagg", ".", "formatters", ".", "basescript", "30", ")", ">>>", "from", "pprint", "import", "pprint" ]
train
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/collector.py#L184-L221
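The skeleton is plain dict construction; the sketch below reproduces it as a free function, assuming HOST comes from socket.gethostname() (the class attribute's origin is not shown in this entry):

import datetime
import socket

def default_log(fpath, line, formatter):
    # Every field the validator expects, pre-filled with safe defaults.
    return dict(id=None, file=fpath, host=socket.gethostname(),
                formatter=formatter, event='event', data={}, raw=line,
                timestamp=datetime.datetime.utcnow().isoformat(),
                type='log', level='debug', error=False, error_tb='')

print(default_log('/var/log/app.log', 'some line',
                  'logagg.formatters.basescript'))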
deep-compute/logagg
logagg/collector.py
LogCollector._scan_fpatterns
def _scan_fpatterns(self, state): ''' For a list of given fpatterns, this starts a thread collecting log lines from file >>> os.path.isfile = lambda path: path == '/path/to/log_file.log' >>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30) >>> print(lc.fpaths) file=/path/to/log_file.log:formatter=logagg.formatters.basescript >>> print('formatters loaded:', lc.formatters) {} >>> print('log file reader threads started:', lc.log_reader_threads) {} >>> state = AttrDict(files_tracked=list()) >>> print('files being tracked:', state.files_tracked) [] >>> if not state.files_tracked: >>> lc._scan_fpatterns(state) >>> print('formatters loaded:', lc.formatters) >>> print('log file reader threads started:', lc.log_reader_threads) >>> print('files being tracked:', state.files_tracked) ''' for f in self.fpaths: fpattern, formatter = (a.split('=')[1] for a in f.split(':', 1)) self.log.debug('scan_fpatterns', fpattern=fpattern, formatter=formatter) # TODO code for scanning fpatterns for the files not yet present goes here fpaths = glob.glob(fpattern) # Load formatter_fn if not in list fpaths = list(set(fpaths) - set(state.files_tracked)) for fpath in fpaths: try: formatter_fn = self.formatters.get(formatter, load_formatter_fn(formatter)) self.log.info('found_formatter_fn', fn=formatter) self.formatters[formatter] = formatter_fn except (SystemExit, KeyboardInterrupt): raise except (ImportError, AttributeError): self.log.exception('formatter_fn_not_found', fn=formatter) sys.exit(-1) # Start a thread for every file self.log.info('found_log_file', log_file=fpath) log_f = dict(fpath=fpath, fpattern=fpattern, formatter=formatter, formatter_fn=formatter_fn) log_key = (fpath, fpattern, formatter) if log_key not in self.log_reader_threads: self.log.info('starting_collect_log_lines_thread', log_key=log_key) # There is no existing thread tracking this log file. Start one log_reader_thread = util.start_daemon_thread(self.collect_log_lines, (log_f,)) self.log_reader_threads[log_key] = log_reader_thread state.files_tracked.append(fpath) time.sleep(self.SCAN_FPATTERNS_INTERVAL)
python
def _scan_fpatterns(self, state): ''' For a list of given fpatterns, this starts a thread collecting log lines from file >>> os.path.isfile = lambda path: path == '/path/to/log_file.log' >>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30) >>> print(lc.fpaths) file=/path/to/log_file.log:formatter=logagg.formatters.basescript >>> print('formatters loaded:', lc.formatters) {} >>> print('log file reader threads started:', lc.log_reader_threads) {} >>> state = AttrDict(files_tracked=list()) >>> print('files being tracked:', state.files_tracked) [] >>> if not state.files_tracked: >>> lc._scan_fpatterns(state) >>> print('formatters loaded:', lc.formatters) >>> print('log file reader threads started:', lc.log_reader_threads) >>> print('files being tracked:', state.files_tracked) ''' for f in self.fpaths: fpattern, formatter = (a.split('=')[1] for a in f.split(':', 1)) self.log.debug('scan_fpatterns', fpattern=fpattern, formatter=formatter) # TODO code for scanning fpatterns for the files not yet present goes here fpaths = glob.glob(fpattern) # Load formatter_fn if not in list fpaths = list(set(fpaths) - set(state.files_tracked)) for fpath in fpaths: try: formatter_fn = self.formatters.get(formatter, load_formatter_fn(formatter)) self.log.info('found_formatter_fn', fn=formatter) self.formatters[formatter] = formatter_fn except (SystemExit, KeyboardInterrupt): raise except (ImportError, AttributeError): self.log.exception('formatter_fn_not_found', fn=formatter) sys.exit(-1) # Start a thread for every file self.log.info('found_log_file', log_file=fpath) log_f = dict(fpath=fpath, fpattern=fpattern, formatter=formatter, formatter_fn=formatter_fn) log_key = (fpath, fpattern, formatter) if log_key not in self.log_reader_threads: self.log.info('starting_collect_log_lines_thread', log_key=log_key) # There is no existing thread tracking this log file. Start one log_reader_thread = util.start_daemon_thread(self.collect_log_lines, (log_f,)) self.log_reader_threads[log_key] = log_reader_thread state.files_tracked.append(fpath) time.sleep(self.SCAN_FPATTERNS_INTERVAL)
[ "def", "_scan_fpatterns", "(", "self", ",", "state", ")", ":", "for", "f", "in", "self", ".", "fpaths", ":", "fpattern", ",", "formatter", "=", "(", "a", ".", "split", "(", "'='", ")", "[", "1", "]", "for", "a", "in", "f", ".", "split", "(", "':'", ",", "1", ")", ")", "self", ".", "log", ".", "debug", "(", "'scan_fpatterns'", ",", "fpattern", "=", "fpattern", ",", "formatter", "=", "formatter", ")", "# TODO code for scanning fpatterns for the files not yet present goes here", "fpaths", "=", "glob", ".", "glob", "(", "fpattern", ")", "# Load formatter_fn if not in list", "fpaths", "=", "list", "(", "set", "(", "fpaths", ")", "-", "set", "(", "state", ".", "files_tracked", ")", ")", "for", "fpath", "in", "fpaths", ":", "try", ":", "formatter_fn", "=", "self", ".", "formatters", ".", "get", "(", "formatter", ",", "load_formatter_fn", "(", "formatter", ")", ")", "self", ".", "log", ".", "info", "(", "'found_formatter_fn'", ",", "fn", "=", "formatter", ")", "self", ".", "formatters", "[", "formatter", "]", "=", "formatter_fn", "except", "(", "SystemExit", ",", "KeyboardInterrupt", ")", ":", "raise", "except", "(", "ImportError", ",", "AttributeError", ")", ":", "self", ".", "log", ".", "exception", "(", "'formatter_fn_not_found'", ",", "fn", "=", "formatter", ")", "sys", ".", "exit", "(", "-", "1", ")", "# Start a thread for every file", "self", ".", "log", ".", "info", "(", "'found_log_file'", ",", "log_file", "=", "fpath", ")", "log_f", "=", "dict", "(", "fpath", "=", "fpath", ",", "fpattern", "=", "fpattern", ",", "formatter", "=", "formatter", ",", "formatter_fn", "=", "formatter_fn", ")", "log_key", "=", "(", "fpath", ",", "fpattern", ",", "formatter", ")", "if", "log_key", "not", "in", "self", ".", "log_reader_threads", ":", "self", ".", "log", ".", "info", "(", "'starting_collect_log_lines_thread'", ",", "log_key", "=", "log_key", ")", "# There is no existing thread tracking this log file. Start one", "log_reader_thread", "=", "util", ".", "start_daemon_thread", "(", "self", ".", "collect_log_lines", ",", "(", "log_f", ",", ")", ")", "self", ".", "log_reader_threads", "[", "log_key", "]", "=", "log_reader_thread", "state", ".", "files_tracked", ".", "append", "(", "fpath", ")", "time", ".", "sleep", "(", "self", ".", "SCAN_FPATTERNS_INTERVAL", ")" ]
For a list of given fpatterns, this starts a thread collecting log lines from file >>> os.path.isfile = lambda path: path == '/path/to/log_file.log' >>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30) >>> print(lc.fpaths) file=/path/to/log_file.log:formatter=logagg.formatters.basescript >>> print('formatters loaded:', lc.formatters) {} >>> print('log file reader threads started:', lc.log_reader_threads) {} >>> state = AttrDict(files_tracked=list()) >>> print('files being tracked:', state.files_tracked) [] >>> if not state.files_tracked: >>> lc._scan_fpatterns(state) >>> print('formatters loaded:', lc.formatters) >>> print('log file reader threads started:', lc.log_reader_threads) >>> print('files being tracked:', state.files_tracked)
[ "For", "a", "list", "of", "given", "fpatterns", "this", "starts", "a", "thread", "collecting", "log", "lines", "from", "file" ]
train
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/collector.py#L356-L412
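The discovery loop boils down to: glob the pattern, subtract files already tracked, and start one daemon thread per new file. A self-contained sketch with a placeholder tail() standing in for collect_log_lines:

import glob
import threading

def tail(fpath):
    print('would tail', fpath)  # placeholder for collect_log_lines

tracked = set()
for fpath in sorted(set(glob.glob('/var/log/*.log')) - tracked):
    # daemon=True mirrors util.start_daemon_thread: readers die with the process.
    t = threading.Thread(target=tail, args=(fpath,), daemon=True)
    t.start()
    tracked.add(fpath)
print('tracking:', sorted(tracked))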
zetaops/zengine
zengine/forms/json_form.py
JsonForm.get_links
def get_links(self, **kw): """ Prepare links of form by mimicking pyoko's get_links method's result Args: **kw: Returns: list of link dicts """ links = [a for a in dir(self) if isinstance(getattr(self, a), Model) and not a.startswith('_model')] return [ { 'field': l, 'mdl': getattr(self, l).__class__, } for l in links ]
python
def get_links(self, **kw): """ Prepare links of form by mimicking pyoko's get_links method's result Args: **kw: Returns: list of link dicts """ links = [a for a in dir(self) if isinstance(getattr(self, a), Model) and not a.startswith('_model')] return [ { 'field': l, 'mdl': getattr(self, l).__class__, } for l in links ]
[ "def", "get_links", "(", "self", ",", "*", "*", "kw", ")", ":", "links", "=", "[", "a", "for", "a", "in", "dir", "(", "self", ")", "if", "isinstance", "(", "getattr", "(", "self", ",", "a", ")", ",", "Model", ")", "and", "not", "a", ".", "startswith", "(", "'_model'", ")", "]", "return", "[", "{", "'field'", ":", "l", ",", "'mdl'", ":", "getattr", "(", "self", ",", "l", ")", ".", "__class__", ",", "}", "for", "l", "in", "links", "]" ]
Prepare links of form by mimicking pyoko's get_links method's result Args: **kw: Returns: list of link dicts
[ "Prepare", "links", "of", "form", "by", "mimicing", "pyoko", "s", "get_links", "method", "s", "result" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/forms/json_form.py#L94-L113
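The link discovery is pure introspection: dir() lists attributes, isinstance() filters by type, and the '_model' prefix screens out internals. A compact demonstration with a stand-in Model class:

class Model:
    pass

class Form:
    owner = Model()         # a link: will be found
    title = 'x'             # not a Model: skipped
    _model_cache = Model()  # '_model' prefix: skipped

links = [a for a in dir(Form)
         if isinstance(getattr(Form, a), Model) and not a.startswith('_model')]
print(links)  # ['owner']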
zetaops/zengine
zengine/forms/json_form.py
JsonForm.set_data
def set_data(self, data): """ Fills form with data Args: data (dict): Data to assign form fields. Returns: Self. Form object. """ for name in self._fields: setattr(self, name, data.get(name)) return self
python
def set_data(self, data): """ Fills form with data Args: data (dict): Data to assign form fields. Returns: Self. Form object. """ for name in self._fields: setattr(self, name, data.get(name)) return self
[ "def", "set_data", "(", "self", ",", "data", ")", ":", "for", "name", "in", "self", ".", "_fields", ":", "setattr", "(", "self", ",", "name", ",", "data", ".", "get", "(", "name", ")", ")", "return", "self" ]
Fills form with data Args: data (dict): Data to assign form fields. Returns: Self. Form object.
[ "Fills", "form", "with", "data" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/forms/json_form.py#L158-L171
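Because dict.get returns None for absent keys, set_data silently blanks any field missing from the payload; the sketch below makes that visible:

class Form:
    _fields = ('name', 'code')

    def set_data(self, data):
        for name in self._fields:
            setattr(self, name, data.get(name))  # absent keys become None
        return self

f = Form().set_data({'name': 'perm'})
print(f.name, f.code)  # perm None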
zetaops/zengine
zengine/forms/json_form.py
JsonForm.serialize
def serialize(self): """ Converts the form/model into JSON ready dicts/lists compatible with `Ulakbus-UI API`_. Example: .. code-block:: json { "forms": { "constraints": {}, "model": { "code": null, "name": null, "save_edit": null, }, "grouping": {}, "form": [ { "helpvalue": null, "type": "help" }, "name", "code", "save_edit" ], "schema": { "required": [ "name", "code", "save_edit" ], "type": "object", "properties": { "code": { "type": "string", "title": "Code Name" }, "name": { "type": "string", "title": "Name" }, "save_edit": { "cmd": "save::add_edit_form", "type": "button", "title": "Save" } }, "title": "Add Permission" } } } """ result = { "schema": { "title": self.title, "type": "object", "properties": {}, "required": [] }, "form": [ { "type": "help", "helpvalue": self.help_text } ], "model": {} } for itm in self.META_TO_FORM_ROOT: if itm in self.Meta.__dict__: result[itm] = self.Meta.__dict__[itm] if self._model.is_in_db(): result["model"]['object_key'] = self._model.key result["model"]['model_type'] = self._model.__class__.__name__ result["model"]['unicode'] = six.text_type(self._model) # if form intentionally marked as fillable from task data by assigning False to always_blank # field in Meta class, form_data is retrieved from task_data if exist in else None form_data = None if not self.Meta.always_blank: form_data = self.context.task_data.get(self.__class__.__name__, None) for itm in self._serialize(): item_props = {'type': itm['type'], 'title': itm['title']} if not itm.get('value') and 'kwargs' in itm and 'value' in itm['kwargs']: itm['value'] = itm['kwargs'].pop('value') if 'kwargs' in itm and 'widget' in itm['kwargs']: item_props['widget'] = itm['kwargs'].pop('widget') if form_data: if form_data[itm['name']] and (itm['type'] == 'date' or itm['type'] == 'datetime'): value_to_serialize = datetime.strptime( form_data[itm['name']], itm['format']) else: value_to_serialize = form_data[itm['name']] value = self._serialize_value(value_to_serialize) if itm['type'] == 'button': value = None # if form_data is empty, value will be None, so it is needed to fill the form from model # or leave empty else: # if itm['value'] is not None returns itm['value'] # else itm['default'] if itm['value'] is not None: value = itm['value'] else: value = itm['default'] result["model"][itm['name']] = value if itm['type'] == 'model': item_props['model_name'] = itm['model_name'] if itm['type'] not in ['ListNode', 'model', 'Node']: if 'hidden' in itm['kwargs']: # we're simulating HTML's hidden form fields # by just setting it in "model" dict and bypassing other parts continue else: item_props.update(itm['kwargs']) if itm.get('choices'): self._handle_choices(itm, item_props, result) else: result["form"].append(itm['name']) if 'help_text' in itm: item_props['help_text'] = itm['help_text'] if 'schema' in itm: item_props['schema'] = itm['schema'] # this adds default directives for building # add and list views of linked models if item_props['type'] == 'model': # this control for passing test. # object gets context but do not use it. why is it for? 
if self.context: if self.context.has_permission("%s.select_list" % item_props['model_name']): item_props.update({ 'list_cmd': 'select_list', 'wf': 'crud', }) if self.context.has_permission("%s.add_edit_form" % item_props['model_name']): item_props.update({ 'add_cmd': 'add_edit_form', 'wf': 'crud', }) else: item_props.update({ 'list_cmd': 'select_list', 'add_cmd': 'add_edit_form', 'wf': 'crud' }) result["schema"]["properties"][itm['name']] = item_props if itm['required']: result["schema"]["required"].append(itm['name']) self._cache_form_details(result) return result
python
def serialize(self): """ Converts the form/model into JSON ready dicts/lists compatible with `Ulakbus-UI API`_. Example: .. code-block:: json { "forms": { "constraints": {}, "model": { "code": null, "name": null, "save_edit": null, }, "grouping": {}, "form": [ { "helpvalue": null, "type": "help" }, "name", "code", "save_edit" ], "schema": { "required": [ "name", "code", "save_edit" ], "type": "object", "properties": { "code": { "type": "string", "title": "Code Name" }, "name": { "type": "string", "title": "Name" }, "save_edit": { "cmd": "save::add_edit_form", "type": "button", "title": "Save" } }, "title": "Add Permission" } } } """ result = { "schema": { "title": self.title, "type": "object", "properties": {}, "required": [] }, "form": [ { "type": "help", "helpvalue": self.help_text } ], "model": {} } for itm in self.META_TO_FORM_ROOT: if itm in self.Meta.__dict__: result[itm] = self.Meta.__dict__[itm] if self._model.is_in_db(): result["model"]['object_key'] = self._model.key result["model"]['model_type'] = self._model.__class__.__name__ result["model"]['unicode'] = six.text_type(self._model) # if form intentionally marked as fillable from task data by assigning False to always_blank # field in Meta class, form_data is retrieved from task_data if exist in else None form_data = None if not self.Meta.always_blank: form_data = self.context.task_data.get(self.__class__.__name__, None) for itm in self._serialize(): item_props = {'type': itm['type'], 'title': itm['title']} if not itm.get('value') and 'kwargs' in itm and 'value' in itm['kwargs']: itm['value'] = itm['kwargs'].pop('value') if 'kwargs' in itm and 'widget' in itm['kwargs']: item_props['widget'] = itm['kwargs'].pop('widget') if form_data: if form_data[itm['name']] and (itm['type'] == 'date' or itm['type'] == 'datetime'): value_to_serialize = datetime.strptime( form_data[itm['name']], itm['format']) else: value_to_serialize = form_data[itm['name']] value = self._serialize_value(value_to_serialize) if itm['type'] == 'button': value = None # if form_data is empty, value will be None, so it is needed to fill the form from model # or leave empty else: # if itm['value'] is not None returns itm['value'] # else itm['default'] if itm['value'] is not None: value = itm['value'] else: value = itm['default'] result["model"][itm['name']] = value if itm['type'] == 'model': item_props['model_name'] = itm['model_name'] if itm['type'] not in ['ListNode', 'model', 'Node']: if 'hidden' in itm['kwargs']: # we're simulating HTML's hidden form fields # by just setting it in "model" dict and bypassing other parts continue else: item_props.update(itm['kwargs']) if itm.get('choices'): self._handle_choices(itm, item_props, result) else: result["form"].append(itm['name']) if 'help_text' in itm: item_props['help_text'] = itm['help_text'] if 'schema' in itm: item_props['schema'] = itm['schema'] # this adds default directives for building # add and list views of linked models if item_props['type'] == 'model': # this control for passing test. # object gets context but do not use it. why is it for? 
if self.context: if self.context.has_permission("%s.select_list" % item_props['model_name']): item_props.update({ 'list_cmd': 'select_list', 'wf': 'crud', }) if self.context.has_permission("%s.add_edit_form" % item_props['model_name']): item_props.update({ 'add_cmd': 'add_edit_form', 'wf': 'crud', }) else: item_props.update({ 'list_cmd': 'select_list', 'add_cmd': 'add_edit_form', 'wf': 'crud' }) result["schema"]["properties"][itm['name']] = item_props if itm['required']: result["schema"]["required"].append(itm['name']) self._cache_form_details(result) return result
[ "def", "serialize", "(", "self", ")", ":", "result", "=", "{", "\"schema\"", ":", "{", "\"title\"", ":", "self", ".", "title", ",", "\"type\"", ":", "\"object\"", ",", "\"properties\"", ":", "{", "}", ",", "\"required\"", ":", "[", "]", "}", ",", "\"form\"", ":", "[", "{", "\"type\"", ":", "\"help\"", ",", "\"helpvalue\"", ":", "self", ".", "help_text", "}", "]", ",", "\"model\"", ":", "{", "}", "}", "for", "itm", "in", "self", ".", "META_TO_FORM_ROOT", ":", "if", "itm", "in", "self", ".", "Meta", ".", "__dict__", ":", "result", "[", "itm", "]", "=", "self", ".", "Meta", ".", "__dict__", "[", "itm", "]", "if", "self", ".", "_model", ".", "is_in_db", "(", ")", ":", "result", "[", "\"model\"", "]", "[", "'object_key'", "]", "=", "self", ".", "_model", ".", "key", "result", "[", "\"model\"", "]", "[", "'model_type'", "]", "=", "self", ".", "_model", ".", "__class__", ".", "__name__", "result", "[", "\"model\"", "]", "[", "'unicode'", "]", "=", "six", ".", "text_type", "(", "self", ".", "_model", ")", "# if form intentionally marked as fillable from task data by assigning False to always_blank", "# field in Meta class, form_data is retrieved from task_data if exist in else None", "form_data", "=", "None", "if", "not", "self", ".", "Meta", ".", "always_blank", ":", "form_data", "=", "self", ".", "context", ".", "task_data", ".", "get", "(", "self", ".", "__class__", ".", "__name__", ",", "None", ")", "for", "itm", "in", "self", ".", "_serialize", "(", ")", ":", "item_props", "=", "{", "'type'", ":", "itm", "[", "'type'", "]", ",", "'title'", ":", "itm", "[", "'title'", "]", "}", "if", "not", "itm", ".", "get", "(", "'value'", ")", "and", "'kwargs'", "in", "itm", "and", "'value'", "in", "itm", "[", "'kwargs'", "]", ":", "itm", "[", "'value'", "]", "=", "itm", "[", "'kwargs'", "]", ".", "pop", "(", "'value'", ")", "if", "'kwargs'", "in", "itm", "and", "'widget'", "in", "itm", "[", "'kwargs'", "]", ":", "item_props", "[", "'widget'", "]", "=", "itm", "[", "'kwargs'", "]", ".", "pop", "(", "'widget'", ")", "if", "form_data", ":", "if", "form_data", "[", "itm", "[", "'name'", "]", "]", "and", "(", "itm", "[", "'type'", "]", "==", "'date'", "or", "itm", "[", "'type'", "]", "==", "'datetime'", ")", ":", "value_to_serialize", "=", "datetime", ".", "strptime", "(", "form_data", "[", "itm", "[", "'name'", "]", "]", ",", "itm", "[", "'format'", "]", ")", "else", ":", "value_to_serialize", "=", "form_data", "[", "itm", "[", "'name'", "]", "]", "value", "=", "self", ".", "_serialize_value", "(", "value_to_serialize", ")", "if", "itm", "[", "'type'", "]", "==", "'button'", ":", "value", "=", "None", "# if form_data is empty, value will be None, so it is needed to fill the form from model", "# or leave empty", "else", ":", "# if itm['value'] is not None returns itm['value']", "# else itm['default']", "if", "itm", "[", "'value'", "]", "is", "not", "None", ":", "value", "=", "itm", "[", "'value'", "]", "else", ":", "value", "=", "itm", "[", "'default'", "]", "result", "[", "\"model\"", "]", "[", "itm", "[", "'name'", "]", "]", "=", "value", "if", "itm", "[", "'type'", "]", "==", "'model'", ":", "item_props", "[", "'model_name'", "]", "=", "itm", "[", "'model_name'", "]", "if", "itm", "[", "'type'", "]", "not", "in", "[", "'ListNode'", ",", "'model'", ",", "'Node'", "]", ":", "if", "'hidden'", "in", "itm", "[", "'kwargs'", "]", ":", "# we're simulating HTML's hidden form fields", "# by just setting it in \"model\" dict and bypassing other parts", "continue", "else", ":", "item_props", ".", "update", "(", 
"itm", "[", "'kwargs'", "]", ")", "if", "itm", ".", "get", "(", "'choices'", ")", ":", "self", ".", "_handle_choices", "(", "itm", ",", "item_props", ",", "result", ")", "else", ":", "result", "[", "\"form\"", "]", ".", "append", "(", "itm", "[", "'name'", "]", ")", "if", "'help_text'", "in", "itm", ":", "item_props", "[", "'help_text'", "]", "=", "itm", "[", "'help_text'", "]", "if", "'schema'", "in", "itm", ":", "item_props", "[", "'schema'", "]", "=", "itm", "[", "'schema'", "]", "# this adds default directives for building", "# add and list views of linked models", "if", "item_props", "[", "'type'", "]", "==", "'model'", ":", "# this control for passing test.", "# object gets context but do not use it. why is it for?", "if", "self", ".", "context", ":", "if", "self", ".", "context", ".", "has_permission", "(", "\"%s.select_list\"", "%", "item_props", "[", "'model_name'", "]", ")", ":", "item_props", ".", "update", "(", "{", "'list_cmd'", ":", "'select_list'", ",", "'wf'", ":", "'crud'", ",", "}", ")", "if", "self", ".", "context", ".", "has_permission", "(", "\"%s.add_edit_form\"", "%", "item_props", "[", "'model_name'", "]", ")", ":", "item_props", ".", "update", "(", "{", "'add_cmd'", ":", "'add_edit_form'", ",", "'wf'", ":", "'crud'", ",", "}", ")", "else", ":", "item_props", ".", "update", "(", "{", "'list_cmd'", ":", "'select_list'", ",", "'add_cmd'", ":", "'add_edit_form'", ",", "'wf'", ":", "'crud'", "}", ")", "result", "[", "\"schema\"", "]", "[", "\"properties\"", "]", "[", "itm", "[", "'name'", "]", "]", "=", "item_props", "if", "itm", "[", "'required'", "]", ":", "result", "[", "\"schema\"", "]", "[", "\"required\"", "]", ".", "append", "(", "itm", "[", "'name'", "]", ")", "self", ".", "_cache_form_details", "(", "result", ")", "return", "result" ]
Converts the form/model into JSON ready dicts/lists compatible with `Ulakbus-UI API`_. Example: .. code-block:: json { "forms": { "constraints": {}, "model": { "code": null, "name": null, "save_edit": null, }, "grouping": {}, "form": [ { "helpvalue": null, "type": "help" }, "name", "code", "save_edit" ], "schema": { "required": [ "name", "code", "save_edit" ], "type": "object", "properties": { "code": { "type": "string", "title": "Code Name" }, "name": { "type": "string", "title": "Name" }, "save_edit": { "cmd": "save::add_edit_form", "type": "button", "title": "Save" } }, "title": "Add Permission" } } }
[ "Converts", "the", "form", "/", "model", "into", "JSON", "ready", "dicts", "/", "lists", "compatible", "with", "Ulakbus", "-", "UI", "API", "_", "." ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/forms/json_form.py#L173-L336
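serialize() grows three parallel structures in lockstep: schema.properties (types and titles), form (display order), and model (current values). A reduced sketch of that skeleton, with an illustrative 'name' field rather than a real pyoko model:

def skeleton(title, help_text=None):
    return {
        'schema': {'title': title, 'type': 'object',
                   'properties': {}, 'required': []},
        'form': [{'type': 'help', 'helpvalue': help_text}],
        'model': {},
    }

result = skeleton('Add Permission')
# Registering one field touches all three structures.
result['schema']['properties']['name'] = {'type': 'string', 'title': 'Name'}
result['schema']['required'].append('name')
result['form'].append('name')
result['model']['name'] = None
print(result)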
zetaops/zengine
zengine/forms/json_form.py
JsonForm._cache_form_details
def _cache_form_details(self, form): """ Caches some form details to later process and validate incoming (response) form data Args: form: form dict """ cache = FormCache() form['model']['form_key'] = cache.form_id form['model']['form_name'] = self.__class__.__name__ cache.set( { 'model': list(form['model'].keys()), # In Python 3, dictionary keys are not serializable 'non_data_fields': self.non_data_fields } )
python
def _cache_form_details(self, form): """ Caches some form details to later process and validate incoming (response) form data Args: form: form dict """ cache = FormCache() form['model']['form_key'] = cache.form_id form['model']['form_name'] = self.__class__.__name__ cache.set( { 'model': list(form['model'].keys()), # In Python 3, dictionary keys are not serializable 'non_data_fields': self.non_data_fields } )
[ "def", "_cache_form_details", "(", "self", ",", "form", ")", ":", "cache", "=", "FormCache", "(", ")", "form", "[", "'model'", "]", "[", "'form_key'", "]", "=", "cache", ".", "form_id", "form", "[", "'model'", "]", "[", "'form_name'", "]", "=", "self", ".", "__class__", ".", "__name__", "cache", ".", "set", "(", "{", "'model'", ":", "list", "(", "form", "[", "'model'", "]", ".", "keys", "(", ")", ")", ",", "# In Python 3, dictionary keys are not serializable", "'non_data_fields'", ":", "self", ".", "non_data_fields", "}", ")" ]
Caches some form details to later process and validate incoming (response) form data Args: form: form dict
[ "Caches", "some", "form", "details", "to", "lates", "process", "and", "validate", "incoming", "(", "response", ")", "form", "data" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/forms/json_form.py#L350-L365
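FormCache itself is not shown in this entry; assuming it hands out a form_id and persists a payload under it, an in-memory stand-in makes the round trip concrete:

from uuid import uuid4

class FormCache:  # in-memory stand-in; the real backend is not shown here
    _store = {}

    def __init__(self):
        self.form_id = uuid4().hex

    def set(self, payload):
        self._store[self.form_id] = payload

form = {'model': {'name': None}}
cache = FormCache()
form['model']['form_key'] = cache.form_id
cache.set({'model': list(form['model'].keys()), 'non_data_fields': []})
print(FormCache._store[cache.form_id])
# {'model': ['name', 'form_key'], 'non_data_fields': []}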
deep-compute/logagg
logagg/forwarders.py
MongoDBForwarder._parse_msg_for_mongodb
def _parse_msg_for_mongodb(self, msgs): ''' >>> mdbf = MongoDBForwarder('no_host', '27017', 'deadpool', ... 'chimichanga', 'logs', 'collection') >>> log = [{u'data': {u'_': {u'file': u'log.py', ... u'fn': u'start', ... u'ln': 8, ... u'name': u'__main__'}, ... u'a': 1, ... u'b': 2, ... u'msg': u'this is a dummy log'}, ... u'error': False, ... u'error_tb': u'', ... u'event': u'some_log', ... u'file': u'/var/log/sample.log', ... u'formatter': u'logagg.formatters.basescript', ... u'host': u'deepcompute', ... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e', ... u'level': u'info', ... u'raw': u'{...}', ... u'timestamp': u'2018-04-09T09:59:24.733945Z', ... u'type': u'metric'}] >>> records = mdbf._parse_msg_for_mongodb(log) >>> from pprint import pprint >>> pprint(records) [{'_id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e', u'data': {u'_': {u'file': u'log.py', u'fn': u'start', u'ln': 8, u'name': u'__main__'}, u'a': 1, u'b': 2, u'msg': u'this is a dummy log'}, u'error': False, u'error_tb': u'', u'event': u'some_log', u'file': u'/var/log/sample.log', u'formatter': u'logagg.formatters.basescript', u'host': u'deepcompute', u'level': u'info', u'raw': u'{...}', u'timestamp': u'2018-04-09T09:59:24.733945Z', u'type': u'metric'}] ''' msgs_list = [] for msg in msgs: try: msg['_id'] = msg.pop('id') except KeyError: self.log.exception('collector_failure_id_not_found', log=msg) msgs_list.append(msg) return msgs_list
python
def _parse_msg_for_mongodb(self, msgs): ''' >>> mdbf = MongoDBForwarder('no_host', '27017', 'deadpool', ... 'chimichanga', 'logs', 'collection') >>> log = [{u'data': {u'_': {u'file': u'log.py', ... u'fn': u'start', ... u'ln': 8, ... u'name': u'__main__'}, ... u'a': 1, ... u'b': 2, ... u'msg': u'this is a dummy log'}, ... u'error': False, ... u'error_tb': u'', ... u'event': u'some_log', ... u'file': u'/var/log/sample.log', ... u'formatter': u'logagg.formatters.basescript', ... u'host': u'deepcompute', ... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e', ... u'level': u'info', ... u'raw': u'{...}', ... u'timestamp': u'2018-04-09T09:59:24.733945Z', ... u'type': u'metric'}] >>> records = mdbf._parse_msg_for_mongodb(log) >>> from pprint import pprint >>> pprint(records) [{'_id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e', u'data': {u'_': {u'file': u'log.py', u'fn': u'start', u'ln': 8, u'name': u'__main__'}, u'a': 1, u'b': 2, u'msg': u'this is a dummy log'}, u'error': False, u'error_tb': u'', u'event': u'some_log', u'file': u'/var/log/sample.log', u'formatter': u'logagg.formatters.basescript', u'host': u'deepcompute', u'level': u'info', u'raw': u'{...}', u'timestamp': u'2018-04-09T09:59:24.733945Z', u'type': u'metric'}] ''' msgs_list = [] for msg in msgs: try: msg['_id'] = msg.pop('id') except KeyError: self.log.exception('collector_failure_id_not_found', log=msg) msgs_list.append(msg) return msgs_list
[ "def", "_parse_msg_for_mongodb", "(", "self", ",", "msgs", ")", ":", "msgs_list", "=", "[", "]", "for", "msg", "in", "msgs", ":", "try", ":", "msg", "[", "'_id'", "]", "=", "msg", ".", "pop", "(", "'id'", ")", "except", "KeyError", ":", "self", ".", "log", ".", "exception", "(", "'collector_failure_id_not_found'", ",", "log", "=", "msg", ")", "msgs_list", ".", "append", "(", "msg", ")", "return", "msgs_list" ]
>>> mdbf = MongoDBForwarder('no_host', '27017', 'deadpool', ... 'chimichanga', 'logs', 'collection') >>> log = [{u'data': {u'_': {u'file': u'log.py', ... u'fn': u'start', ... u'ln': 8, ... u'name': u'__main__'}, ... u'a': 1, ... u'b': 2, ... u'msg': u'this is a dummy log'}, ... u'error': False, ... u'error_tb': u'', ... u'event': u'some_log', ... u'file': u'/var/log/sample.log', ... u'formatter': u'logagg.formatters.basescript', ... u'host': u'deepcompute', ... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e', ... u'level': u'info', ... u'raw': u'{...}', ... u'timestamp': u'2018-04-09T09:59:24.733945Z', ... u'type': u'metric'}] >>> records = mdbf._parse_msg_for_mongodb(log) >>> from pprint import pprint >>> pprint(records) [{'_id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e', u'data': {u'_': {u'file': u'log.py', u'fn': u'start', u'ln': 8, u'name': u'__main__'}, u'a': 1, u'b': 2, u'msg': u'this is a dummy log'}, u'error': False, u'error_tb': u'', u'event': u'some_log', u'file': u'/var/log/sample.log', u'formatter': u'logagg.formatters.basescript', u'host': u'deepcompute', u'level': u'info', u'raw': u'{...}', u'timestamp': u'2018-04-09T09:59:24.733945Z', u'type': u'metric'}]
[ ">>>", "mdbf", "=", "MongoDBForwarder", "(", "no_host", "27017", "deadpool", "...", "chimichanga", "logs", "collection", ")", ">>>", "log", "=", "[", "{", "u", "data", ":", "{", "u", "_", ":", "{", "u", "file", ":", "u", "log", ".", "py", "...", "u", "fn", ":", "u", "start", "...", "u", "ln", ":", "8", "...", "u", "name", ":", "u", "__main__", "}", "...", "u", "a", ":", "1", "...", "u", "b", ":", "2", "...", "u", "msg", ":", "u", "this", "is", "a", "dummy", "log", "}", "...", "u", "error", ":", "False", "...", "u", "error_tb", ":", "u", "...", "u", "event", ":", "u", "some_log", "...", "u", "file", ":", "u", "/", "var", "/", "log", "/", "sample", ".", "log", "...", "u", "formatter", ":", "u", "logagg", ".", "formatters", ".", "basescript", "...", "u", "host", ":", "u", "deepcompute", "...", "u", "id", ":", "u", "20180409T095924_aec36d313bdc11e89da654e1ad04f45e", "...", "u", "level", ":", "u", "info", "...", "u", "raw", ":", "u", "{", "...", "}", "...", "u", "timestamp", ":", "u", "2018", "-", "04", "-", "09T09", ":", "59", ":", "24", ".", "733945Z", "...", "u", "type", ":", "u", "metric", "}", "]" ]
train
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/forwarders.py#L64-L116
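The whole transformation is the id -> _id rename that MongoDB's primary key requires; a standalone sketch:

def to_mongo(msgs):
    out = []
    for msg in msgs:
        if 'id' in msg:                  # the forwarder logs a miss and keeps going
            msg['_id'] = msg.pop('id')   # MongoDB uses _id as the primary key
        out.append(msg)
    return out

print(to_mongo([{'id': 'abc', 'event': 'some_log'}]))
# [{'event': 'some_log', '_id': 'abc'}]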
deep-compute/logagg
logagg/forwarders.py
InfluxDBForwarder._tag_and_field_maker
def _tag_and_field_maker(self, event): ''' >>> idbf = InfluxDBForwarder('no_host', '8086', 'deadpool', ... 'chimichanga', 'logs', 'collection') >>> log = {u'data': {u'_': {u'file': u'log.py', ... u'fn': u'start', ... u'ln': 8, ... u'name': u'__main__'}, ... u'a': 1, ... u'b': 2, ... u'__ignore_this': 'some_string', ... u'msg': u'this is a dummy log'}, ... u'error': False, ... u'error_tb': u'', ... u'event': u'some_log', ... u'file': u'/var/log/sample.log', ... u'formatter': u'logagg.formatters.basescript', ... u'host': u'deepcompute', ... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e', ... u'level': u'info', ... u'raw': u'{...}', ... u'timestamp': u'2018-04-09T09:59:24.733945Z', ... u'type': u'metric'} >>> tags, fields = idbf._tag_and_field_maker(log) >>> from pprint import pprint >>> pprint(tags) {u'data.msg': u'this is a dummy log', u'error_tb': u'', u'file': u'/var/log/sample.log', u'formatter': u'logagg.formatters.basescript', u'host': u'deepcompute', u'level': u'info'} >>> pprint(fields) {u'data._': "{u'ln': 8, u'fn': u'start', u'file': u'log.py', u'name': u'__main__'}", u'data.a': 1, u'data.b': 2} ''' data = event.pop('data') data = flatten_dict({'data': data}) t = dict((k, event[k]) for k in event if k not in self.EXCLUDE_TAGS) f = dict() for k in data: v = data[k] if is_number(v) or isinstance(v, MarkValue): f[k] = v else: #if v.startswith('_'): f[k] = eval(v.split('_', 1)[1]) t[k] = v return t, f
python
def _tag_and_field_maker(self, event): ''' >>> idbf = InfluxDBForwarder('no_host', '8086', 'deadpool', ... 'chimichanga', 'logs', 'collection') >>> log = {u'data': {u'_': {u'file': u'log.py', ... u'fn': u'start', ... u'ln': 8, ... u'name': u'__main__'}, ... u'a': 1, ... u'b': 2, ... u'__ignore_this': 'some_string', ... u'msg': u'this is a dummy log'}, ... u'error': False, ... u'error_tb': u'', ... u'event': u'some_log', ... u'file': u'/var/log/sample.log', ... u'formatter': u'logagg.formatters.basescript', ... u'host': u'deepcompute', ... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e', ... u'level': u'info', ... u'raw': u'{...}', ... u'timestamp': u'2018-04-09T09:59:24.733945Z', ... u'type': u'metric'} >>> tags, fields = idbf._tag_and_field_maker(log) >>> from pprint import pprint >>> pprint(tags) {u'data.msg': u'this is a dummy log', u'error_tb': u'', u'file': u'/var/log/sample.log', u'formatter': u'logagg.formatters.basescript', u'host': u'deepcompute', u'level': u'info'} >>> pprint(fields) {u'data._': "{u'ln': 8, u'fn': u'start', u'file': u'log.py', u'name': u'__main__'}", u'data.a': 1, u'data.b': 2} ''' data = event.pop('data') data = flatten_dict({'data': data}) t = dict((k, event[k]) for k in event if k not in self.EXCLUDE_TAGS) f = dict() for k in data: v = data[k] if is_number(v) or isinstance(v, MarkValue): f[k] = v else: #if v.startswith('_'): f[k] = eval(v.split('_', 1)[1]) t[k] = v return t, f
[ "def", "_tag_and_field_maker", "(", "self", ",", "event", ")", ":", "data", "=", "event", ".", "pop", "(", "'data'", ")", "data", "=", "flatten_dict", "(", "{", "'data'", ":", "data", "}", ")", "t", "=", "dict", "(", "(", "k", ",", "event", "[", "k", "]", ")", "for", "k", "in", "event", "if", "k", "not", "in", "self", ".", "EXCLUDE_TAGS", ")", "f", "=", "dict", "(", ")", "for", "k", "in", "data", ":", "v", "=", "data", "[", "k", "]", "if", "is_number", "(", "v", ")", "or", "isinstance", "(", "v", ",", "MarkValue", ")", ":", "f", "[", "k", "]", "=", "v", "else", ":", "#if v.startswith('_'): f[k] = eval(v.split('_', 1)[1])", "t", "[", "k", "]", "=", "v", "return", "t", ",", "f" ]
>>> idbf = InfluxDBForwarder('no_host', '8086', 'deadpool', ... 'chimichanga', 'logs', 'collection') >>> log = {u'data': {u'_': {u'file': u'log.py', ... u'fn': u'start', ... u'ln': 8, ... u'name': u'__main__'}, ... u'a': 1, ... u'b': 2, ... u'__ignore_this': 'some_string', ... u'msg': u'this is a dummy log'}, ... u'error': False, ... u'error_tb': u'', ... u'event': u'some_log', ... u'file': u'/var/log/sample.log', ... u'formatter': u'logagg.formatters.basescript', ... u'host': u'deepcompute', ... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e', ... u'level': u'info', ... u'raw': u'{...}', ... u'timestamp': u'2018-04-09T09:59:24.733945Z', ... u'type': u'metric'} >>> tags, fields = idbf._tag_and_field_maker(log) >>> from pprint import pprint >>> pprint(tags) {u'data.msg': u'this is a dummy log', u'error_tb': u'', u'file': u'/var/log/sample.log', u'formatter': u'logagg.formatters.basescript', u'host': u'deepcompute', u'level': u'info'} >>> pprint(fields) {u'data._': "{u'ln': 8, u'fn': u'start', u'file': u'log.py', u'name': u'__main__'}", u'data.a': 1, u'data.b': 2}
[ ">>>", "idbf", "=", "InfluxDBForwarder", "(", "no_host", "8086", "deadpool", "...", "chimichanga", "logs", "collection", ")", ">>>", "log", "=", "{", "u", "data", ":", "{", "u", "_", ":", "{", "u", "file", ":", "u", "log", ".", "py", "...", "u", "fn", ":", "u", "start", "...", "u", "ln", ":", "8", "...", "u", "name", ":", "u", "__main__", "}", "...", "u", "a", ":", "1", "...", "u", "b", ":", "2", "...", "u", "__ignore_this", ":", "some_string", "...", "u", "msg", ":", "u", "this", "is", "a", "dummy", "log", "}", "...", "u", "error", ":", "False", "...", "u", "error_tb", ":", "u", "...", "u", "event", ":", "u", "some_log", "...", "u", "file", ":", "u", "/", "var", "/", "log", "/", "sample", ".", "log", "...", "u", "formatter", ":", "u", "logagg", ".", "formatters", ".", "basescript", "...", "u", "host", ":", "u", "deepcompute", "...", "u", "id", ":", "u", "20180409T095924_aec36d313bdc11e89da654e1ad04f45e", "...", "u", "level", ":", "u", "info", "...", "u", "raw", ":", "u", "{", "...", "}", "...", "u", "timestamp", ":", "u", "2018", "-", "04", "-", "09T09", ":", "59", ":", "24", ".", "733945Z", "...", "u", "type", ":", "u", "metric", "}" ]
train
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/forwarders.py#L167-L221
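The split rule is: flatten 'data', send numeric values to InfluxDB fields and everything else to tags, and copy the remaining top-level keys into tags as well. A sketch with a small flatten() standing in for the flatten_dict helper, and an exclude tuple approximating EXCLUDE_TAGS (whose exact contents are not shown in this entry):

def flatten(d, prefix=''):
    # Collapse nested dicts into dotted keys: {'data': {'a': 1}} -> {'data.a': 1}.
    out = {}
    for k, v in d.items():
        key = prefix + '.' + k if prefix else k
        if isinstance(v, dict):
            out.update(flatten(v, key))
        else:
            out[key] = v
    return out

def split(event, exclude=('id', 'raw', 'timestamp', 'event', 'error', 'type')):
    data = flatten({'data': event.pop('data')})
    tags = {k: v for k, v in event.items() if k not in exclude}
    fields = {}
    for k, v in data.items():
        if isinstance(v, (int, float)):
            fields[k] = v   # numeric -> field
        else:
            tags[k] = v     # everything else -> tag
    return tags, fields

tags, fields = split({'id': '1', 'raw': 'x', 'host': 'h',
                      'data': {'a': 1, 'msg': 'dummy'}})
print(tags)    # {'host': 'h', 'data.msg': 'dummy'}
print(fields)  # {'data.a': 1}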
deep-compute/logagg
logagg/forwarders.py
InfluxDBForwarder._parse_msg_for_influxdb
def _parse_msg_for_influxdb(self, msgs): ''' >>> from logagg.forwarders import InfluxDBForwarder >>> idbf = InfluxDBForwarder('no_host', '8086', 'deadpool', ... 'chimichanga', 'logs', 'collection') >>> valid_log = [{u'data': {u'_force_this_as_field': 'CXNS CNS nbkbsd', ... u'a': 1, ... u'b': 2, ... u'msg': u'this is a dummy log'}, ... u'error': False, ... u'error_tb': u'', ... u'event': u'some_log', ... u'file': u'/var/log/sample.log', ... u'formatter': u'logagg.formatters.basescript', ... u'host': u'deepcompute', ... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e', ... u'level': u'info', ... u'raw': u'{...}', ... u'timestamp': u'2018-04-09T09:59:24.733945Z', ... u'type': u'metric'}] >>> pointvalues = idbf._parse_msg_for_influxdb(valid_log) >>> from pprint import pprint >>> pprint(pointvalues) [{'fields': {u'data._force_this_as_field': "'CXNS CNS nbkbsd'", u'data.a': 1, u'data.b': 2}, 'measurement': u'some_log', 'tags': {u'data.msg': u'this is a dummy log', u'error_tb': u'', u'file': u'/var/log/sample.log', u'formatter': u'logagg.formatters.basescript', u'host': u'deepcompute', u'level': u'info'}, 'time': u'2018-04-09T09:59:24.733945Z'}] >>> invalid_log = valid_log >>> invalid_log[0]['error'] = True >>> pointvalues = idbf._parse_msg_for_influxdb(invalid_log) >>> pprint(pointvalues) [] >>> invalid_log = valid_log >>> invalid_log[0]['type'] = 'log' >>> pointvalues = idbf._parse_msg_for_influxdb(invalid_log) >>> pprint(pointvalues) [] ''' series = [] for msg in msgs: if msg.get('error'): continue if msg.get('type').lower() == 'metric': time = msg.get('timestamp') measurement = msg.get('event') tags, fields = self._tag_and_field_maker(msg) pointvalues = { "time": time, "measurement": measurement, "fields": fields, "tags": tags} series.append(pointvalues) return series
python
def _parse_msg_for_influxdb(self, msgs): ''' >>> from logagg.forwarders import InfluxDBForwarder >>> idbf = InfluxDBForwarder('no_host', '8086', 'deadpool', ... 'chimichanga', 'logs', 'collection') >>> valid_log = [{u'data': {u'_force_this_as_field': 'CXNS CNS nbkbsd', ... u'a': 1, ... u'b': 2, ... u'msg': u'this is a dummy log'}, ... u'error': False, ... u'error_tb': u'', ... u'event': u'some_log', ... u'file': u'/var/log/sample.log', ... u'formatter': u'logagg.formatters.basescript', ... u'host': u'deepcompute', ... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e', ... u'level': u'info', ... u'raw': u'{...}', ... u'timestamp': u'2018-04-09T09:59:24.733945Z', ... u'type': u'metric'}] >>> pointvalues = idbf._parse_msg_for_influxdb(valid_log) >>> from pprint import pprint >>> pprint(pointvalues) [{'fields': {u'data._force_this_as_field': "'CXNS CNS nbkbsd'", u'data.a': 1, u'data.b': 2}, 'measurement': u'some_log', 'tags': {u'data.msg': u'this is a dummy log', u'error_tb': u'', u'file': u'/var/log/sample.log', u'formatter': u'logagg.formatters.basescript', u'host': u'deepcompute', u'level': u'info'}, 'time': u'2018-04-09T09:59:24.733945Z'}] >>> invalid_log = valid_log >>> invalid_log[0]['error'] = True >>> pointvalues = idbf._parse_msg_for_influxdb(invalid_log) >>> pprint(pointvalues) [] >>> invalid_log = valid_log >>> invalid_log[0]['type'] = 'log' >>> pointvalues = idbf._parse_msg_for_influxdb(invalid_log) >>> pprint(pointvalues) [] ''' series = [] for msg in msgs: if msg.get('error'): continue if msg.get('type').lower() == 'metric': time = msg.get('timestamp') measurement = msg.get('event') tags, fields = self._tag_and_field_maker(msg) pointvalues = { "time": time, "measurement": measurement, "fields": fields, "tags": tags} series.append(pointvalues) return series
[ "def", "_parse_msg_for_influxdb", "(", "self", ",", "msgs", ")", ":", "series", "=", "[", "]", "for", "msg", "in", "msgs", ":", "if", "msg", ".", "get", "(", "'error'", ")", ":", "continue", "if", "msg", ".", "get", "(", "'type'", ")", ".", "lower", "(", ")", "==", "'metric'", ":", "time", "=", "msg", ".", "get", "(", "'timestamp'", ")", "measurement", "=", "msg", ".", "get", "(", "'event'", ")", "tags", ",", "fields", "=", "self", ".", "_tag_and_field_maker", "(", "msg", ")", "pointvalues", "=", "{", "\"time\"", ":", "time", ",", "\"measurement\"", ":", "measurement", ",", "\"fields\"", ":", "fields", ",", "\"tags\"", ":", "tags", "}", "series", ".", "append", "(", "pointvalues", ")", "return", "series" ]
>>> from logagg.forwarders import InfluxDBForwarder >>> idbf = InfluxDBForwarder('no_host', '8086', 'deadpool', ... 'chimichanga', 'logs', 'collection') >>> valid_log = [{u'data': {u'_force_this_as_field': 'CXNS CNS nbkbsd', ... u'a': 1, ... u'b': 2, ... u'msg': u'this is a dummy log'}, ... u'error': False, ... u'error_tb': u'', ... u'event': u'some_log', ... u'file': u'/var/log/sample.log', ... u'formatter': u'logagg.formatters.basescript', ... u'host': u'deepcompute', ... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e', ... u'level': u'info', ... u'raw': u'{...}', ... u'timestamp': u'2018-04-09T09:59:24.733945Z', ... u'type': u'metric'}] >>> pointvalues = idbf._parse_msg_for_influxdb(valid_log) >>> from pprint import pprint >>> pprint(pointvalues) [{'fields': {u'data._force_this_as_field': "'CXNS CNS nbkbsd'", u'data.a': 1, u'data.b': 2}, 'measurement': u'some_log', 'tags': {u'data.msg': u'this is a dummy log', u'error_tb': u'', u'file': u'/var/log/sample.log', u'formatter': u'logagg.formatters.basescript', u'host': u'deepcompute', u'level': u'info'}, 'time': u'2018-04-09T09:59:24.733945Z'}] >>> invalid_log = valid_log >>> invalid_log[0]['error'] = True >>> pointvalues = idbf._parse_msg_for_influxdb(invalid_log) >>> pprint(pointvalues) [] >>> invalid_log = valid_log >>> invalid_log[0]['type'] = 'log' >>> pointvalues = idbf._parse_msg_for_influxdb(invalid_log) >>> pprint(pointvalues) []
[ ">>>", "from", "logagg", ".", "forwarders", "import", "InfluxDBForwarder", ">>>", "idbf", "=", "InfluxDBForwarder", "(", "no_host", "8086", "deadpool", "...", "chimichanga", "logs", "collection", ")" ]
train
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/forwarders.py#L223-L290
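Point construction is a filter plus a reshuffle: drop error rows, keep only type == 'metric', and map timestamp/event onto InfluxDB's time/measurement. A sketch with placeholder tags and fields standing in for the _tag_and_field_maker split above:

def to_points(msgs):
    points = []
    for msg in msgs:
        if msg.get('error') or msg.get('type', '').lower() != 'metric':
            continue  # errors and plain logs never become points
        points.append({'time': msg['timestamp'],
                       'measurement': msg['event'],
                       'fields': {'value': 1},         # placeholder split
                       'tags': {'host': msg['host']}})
    return points

print(to_points([
    {'error': False, 'type': 'metric', 'timestamp': 't0',
     'event': 'some_log', 'host': 'h'},
    {'error': True, 'type': 'metric'},   # dropped: error flag
    {'error': False, 'type': 'log'},     # dropped: not a metric
]))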
wdecoster/nanoget
nanoget/nanoget.py
get_input
def get_input(source, files, threads=4, readtype="1D", combine="simple", names=None, barcoded=False): """Get input and process accordingly. Data can be: - an uncompressed, bgzip, bzip2 or gzip compressed fastq file - an uncompressed, bgzip, bzip2 or gzip compressed fasta file - a rich fastq containing additional key=value information in the description, as produced by MinKNOW and albacore with the same compression options as above - a sorted bam file - a sorted cram file - a (compressed) sequencing_summary.txt file generated by albacore Handle is passed to the proper functions to get DataFrame with metrics Multiple files of the same type can be used to extract info from, which is done in parallel Arguments: - source: defines the input data type and the function that needs to be called - files: is a list of one or more files to operate on, from the type of <source> - threads: is the number of workers that can be used - readtype: (only relevant for summary input) specifies which columns have to be extracted - combine: is either 'simple' or 'track', with the difference that with 'track' an additional field is created with the name of the dataset - names: if combine="track", the names to be used for the datasets. Needs to have same length as files, or None """ proc_functions = { 'fastq': ex.process_fastq_plain, 'fasta': ex.process_fasta, 'bam': ex.process_bam, 'summary': ex.process_summary, 'fastq_rich': ex.process_fastq_rich, 'fastq_minimal': ex.process_fastq_minimal, 'cram': ex.process_cram, 'ubam': ex.process_ubam, } filethreads = min(len(files), threads) threadsleft = threads - filethreads with cfutures.ProcessPoolExecutor(max_workers=filethreads) as executor: extration_function = partial(proc_functions[source], threads=threadsleft, readtype=readtype, barcoded=barcoded) datadf = combine_dfs( dfs=[out for out in executor.map(extration_function, files)], names=names or files, method=combine) if "readIDs" in datadf and pd.isna(datadf["readIDs"]).any(): datadf.drop("readIDs", axis='columns', inplace=True) datadf = calculate_start_time(datadf) logging.info("Nanoget: Gathered all metrics of {} reads".format(len(datadf))) if len(datadf) == 0: logging.critical("Nanoget: no reads retrieved.".format(len(datadf))) sys.exit("Fatal: No reads found in input.") else: return datadf
python
def get_input(source, files, threads=4, readtype="1D", combine="simple", names=None, barcoded=False): """Get input and process accordingly. Data can be: - an uncompressed, bgzip, bzip2 or gzip compressed fastq file - an uncompressed, bgzip, bzip2 or gzip compressed fasta file - a rich fastq containing additional key=value information in the description, as produced by MinKNOW and albacore with the same compression options as above - a sorted bam file - a sorted cram file - a (compressed) sequencing_summary.txt file generated by albacore Handle is passed to the proper functions to get DataFrame with metrics Multiple files of the same type can be used to extract info from, which is done in parallel Arguments: - source: defines the input data type and the function that needs to be called - files: is a list of one or more files to operate on, from the type of <source> - threads: is the number of workers that can be used - readtype: (only relevant for summary input) specifies which columns have to be extracted - combine: is either 'simple' or 'track', with the difference that with 'track' an additional field is created with the name of the dataset - names: if combine="track", the names to be used for the datasets. Needs to have same length as files, or None """ proc_functions = { 'fastq': ex.process_fastq_plain, 'fasta': ex.process_fasta, 'bam': ex.process_bam, 'summary': ex.process_summary, 'fastq_rich': ex.process_fastq_rich, 'fastq_minimal': ex.process_fastq_minimal, 'cram': ex.process_cram, 'ubam': ex.process_ubam, } filethreads = min(len(files), threads) threadsleft = threads - filethreads with cfutures.ProcessPoolExecutor(max_workers=filethreads) as executor: extration_function = partial(proc_functions[source], threads=threadsleft, readtype=readtype, barcoded=barcoded) datadf = combine_dfs( dfs=[out for out in executor.map(extration_function, files)], names=names or files, method=combine) if "readIDs" in datadf and pd.isna(datadf["readIDs"]).any(): datadf.drop("readIDs", axis='columns', inplace=True) datadf = calculate_start_time(datadf) logging.info("Nanoget: Gathered all metrics of {} reads".format(len(datadf))) if len(datadf) == 0: logging.critical("Nanoget: no reads retrieved.".format(len(datadf))) sys.exit("Fatal: No reads found in input.") else: return datadf
[ "def", "get_input", "(", "source", ",", "files", ",", "threads", "=", "4", ",", "readtype", "=", "\"1D\"", ",", "combine", "=", "\"simple\"", ",", "names", "=", "None", ",", "barcoded", "=", "False", ")", ":", "proc_functions", "=", "{", "'fastq'", ":", "ex", ".", "process_fastq_plain", ",", "'fasta'", ":", "ex", ".", "process_fasta", ",", "'bam'", ":", "ex", ".", "process_bam", ",", "'summary'", ":", "ex", ".", "process_summary", ",", "'fastq_rich'", ":", "ex", ".", "process_fastq_rich", ",", "'fastq_minimal'", ":", "ex", ".", "process_fastq_minimal", ",", "'cram'", ":", "ex", ".", "process_cram", ",", "'ubam'", ":", "ex", ".", "process_ubam", ",", "}", "filethreads", "=", "min", "(", "len", "(", "files", ")", ",", "threads", ")", "threadsleft", "=", "threads", "-", "filethreads", "with", "cfutures", ".", "ProcessPoolExecutor", "(", "max_workers", "=", "filethreads", ")", "as", "executor", ":", "extration_function", "=", "partial", "(", "proc_functions", "[", "source", "]", ",", "threads", "=", "threadsleft", ",", "readtype", "=", "readtype", ",", "barcoded", "=", "barcoded", ")", "datadf", "=", "combine_dfs", "(", "dfs", "=", "[", "out", "for", "out", "in", "executor", ".", "map", "(", "extration_function", ",", "files", ")", "]", ",", "names", "=", "names", "or", "files", ",", "method", "=", "combine", ")", "if", "\"readIDs\"", "in", "datadf", "and", "pd", ".", "isna", "(", "datadf", "[", "\"readIDs\"", "]", ")", ".", "any", "(", ")", ":", "datadf", ".", "drop", "(", "\"readIDs\"", ",", "axis", "=", "'columns'", ",", "inplace", "=", "True", ")", "datadf", "=", "calculate_start_time", "(", "datadf", ")", "logging", ".", "info", "(", "\"Nanoget: Gathered all metrics of {} reads\"", ".", "format", "(", "len", "(", "datadf", ")", ")", ")", "if", "len", "(", "datadf", ")", "==", "0", ":", "logging", ".", "critical", "(", "\"Nanoget: no reads retrieved.\"", ".", "format", "(", "len", "(", "datadf", ")", ")", ")", "sys", ".", "exit", "(", "\"Fatal: No reads found in input.\"", ")", "else", ":", "return", "datadf" ]
Get input and process accordingly.

    Data can be:
    - an uncompressed, bgzip, bzip2 or gzip compressed fastq file
    - an uncompressed, bgzip, bzip2 or gzip compressed fasta file
    - a rich fastq containing additional key=value information in the description,
      as produced by MinKNOW and albacore, with the same compression options as above
    - a sorted bam file
    - a sorted cram file
    - a (compressed) sequencing_summary.txt file generated by albacore

    The handle is passed to the proper function to get a DataFrame with metrics.
    Multiple files of the same type can be processed in parallel.

    Arguments:
    - source: defines the input data type and the function that needs to be called
    - files: a list of one or more files to operate on, of the type specified by <source>
    - threads: the number of workers that can be used
    - readtype: (only relevant for summary input) specifies which columns have to be extracted
    - combine: either 'simple' or 'track'; with 'track' an additional field is created
      with the name of the dataset
    - names: if combine="track", the names to be used for the datasets.
      Needs to have the same length as files, or be None.
[ "Get", "input", "and", "process", "accordingly", "." ]
train
https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/nanoget.py#L29-L82
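A minimal usage sketch for get_input, assuming nanoget is installed and that get_input is importable from the package top level; the bam file names and dataset labels are hypothetical placeholders.

from nanoget import get_input  # assumed import path

# Two hypothetical sorted bam files, tracked as separate datasets.
df = get_input(source="bam",
               files=["run1.bam", "run2.bam"],
               threads=8,
               combine="track",
               names=["flowcell_A", "flowcell_B"])
print(df["dataset"].value_counts())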
wdecoster/nanoget
nanoget/nanoget.py
combine_dfs
def combine_dfs(dfs, names, method):
    """Combine dataframes.

    Combination is either done simply by concatenating the DataFrames,
    or with tracking by adding the name of the dataset as a column."""
    if method == "track":
        res = list()
        for df, identifier in zip(dfs, names):
            df["dataset"] = identifier
            res.append(df)
        return pd.concat(res, ignore_index=True)
    elif method == "simple":
        return pd.concat(dfs, ignore_index=True)
python
def combine_dfs(dfs, names, method):
    """Combine dataframes.

    Combination is either done simply by concatenating the DataFrames,
    or with tracking by adding the name of the dataset as a column."""
    if method == "track":
        res = list()
        for df, identifier in zip(dfs, names):
            df["dataset"] = identifier
            res.append(df)
        return pd.concat(res, ignore_index=True)
    elif method == "simple":
        return pd.concat(dfs, ignore_index=True)
[ "def", "combine_dfs", "(", "dfs", ",", "names", ",", "method", ")", ":", "if", "method", "==", "\"track\"", ":", "res", "=", "list", "(", ")", "for", "df", ",", "identifier", "in", "zip", "(", "dfs", ",", "names", ")", ":", "df", "[", "\"dataset\"", "]", "=", "identifier", "res", ".", "append", "(", "df", ")", "return", "pd", ".", "concat", "(", "res", ",", "ignore_index", "=", "True", ")", "elif", "method", "==", "\"simple\"", ":", "return", "pd", ".", "concat", "(", "dfs", ",", "ignore_index", "=", "True", ")" ]
Combine dataframes. Combination is either done simply by concatenating the DataFrames, or with tracking by adding the name of the dataset as a column.
[ "Combine", "dataframes", "." ]
train
https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/nanoget.py#L85-L97
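A short sketch of the two combine modes on toy DataFrames rather than real extraction output. Two design points are worth noting: the 'track' branch mutates its inputs in place by adding the dataset column, and any method other than 'track' or 'simple' falls through and implicitly returns None.

import pandas as pd

df_a = pd.DataFrame({"lengths": [100, 200]})
df_b = pd.DataFrame({"lengths": [300]})

merged = combine_dfs(dfs=[df_a, df_b], names=None, method="simple")
# merged has three rows with a fresh 0..2 index; names is unused here

tracked = combine_dfs(dfs=[df_a, df_b], names=["a", "b"], method="track")
# tracked["dataset"] is ['a', 'a', 'b']; df_a and df_b now carry the column too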
wdecoster/nanoget
nanoget/nanoget.py
calculate_start_time
def calculate_start_time(df):
    """Calculate the start_time per read.

    Time data is either a "time" (in seconds, derived from summary files) or a
    "timestamp" (in UTC, derived from fastq_rich format), and has to be converted
    appropriately into a datetime column time_arr.

    In both cases time_zero is the minimal value of time_arr, which is then
    subtracted from all other times.

    In the case of method=track (and dataset is a column in the df),
    this subtraction is done per dataset.
    """
    if "time" in df:
        df["time_arr"] = pd.Series(df["time"], dtype='datetime64[s]')
    elif "timestamp" in df:
        df["time_arr"] = pd.Series(df["timestamp"], dtype="datetime64[ns]")
    else:
        return df
    if "dataset" in df:
        for dset in df["dataset"].unique():
            time_zero = df.loc[df["dataset"] == dset, "time_arr"].min()
            df.loc[df["dataset"] == dset, "start_time"] = \
                df.loc[df["dataset"] == dset, "time_arr"] - time_zero
    else:
        df["start_time"] = df["time_arr"] - df["time_arr"].min()
    return df.drop(["time", "timestamp", "time_arr"], axis=1, errors="ignore")
python
def calculate_start_time(df):
    """Calculate the start_time per read.

    Time data is either a "time" (in seconds, derived from summary files) or a
    "timestamp" (in UTC, derived from fastq_rich format), and has to be converted
    appropriately into a datetime column time_arr.

    In both cases time_zero is the minimal value of time_arr, which is then
    subtracted from all other times.

    In the case of method=track (and dataset is a column in the df),
    this subtraction is done per dataset.
    """
    if "time" in df:
        df["time_arr"] = pd.Series(df["time"], dtype='datetime64[s]')
    elif "timestamp" in df:
        df["time_arr"] = pd.Series(df["timestamp"], dtype="datetime64[ns]")
    else:
        return df
    if "dataset" in df:
        for dset in df["dataset"].unique():
            time_zero = df.loc[df["dataset"] == dset, "time_arr"].min()
            df.loc[df["dataset"] == dset, "start_time"] = \
                df.loc[df["dataset"] == dset, "time_arr"] - time_zero
    else:
        df["start_time"] = df["time_arr"] - df["time_arr"].min()
    return df.drop(["time", "timestamp", "time_arr"], axis=1, errors="ignore")
[ "def", "calculate_start_time", "(", "df", ")", ":", "if", "\"time\"", "in", "df", ":", "df", "[", "\"time_arr\"", "]", "=", "pd", ".", "Series", "(", "df", "[", "\"time\"", "]", ",", "dtype", "=", "'datetime64[s]'", ")", "elif", "\"timestamp\"", "in", "df", ":", "df", "[", "\"time_arr\"", "]", "=", "pd", ".", "Series", "(", "df", "[", "\"timestamp\"", "]", ",", "dtype", "=", "\"datetime64[ns]\"", ")", "else", ":", "return", "df", "if", "\"dataset\"", "in", "df", ":", "for", "dset", "in", "df", "[", "\"dataset\"", "]", ".", "unique", "(", ")", ":", "time_zero", "=", "df", ".", "loc", "[", "df", "[", "\"dataset\"", "]", "==", "dset", ",", "\"time_arr\"", "]", ".", "min", "(", ")", "df", ".", "loc", "[", "df", "[", "\"dataset\"", "]", "==", "dset", ",", "\"start_time\"", "]", "=", "df", ".", "loc", "[", "df", "[", "\"dataset\"", "]", "==", "dset", ",", "\"time_arr\"", "]", "-", "time_zero", "else", ":", "df", "[", "\"start_time\"", "]", "=", "df", "[", "\"time_arr\"", "]", "-", "df", "[", "\"time_arr\"", "]", ".", "min", "(", ")", "return", "df", ".", "drop", "(", "[", "\"time\"", ",", "\"timestamp\"", ",", "\"time_arr\"", "]", ",", "axis", "=", "1", ",", "errors", "=", "\"ignore\"", ")" ]
Calculate the start_time per read.

    Time data is either a "time" (in seconds, derived from summary files) or a
    "timestamp" (in UTC, derived from fastq_rich format), and has to be converted
    appropriately into a datetime column time_arr.

    In both cases time_zero is the minimal value of time_arr, which is then
    subtracted from all other times.

    In the case of method=track (and dataset is a column in the df),
    this subtraction is done per dataset.
[ "Calculate", "the", "star_time", "per", "read", "." ]
train
https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/nanoget.py#L100-L127
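A worked sketch of calculate_start_time on summary-style input, where "time" holds integer seconds; how those integers are coerced to datetimes depends on the pandas version this code was written against (the constructor treats them as offsets in the given unit), so the comments describe the intended behaviour per the docstring.

import pandas as pd

df = pd.DataFrame({
    "time": [120, 60, 180],        # seconds, as found in summary files
    "dataset": ["a", "a", "b"],    # present when combine="track" was used
})
out = calculate_start_time(df)
# out["start_time"] holds offsets relative to the earliest read per dataset:
# [60s, 0s] for 'a' and [0s] for 'b'; the intermediate time/time_arr
# columns are dropped from the returned frame.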
camptocamp/marabunta
marabunta/parser.py
YamlParser.parser_from_buffer
def parser_from_buffer(cls, fp): """Construct YamlParser from a file pointer.""" yaml = YAML(typ="safe") return cls(yaml.load(fp))
python
def parser_from_buffer(cls, fp): """Construct YamlParser from a file pointer.""" yaml = YAML(typ="safe") return cls(yaml.load(fp))
[ "def", "parser_from_buffer", "(", "cls", ",", "fp", ")", ":", "yaml", "=", "YAML", "(", "typ", "=", "\"safe\"", ")", "return", "cls", "(", "yaml", ".", "load", "(", "fp", ")", ")" ]
Construct YamlParser from a file pointer.
[ "Construct", "YamlParser", "from", "a", "file", "pointer", "." ]
train
https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/parser.py#L87-L90
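A minimal sketch of parser_from_buffer, assuming YamlParser is importable from marabunta.parser; the YAML content is a hypothetical placeholder, not marabunta's actual migration schema. Any file-like object works, since ruamel's safe loader reads from streams.

from io import StringIO
from marabunta.parser import YamlParser  # assumed import path

buf = StringIO("migration:\n  versions: []\n")  # hypothetical document
parser = YamlParser.parser_from_buffer(buf)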