Dataset columns:

  column                      type            values
  repository_name             stringlengths   7 to 55
  func_path_in_repository     stringlengths   4 to 223
  func_name                   stringlengths   1 to 134
  whole_func_string           stringlengths   75 to 104k
  language                    stringclasses   1 value
  func_code_string            stringlengths   75 to 104k
  func_code_tokens            listlengths     19 to 28.4k
  func_documentation_string   stringlengths   1 to 46.9k
  func_documentation_tokens   listlengths     1 to 1.97k
  split_name                  stringclasses   1 value
  func_code_url               stringlengths   87 to 315
GoogleCloudPlatform/httplib2shim
httplib2shim/__init__.py
_map_exception
def _map_exception(e):
    """Maps an exception from urlib3 to httplib2."""
    if isinstance(e, urllib3.exceptions.MaxRetryError):
        if not e.reason:
            return e
        e = e.reason
    message = e.args[0] if e.args else ''
    if isinstance(e, urllib3.exceptions.ResponseError):
        if 'too many redirects' in message:
            return httplib2.RedirectLimit(message)
    if isinstance(e, urllib3.exceptions.NewConnectionError):
        if ('Name or service not known' in message or
                'nodename nor servname provided, or not known' in message):
            return httplib2.ServerNotFoundError(
                'Unable to find hostname.')
        if 'Connection refused' in message:
            return socket.error((errno.ECONNREFUSED, 'Connection refused'))
    if isinstance(e, urllib3.exceptions.DecodeError):
        return httplib2.FailedToDecompressContent(
            'Content purported as compressed but not uncompressable.',
            httplib2.Response({'status': 500}), '')
    if isinstance(e, urllib3.exceptions.TimeoutError):
        return socket.timeout('timed out')
    if isinstance(e, urllib3.exceptions.SSLError):
        return ssl.SSLError(*e.args)
    return e
python
[ "def", "_map_exception", "(", "e", ")", ":", "if", "isinstance", "(", "e", ",", "urllib3", ".", "exceptions", ".", "MaxRetryError", ")", ":", "if", "not", "e", ".", "reason", ":", "return", "e", "e", "=", "e", ".", "reason", "message", "=", "e", ".", "args", "[", "0", "]", "if", "e", ".", "args", "else", "''", "if", "isinstance", "(", "e", ",", "urllib3", ".", "exceptions", ".", "ResponseError", ")", ":", "if", "'too many redirects'", "in", "message", ":", "return", "httplib2", ".", "RedirectLimit", "(", "message", ")", "if", "isinstance", "(", "e", ",", "urllib3", ".", "exceptions", ".", "NewConnectionError", ")", ":", "if", "(", "'Name or service not known'", "in", "message", "or", "'nodename nor servname provided, or not known'", "in", "message", ")", ":", "return", "httplib2", ".", "ServerNotFoundError", "(", "'Unable to find hostname.'", ")", "if", "'Connection refused'", "in", "message", ":", "return", "socket", ".", "error", "(", "(", "errno", ".", "ECONNREFUSED", ",", "'Connection refused'", ")", ")", "if", "isinstance", "(", "e", ",", "urllib3", ".", "exceptions", ".", "DecodeError", ")", ":", "return", "httplib2", ".", "FailedToDecompressContent", "(", "'Content purported as compressed but not uncompressable.'", ",", "httplib2", ".", "Response", "(", "{", "'status'", ":", "500", "}", ")", ",", "''", ")", "if", "isinstance", "(", "e", ",", "urllib3", ".", "exceptions", ".", "TimeoutError", ")", ":", "return", "socket", ".", "timeout", "(", "'timed out'", ")", "if", "isinstance", "(", "e", ",", "urllib3", ".", "exceptions", ".", "SSLError", ")", ":", "return", "ssl", ".", "SSLError", "(", "*", "e", ".", "args", ")", "return", "e" ]
Maps an exception from urlib3 to httplib2.
[ "Maps", "an", "exception", "from", "urlib3", "to", "httplib2", "." ]
train
https://github.com/GoogleCloudPlatform/httplib2shim/blob/e034530c551f11cf5690ef78a24c66087976c310/httplib2shim/__init__.py#L227-L253
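For context, a hedged sketch of how a caller might apply an exception-mapping helper like the one in the row above: wrap a urllib3 request and re-raise the translated error so httplib2-style handling keeps working. The request_with_mapping name and the mapper argument are assumptions for illustration, not part of httplib2shim's public API.

# Illustrative only: request_with_mapping and the explicit map_exception
# argument are invented for this sketch.
import urllib3

def request_with_mapping(pool, url, map_exception):
    try:
        return pool.request('GET', url)
    except urllib3.exceptions.HTTPError as exc:
        # translate the urllib3 error into the httplib2-style exception
        raise map_exception(exc)

# usage sketch:
# pool = urllib3.PoolManager()
# response = request_with_mapping(pool, 'https://example.com', _map_exception)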
zetaops/zengine
zengine/wf_daemon.py
run_workers
def run_workers(no_subprocess, watch_paths=None, is_background=False):
    """
    subprocess handler
    """
    import atexit, os, subprocess, signal

    if watch_paths:
        from watchdog.observers import Observer
        # from watchdog.observers.fsevents import FSEventsObserver as Observer
        # from watchdog.observers.polling import PollingObserver as Observer
        from watchdog.events import FileSystemEventHandler

        def on_modified(event):
            if not is_background:
                print("Restarting worker due to change in %s" % event.src_path)
            log.info("modified %s" % event.src_path)
            try:
                kill_children()
                run_children()
            except:
                log.exception("Error while restarting worker")

        handler = FileSystemEventHandler()
        handler.on_modified = on_modified

    # global child_pids
    child_pids = []
    log.info("starting %s workers" % no_subprocess)

    def run_children():
        global child_pids
        child_pids = []
        for i in range(int(no_subprocess)):
            proc = subprocess.Popen([sys.executable, __file__],
                                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            child_pids.append(proc.pid)
            log.info("Started worker with pid %s" % proc.pid)

    def kill_children():
        """
        kill subprocess on exit of manager (this) process
        """
        log.info("Stopping worker(s)")
        for pid in child_pids:
            if pid is not None:
                os.kill(pid, signal.SIGTERM)

    run_children()
    atexit.register(kill_children)
    signal.signal(signal.SIGTERM, kill_children)

    if watch_paths:
        observer = Observer()
        for path in watch_paths:
            if not is_background:
                print("Watching for changes under %s" % path)
            observer.schedule(handler, path=path, recursive=True)
        observer.start()

    while 1:
        try:
            sleep(1)
        except KeyboardInterrupt:
            log.info("Keyboard interrupt, exiting")
            if watch_paths:
                observer.stop()
                observer.join()
            sys.exit(0)
python
[ "def", "run_workers", "(", "no_subprocess", ",", "watch_paths", "=", "None", ",", "is_background", "=", "False", ")", ":", "import", "atexit", ",", "os", ",", "subprocess", ",", "signal", "if", "watch_paths", ":", "from", "watchdog", ".", "observers", "import", "Observer", "# from watchdog.observers.fsevents import FSEventsObserver as Observer", "# from watchdog.observers.polling import PollingObserver as Observer", "from", "watchdog", ".", "events", "import", "FileSystemEventHandler", "def", "on_modified", "(", "event", ")", ":", "if", "not", "is_background", ":", "print", "(", "\"Restarting worker due to change in %s\"", "%", "event", ".", "src_path", ")", "log", ".", "info", "(", "\"modified %s\"", "%", "event", ".", "src_path", ")", "try", ":", "kill_children", "(", ")", "run_children", "(", ")", "except", ":", "log", ".", "exception", "(", "\"Error while restarting worker\"", ")", "handler", "=", "FileSystemEventHandler", "(", ")", "handler", ".", "on_modified", "=", "on_modified", "# global child_pids", "child_pids", "=", "[", "]", "log", ".", "info", "(", "\"starting %s workers\"", "%", "no_subprocess", ")", "def", "run_children", "(", ")", ":", "global", "child_pids", "child_pids", "=", "[", "]", "for", "i", "in", "range", "(", "int", "(", "no_subprocess", ")", ")", ":", "proc", "=", "subprocess", ".", "Popen", "(", "[", "sys", ".", "executable", ",", "__file__", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "child_pids", ".", "append", "(", "proc", ".", "pid", ")", "log", ".", "info", "(", "\"Started worker with pid %s\"", "%", "proc", ".", "pid", ")", "def", "kill_children", "(", ")", ":", "\"\"\"\n kill subprocess on exit of manager (this) process\n \"\"\"", "log", ".", "info", "(", "\"Stopping worker(s)\"", ")", "for", "pid", "in", "child_pids", ":", "if", "pid", "is", "not", "None", ":", "os", ".", "kill", "(", "pid", ",", "signal", ".", "SIGTERM", ")", "run_children", "(", ")", "atexit", ".", "register", "(", "kill_children", ")", "signal", ".", "signal", "(", "signal", ".", "SIGTERM", ",", "kill_children", ")", "if", "watch_paths", ":", "observer", "=", "Observer", "(", ")", "for", "path", "in", "watch_paths", ":", "if", "not", "is_background", ":", "print", "(", "\"Watching for changes under %s\"", "%", "path", ")", "observer", ".", "schedule", "(", "handler", ",", "path", "=", "path", ",", "recursive", "=", "True", ")", "observer", ".", "start", "(", ")", "while", "1", ":", "try", ":", "sleep", "(", "1", ")", "except", "KeyboardInterrupt", ":", "log", ".", "info", "(", "\"Keyboard interrupt, exiting\"", ")", "if", "watch_paths", ":", "observer", ".", "stop", "(", ")", "observer", ".", "join", "(", ")", "sys", ".", "exit", "(", "0", ")" ]
subprocess handler
[ "subprocess", "handler" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/wf_daemon.py#L235-L300
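The restart logic in the row above builds on the watchdog package; a minimal, self-contained sketch of the same observe-and-react pattern follows. The watched path and the restart callback are placeholders, not values from the repository.

# Minimal watchdog sketch: react to file modifications under a directory.
# The path "." and the lambda callback are placeholders for illustration.
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler

class RestartOnChange(FileSystemEventHandler):
    def __init__(self, restart):
        self.restart = restart

    def on_modified(self, event):
        # called from the observer thread for every modified file or directory
        self.restart()

observer = Observer()
observer.schedule(RestartOnChange(lambda: print("restart workers")), path=".", recursive=True)
observer.start()
try:
    time.sleep(3)
finally:
    observer.stop()
    observer.join()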
zetaops/zengine
zengine/wf_daemon.py
Worker.exit
def exit(self, signal=None, frame=None):
    """
    Properly close the AMQP connections
    """
    self.input_channel.close()
    self.client_queue.close()
    self.connection.close()
    log.info("Worker exiting")
    sys.exit(0)
python
[ "def", "exit", "(", "self", ",", "signal", "=", "None", ",", "frame", "=", "None", ")", ":", "self", ".", "input_channel", ".", "close", "(", ")", "self", ".", "client_queue", ".", "close", "(", ")", "self", ".", "connection", ".", "close", "(", ")", "log", ".", "info", "(", "\"Worker exiting\"", ")", "sys", ".", "exit", "(", "0", ")" ]
Properly close the AMQP connections
[ "Properly", "close", "the", "AMQP", "connections" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/wf_daemon.py#L48-L56
zetaops/zengine
zengine/wf_daemon.py
Worker.connect
def connect(self):
    """
    make amqp connection and create channels and queue binding
    """
    self.connection = pika.BlockingConnection(BLOCKING_MQ_PARAMS)
    self.client_queue = ClientQueue()
    self.input_channel = self.connection.channel()

    self.input_channel.exchange_declare(exchange=self.INPUT_EXCHANGE,
                                        type='topic',
                                        durable=True)
    self.input_channel.queue_declare(queue=self.INPUT_QUEUE_NAME)
    self.input_channel.queue_bind(exchange=self.INPUT_EXCHANGE, queue=self.INPUT_QUEUE_NAME)
    log.info("Bind to queue named '%s' queue with exchange '%s'" % (self.INPUT_QUEUE_NAME,
                                                                    self.INPUT_EXCHANGE))
python
[ "def", "connect", "(", "self", ")", ":", "self", ".", "connection", "=", "pika", ".", "BlockingConnection", "(", "BLOCKING_MQ_PARAMS", ")", "self", ".", "client_queue", "=", "ClientQueue", "(", ")", "self", ".", "input_channel", "=", "self", ".", "connection", ".", "channel", "(", ")", "self", ".", "input_channel", ".", "exchange_declare", "(", "exchange", "=", "self", ".", "INPUT_EXCHANGE", ",", "type", "=", "'topic'", ",", "durable", "=", "True", ")", "self", ".", "input_channel", ".", "queue_declare", "(", "queue", "=", "self", ".", "INPUT_QUEUE_NAME", ")", "self", ".", "input_channel", ".", "queue_bind", "(", "exchange", "=", "self", ".", "INPUT_EXCHANGE", ",", "queue", "=", "self", ".", "INPUT_QUEUE_NAME", ")", "log", ".", "info", "(", "\"Bind to queue named '%s' queue with exchange '%s'\"", "%", "(", "self", ".", "INPUT_QUEUE_NAME", ",", "self", ".", "INPUT_EXCHANGE", ")", ")" ]
make amqp connection and create channels and queue binding
[ "make", "amqp", "connection", "and", "create", "channels", "and", "queue", "binding" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/wf_daemon.py#L58-L72
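A standalone pika sketch of the same declare-and-bind pattern, assuming a local broker and using the newer exchange_type keyword; the exchange and queue names are placeholders, not the zengine settings.

# Hedged sketch: declare a durable topic exchange and bind a queue to it.
# 'input_exc', 'input_queue' and the localhost broker are assumptions.
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.exchange_declare(exchange='input_exc', exchange_type='topic', durable=True)
channel.queue_declare(queue='input_queue')
channel.queue_bind(exchange='input_exc', queue='input_queue')
connection.close()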
zetaops/zengine
zengine/wf_daemon.py
Worker.clear_queue
def clear_queue(self):
    """
    clear outs all messages from INPUT_QUEUE_NAME
    """
    def remove_message(ch, method, properties, body):
        print("Removed message: %s" % body)

    self.input_channel.basic_consume(remove_message,
                                     queue=self.INPUT_QUEUE_NAME,
                                     no_ack=True)
    try:
        self.input_channel.start_consuming()
    except (KeyboardInterrupt, SystemExit):
        log.info(" Exiting")
        self.exit()
python
[ "def", "clear_queue", "(", "self", ")", ":", "def", "remove_message", "(", "ch", ",", "method", ",", "properties", ",", "body", ")", ":", "print", "(", "\"Removed message: %s\"", "%", "body", ")", "self", ".", "input_channel", ".", "basic_consume", "(", "remove_message", ",", "queue", "=", "self", ".", "INPUT_QUEUE_NAME", ",", "no_ack", "=", "True", ")", "try", ":", "self", ".", "input_channel", ".", "start_consuming", "(", ")", "except", "(", "KeyboardInterrupt", ",", "SystemExit", ")", ":", "log", ".", "info", "(", "\" Exiting\"", ")", "self", ".", "exit", "(", ")" ]
clear outs all messages from INPUT_QUEUE_NAME
[ "clear", "outs", "all", "messages", "from", "INPUT_QUEUE_NAME" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/wf_daemon.py#L76-L87
zetaops/zengine
zengine/wf_daemon.py
Worker.run
def run(self):
    """
    actual consuming of incoming works starts here
    """
    self.input_channel.basic_consume(self.handle_message,
                                     queue=self.INPUT_QUEUE_NAME,
                                     no_ack=True
                                     )
    try:
        self.input_channel.start_consuming()
    except (KeyboardInterrupt, SystemExit):
        log.info(" Exiting")
        self.exit()
python
[ "def", "run", "(", "self", ")", ":", "self", ".", "input_channel", ".", "basic_consume", "(", "self", ".", "handle_message", ",", "queue", "=", "self", ".", "INPUT_QUEUE_NAME", ",", "no_ack", "=", "True", ")", "try", ":", "self", ".", "input_channel", ".", "start_consuming", "(", ")", "except", "(", "KeyboardInterrupt", ",", "SystemExit", ")", ":", "log", ".", "info", "(", "\" Exiting\"", ")", "self", ".", "exit", "(", ")" ]
actual consuming of incoming works starts here
[ "actual", "consuming", "of", "incoming", "works", "starts", "here" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/wf_daemon.py#L89-L101
zetaops/zengine
zengine/wf_daemon.py
Worker.handle_message
def handle_message(self, ch, method, properties, body):
    """
    this is a pika.basic_consumer callback
    handles client inputs, runs appropriate workflows and views

    Args:
        ch: amqp channel
        method: amqp method
        properties:
        body: message body
    """
    input = {}
    headers = {}
    try:
        self.sessid = method.routing_key

        input = json_decode(body)
        data = input['data']

        # since this comes as "path" we dont know if it's view or workflow yet
        # TODO: just a workaround till we modify ui to
        if 'path' in data:
            if data['path'] in VIEW_METHODS:
                data['view'] = data['path']
            else:
                data['wf'] = data['path']
        session = Session(self.sessid)

        headers = {'remote_ip': input['_zops_remote_ip'],
                   'source': input['_zops_source']}

        if 'wf' in data:
            output = self._handle_workflow(session, data, headers)
        elif 'job' in data:
            self._handle_job(session, data, headers)
            return
        else:
            output = self._handle_view(session, data, headers)

    except HTTPError as e:
        import sys
        if hasattr(sys, '_called_from_test'):
            raise
        output = {"cmd": "error", "error": self._prepare_error_msg(e.message), "code": e.code}
        log.exception("Http error occurred")
    except:
        self.current = Current(session=session, input=data)
        self.current.headers = headers
        import sys
        if hasattr(sys, '_called_from_test'):
            raise
        err = traceback.format_exc()
        output = {"cmd": "error", "error": self._prepare_error_msg(err), "code": 500}
        log.exception("Worker error occurred with messsage body:\n%s" % body)

    if 'callbackID' in input:
        output['callbackID'] = input['callbackID']
    log.info("OUTPUT for %s: %s" % (self.sessid, output))
    output['reply_timestamp'] = time()
    self.send_output(output)
python
[ "def", "handle_message", "(", "self", ",", "ch", ",", "method", ",", "properties", ",", "body", ")", ":", "input", "=", "{", "}", "headers", "=", "{", "}", "try", ":", "self", ".", "sessid", "=", "method", ".", "routing_key", "input", "=", "json_decode", "(", "body", ")", "data", "=", "input", "[", "'data'", "]", "# since this comes as \"path\" we dont know if it's view or workflow yet", "# TODO: just a workaround till we modify ui to", "if", "'path'", "in", "data", ":", "if", "data", "[", "'path'", "]", "in", "VIEW_METHODS", ":", "data", "[", "'view'", "]", "=", "data", "[", "'path'", "]", "else", ":", "data", "[", "'wf'", "]", "=", "data", "[", "'path'", "]", "session", "=", "Session", "(", "self", ".", "sessid", ")", "headers", "=", "{", "'remote_ip'", ":", "input", "[", "'_zops_remote_ip'", "]", ",", "'source'", ":", "input", "[", "'_zops_source'", "]", "}", "if", "'wf'", "in", "data", ":", "output", "=", "self", ".", "_handle_workflow", "(", "session", ",", "data", ",", "headers", ")", "elif", "'job'", "in", "data", ":", "self", ".", "_handle_job", "(", "session", ",", "data", ",", "headers", ")", "return", "else", ":", "output", "=", "self", ".", "_handle_view", "(", "session", ",", "data", ",", "headers", ")", "except", "HTTPError", "as", "e", ":", "import", "sys", "if", "hasattr", "(", "sys", ",", "'_called_from_test'", ")", ":", "raise", "output", "=", "{", "\"cmd\"", ":", "\"error\"", ",", "\"error\"", ":", "self", ".", "_prepare_error_msg", "(", "e", ".", "message", ")", ",", "\"code\"", ":", "e", ".", "code", "}", "log", ".", "exception", "(", "\"Http error occurred\"", ")", "except", ":", "self", ".", "current", "=", "Current", "(", "session", "=", "session", ",", "input", "=", "data", ")", "self", ".", "current", ".", "headers", "=", "headers", "import", "sys", "if", "hasattr", "(", "sys", ",", "'_called_from_test'", ")", ":", "raise", "err", "=", "traceback", ".", "format_exc", "(", ")", "output", "=", "{", "\"cmd\"", ":", "\"error\"", ",", "\"error\"", ":", "self", ".", "_prepare_error_msg", "(", "err", ")", ",", "\"code\"", ":", "500", "}", "log", ".", "exception", "(", "\"Worker error occurred with messsage body:\\n%s\"", "%", "body", ")", "if", "'callbackID'", "in", "input", ":", "output", "[", "'callbackID'", "]", "=", "input", "[", "'callbackID'", "]", "log", ".", "info", "(", "\"OUTPUT for %s: %s\"", "%", "(", "self", ".", "sessid", ",", "output", ")", ")", "output", "[", "'reply_timestamp'", "]", "=", "time", "(", ")", "self", ".", "send_output", "(", "output", ")" ]
this is a pika.basic_consumer callback
handles client inputs, runs appropriate workflows and views

Args:
    ch: amqp channel
    method: amqp method
    properties:
    body: message body
[ "this", "is", "a", "pika", ".", "basic_consumer", "callback", "handles", "client", "inputs", "runs", "appropriate", "workflows", "and", "views" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/wf_daemon.py#L165-L224
zetaops/zengine
zengine/models/workflow_manager.py
get_progress
def get_progress(start, finish):
    """
    Args:
        start (DateTime): start date
        finish (DateTime): finish date

    Returns:

    """
    now = datetime.now()
    dif_time_start = start - now
    dif_time_finish = finish - now
    if dif_time_start.days < 0 and dif_time_finish.days < 0:
        return PROGRESS_STATES[3][0]
    elif dif_time_start.days < 0 and dif_time_finish.days >= 1:
        return PROGRESS_STATES[2][0]
    elif dif_time_start.days >= 1 and dif_time_finish.days >= 1:
        return PROGRESS_STATES[0][0]
    else:
        return PROGRESS_STATES[2][0]
python
[ "def", "get_progress", "(", "start", ",", "finish", ")", ":", "now", "=", "datetime", ".", "now", "(", ")", "dif_time_start", "=", "start", "-", "now", "dif_time_finish", "=", "finish", "-", "now", "if", "dif_time_start", ".", "days", "<", "0", "and", "dif_time_finish", ".", "days", "<", "0", ":", "return", "PROGRESS_STATES", "[", "3", "]", "[", "0", "]", "elif", "dif_time_start", ".", "days", "<", "0", "and", "dif_time_finish", ".", "days", ">=", "1", ":", "return", "PROGRESS_STATES", "[", "2", "]", "[", "0", "]", "elif", "dif_time_start", ".", "days", ">=", "1", "and", "dif_time_finish", ".", "days", ">=", "1", ":", "return", "PROGRESS_STATES", "[", "0", "]", "[", "0", "]", "else", ":", "return", "PROGRESS_STATES", "[", "2", "]", "[", "0", "]" ]
Args:
    start (DateTime): start date
    finish (DateTime): finish date

Returns:
[ "Args", ":", "start", "(", "DateTime", ")", ":", "start", "date", "finish", "(", "DateTime", ")", ":", "finish", "date", "Returns", ":" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/models/workflow_manager.py#L230-L249
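The branching in the row above relies on timedelta.days turning negative once a date lies in the past; a quick sketch of that behaviour with arbitrary example dates:

# timedelta.days is negative for past dates, which is what the
# get_progress branches key on. The offsets below are arbitrary.
from datetime import datetime, timedelta

now = datetime.now()
past = now - timedelta(days=2)
future = now + timedelta(days=3)

print((past - now).days)    # -2: negative once the date is in the past
print((future - now).days)  # 3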
zetaops/zengine
zengine/models/workflow_manager.py
sync_wf_cache
def sync_wf_cache(current):
    """
    BG Job for storing wf state to DB
    """
    wf_cache = WFCache(current)
    wf_state = wf_cache.get()  # unicode serialized json to dict, all values are unicode
    if 'role_id' in wf_state:
        # role_id inserted by engine, so it's a sign that we get it from cache not db
        try:
            wfi = WFInstance.objects.get(key=current.input['token'])
        except ObjectDoesNotExist:
            # wf's that not started from a task invitation
            wfi = WFInstance(key=current.input['token'])
            wfi.wf = BPMNWorkflow.objects.get(name=wf_state['name'])
        if not wfi.current_actor.exist:
            # we just started the wf
            try:
                inv = TaskInvitation.objects.get(instance=wfi, role_id=wf_state['role_id'])
                inv.delete_other_invitations()
                inv.progress = 20
                inv.save()
            except ObjectDoesNotExist:
                current.log.exception("Invitation not found: %s" % wf_state)
            except MultipleObjectsReturned:
                current.log.exception("Multiple invitations found: %s" % wf_state)
        wfi.step = wf_state['step']
        wfi.name = wf_state['name']
        wfi.pool = wf_state['pool']
        wfi.current_actor_id = str(wf_state['role_id'])  # keys must be str not unicode
        wfi.data = wf_state['data']
        if wf_state['finished']:
            wfi.finished = True
            wfi.finish_date = wf_state['finish_date']
            wf_cache.delete()
        wfi.save()
    else:
        # if cache already cleared, we have nothing to sync
        pass
python
[ "def", "sync_wf_cache", "(", "current", ")", ":", "wf_cache", "=", "WFCache", "(", "current", ")", "wf_state", "=", "wf_cache", ".", "get", "(", ")", "# unicode serialized json to dict, all values are unicode", "if", "'role_id'", "in", "wf_state", ":", "# role_id inserted by engine, so it's a sign that we get it from cache not db", "try", ":", "wfi", "=", "WFInstance", ".", "objects", ".", "get", "(", "key", "=", "current", ".", "input", "[", "'token'", "]", ")", "except", "ObjectDoesNotExist", ":", "# wf's that not started from a task invitation", "wfi", "=", "WFInstance", "(", "key", "=", "current", ".", "input", "[", "'token'", "]", ")", "wfi", ".", "wf", "=", "BPMNWorkflow", ".", "objects", ".", "get", "(", "name", "=", "wf_state", "[", "'name'", "]", ")", "if", "not", "wfi", ".", "current_actor", ".", "exist", ":", "# we just started the wf", "try", ":", "inv", "=", "TaskInvitation", ".", "objects", ".", "get", "(", "instance", "=", "wfi", ",", "role_id", "=", "wf_state", "[", "'role_id'", "]", ")", "inv", ".", "delete_other_invitations", "(", ")", "inv", ".", "progress", "=", "20", "inv", ".", "save", "(", ")", "except", "ObjectDoesNotExist", ":", "current", ".", "log", ".", "exception", "(", "\"Invitation not found: %s\"", "%", "wf_state", ")", "except", "MultipleObjectsReturned", ":", "current", ".", "log", ".", "exception", "(", "\"Multiple invitations found: %s\"", "%", "wf_state", ")", "wfi", ".", "step", "=", "wf_state", "[", "'step'", "]", "wfi", ".", "name", "=", "wf_state", "[", "'name'", "]", "wfi", ".", "pool", "=", "wf_state", "[", "'pool'", "]", "wfi", ".", "current_actor_id", "=", "str", "(", "wf_state", "[", "'role_id'", "]", ")", "# keys must be str not unicode", "wfi", ".", "data", "=", "wf_state", "[", "'data'", "]", "if", "wf_state", "[", "'finished'", "]", ":", "wfi", ".", "finished", "=", "True", "wfi", ".", "finish_date", "=", "wf_state", "[", "'finish_date'", "]", "wf_cache", ".", "delete", "(", ")", "wfi", ".", "save", "(", ")", "else", ":", "# if cache already cleared, we have nothing to sync", "pass" ]
BG Job for storing wf state to DB
[ "BG", "Job", "for", "storing", "wf", "state", "to", "DB" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/models/workflow_manager.py#L759-L797
zetaops/zengine
zengine/models/workflow_manager.py
DiagramXML.get_or_create_by_content
def get_or_create_by_content(cls, name, content):
    """
    if xml content updated, create a new entry for given wf name
    Args:
        name: name of wf
        content: xml content

    Returns (DiagramXML(), bool): A tuple with two members.
    (DiagramXML instance and True if it's new or False it's already exists
    """
    new = False
    diagrams = cls.objects.filter(name=name)
    if diagrams:
        diagram = diagrams[0]
        if diagram.body != content:
            new = True
    else:
        new = True
    if new:
        diagram = cls(name=name, body=content).save()
    return diagram, new
python
[ "def", "get_or_create_by_content", "(", "cls", ",", "name", ",", "content", ")", ":", "new", "=", "False", "diagrams", "=", "cls", ".", "objects", ".", "filter", "(", "name", "=", "name", ")", "if", "diagrams", ":", "diagram", "=", "diagrams", "[", "0", "]", "if", "diagram", ".", "body", "!=", "content", ":", "new", "=", "True", "else", ":", "new", "=", "True", "if", "new", ":", "diagram", "=", "cls", "(", "name", "=", "name", ",", "body", "=", "content", ")", ".", "save", "(", ")", "return", "diagram", ",", "new" ]
if xml content updated, create a new entry for given wf name
Args:
    name: name of wf
    content: xml content

Returns (DiagramXML(), bool): A tuple with two members.
(DiagramXML instance and True if it's new or False it's already exists
[ "if", "xml", "content", "updated", "create", "a", "new", "entry", "for", "given", "wf", "name", "Args", ":", "name", ":", "name", "of", "wf", "content", ":", "xml", "content" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/models/workflow_manager.py#L43-L63
zetaops/zengine
zengine/models/workflow_manager.py
BPMNParser.get_description
def get_description(self):
    """
    Tries to get WF description from 'collabration' or 'process' or 'pariticipant'

    Returns str: WF description
    """
    paths = ['bpmn:collaboration/bpmn:participant/bpmn:documentation',
             'bpmn:collaboration/bpmn:documentation',
             'bpmn:process/bpmn:documentation']
    for path in paths:
        elm = self.root.find(path, NS)
        if elm is not None and elm.text:
            return elm.text
python
[ "def", "get_description", "(", "self", ")", ":", "paths", "=", "[", "'bpmn:collaboration/bpmn:participant/bpmn:documentation'", ",", "'bpmn:collaboration/bpmn:documentation'", ",", "'bpmn:process/bpmn:documentation'", "]", "for", "path", "in", "paths", ":", "elm", "=", "self", ".", "root", ".", "find", "(", "path", ",", "NS", ")", "if", "elm", "is", "not", "None", "and", "elm", ".", "text", ":", "return", "elm", ".", "text" ]
Tries to get WF description from 'collabration' or 'process' or 'pariticipant'

Returns str: WF description
[ "Tries", "to", "get", "WF", "description", "from", "collabration", "or", "process", "or", "pariticipant" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/models/workflow_manager.py#L96-L109
zetaops/zengine
zengine/models/workflow_manager.py
BPMNParser.get_name
def get_name(self):
    """
    Tries to get WF name from 'process' or 'collobration' or 'pariticipant'

    Returns:
        str. WF name.
    """
    paths = ['bpmn:process',
             'bpmn:collaboration/bpmn:participant/',
             'bpmn:collaboration',
             ]
    for path in paths:
        tag = self.root.find(path, NS)
        if tag is not None and len(tag):
            name = tag.get('name')
            if name:
                return name
python
[ "def", "get_name", "(", "self", ")", ":", "paths", "=", "[", "'bpmn:process'", ",", "'bpmn:collaboration/bpmn:participant/'", ",", "'bpmn:collaboration'", ",", "]", "for", "path", "in", "paths", ":", "tag", "=", "self", ".", "root", ".", "find", "(", "path", ",", "NS", ")", "if", "tag", "is", "not", "None", "and", "len", "(", "tag", ")", ":", "name", "=", "tag", ".", "get", "(", "'name'", ")", "if", "name", ":", "return", "name" ]
Tries to get WF name from 'process' or 'collobration' or 'pariticipant'

Returns:
    str. WF name.
[ "Tries", "to", "get", "WF", "name", "from", "process", "or", "collobration", "or", "pariticipant" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/models/workflow_manager.py#L111-L127
zetaops/zengine
zengine/models/workflow_manager.py
BPMNWorkflow.set_xml
def set_xml(self, diagram, force=False):
    """
    updates xml link if there aren't any running instances of this wf
    Args:
        diagram: XMLDiagram object
    """
    no_of_running = WFInstance.objects.filter(wf=self, finished=False, started=True).count()
    if no_of_running and not force:
        raise RunningInstancesExist(
            "Can't update WF diagram! Running %s WF instances exists for %s" % (
                no_of_running, self.name
            ))
    else:
        self.xml = diagram
        parser = BPMNParser(diagram.body)
        self.description = parser.get_description()
        self.title = parser.get_name() or self.name.replace('_', ' ').title()
        extensions = dict(parser.get_wf_extensions())
        self.programmable = extensions.get('programmable', False)
        self.task_type = extensions.get('task_type', None)
        self.menu_category = extensions.get('menu_category', settings.DEFAULT_WF_CATEGORY_NAME)
        self.save()
python
[ "def", "set_xml", "(", "self", ",", "diagram", ",", "force", "=", "False", ")", ":", "no_of_running", "=", "WFInstance", ".", "objects", ".", "filter", "(", "wf", "=", "self", ",", "finished", "=", "False", ",", "started", "=", "True", ")", ".", "count", "(", ")", "if", "no_of_running", "and", "not", "force", ":", "raise", "RunningInstancesExist", "(", "\"Can't update WF diagram! Running %s WF instances exists for %s\"", "%", "(", "no_of_running", ",", "self", ".", "name", ")", ")", "else", ":", "self", ".", "xml", "=", "diagram", "parser", "=", "BPMNParser", "(", "diagram", ".", "body", ")", "self", ".", "description", "=", "parser", ".", "get_description", "(", ")", "self", ".", "title", "=", "parser", ".", "get_name", "(", ")", "or", "self", ".", "name", ".", "replace", "(", "'_'", ",", "' '", ")", ".", "title", "(", ")", "extensions", "=", "dict", "(", "parser", ".", "get_wf_extensions", "(", ")", ")", "self", ".", "programmable", "=", "extensions", ".", "get", "(", "'programmable'", ",", "False", ")", "self", ".", "task_type", "=", "extensions", ".", "get", "(", "'task_type'", ",", "None", ")", "self", ".", "menu_category", "=", "extensions", ".", "get", "(", "'menu_category'", ",", "settings", ".", "DEFAULT_WF_CATEGORY_NAME", ")", "self", ".", "save", "(", ")" ]
updates xml link if there aren't any running instances of this wf
Args:
    diagram: XMLDiagram object
[ "updates", "xml", "link", "if", "there", "aren", "t", "any", "running", "instances", "of", "this", "wf", "Args", ":", "diagram", ":", "XMLDiagram", "object" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/models/workflow_manager.py#L184-L205
zetaops/zengine
zengine/models/workflow_manager.py
Task.create_wf_instances
def create_wf_instances(self, roles=None):
    """
    Creates wf instances.

    Args:
        roles (list): role list

    Returns:
        (list): wf instances
    """
    # if roles specified then create an instance for each role
    # else create only one instance
    if roles:
        wf_instances = [
            WFInstance(
                wf=self.wf,
                current_actor=role,
                task=self,
                name=self.wf.name
            ) for role in roles
        ]
    else:
        wf_instances = [
            WFInstance(
                wf=self.wf,
                task=self,
                name=self.wf.name
            )
        ]

    # if task type is not related with objects save instances immediately.
    if self.task_type in ["C", "D"]:
        return [wfi.save() for wfi in wf_instances]
    # if task type is related with its objects, save populate instances per object
    else:
        wf_obj_instances = []
        for wfi in wf_instances:
            role = wfi.current_actor if self.task_type == "A" else None
            keys = self.get_object_keys(role)
            wf_obj_instances.extend(
                [WFInstance(
                    wf=self.wf,
                    current_actor=role,
                    task=self,
                    name=self.wf.name,
                    wf_object=key,
                    wf_object_type=self.object_type
                ).save() for key in keys]
            )
        return wf_obj_instances
python
[ "def", "create_wf_instances", "(", "self", ",", "roles", "=", "None", ")", ":", "# if roles specified then create an instance for each role", "# else create only one instance", "if", "roles", ":", "wf_instances", "=", "[", "WFInstance", "(", "wf", "=", "self", ".", "wf", ",", "current_actor", "=", "role", ",", "task", "=", "self", ",", "name", "=", "self", ".", "wf", ".", "name", ")", "for", "role", "in", "roles", "]", "else", ":", "wf_instances", "=", "[", "WFInstance", "(", "wf", "=", "self", ".", "wf", ",", "task", "=", "self", ",", "name", "=", "self", ".", "wf", ".", "name", ")", "]", "# if task type is not related with objects save instances immediately.", "if", "self", ".", "task_type", "in", "[", "\"C\"", ",", "\"D\"", "]", ":", "return", "[", "wfi", ".", "save", "(", ")", "for", "wfi", "in", "wf_instances", "]", "# if task type is related with its objects, save populate instances per object", "else", ":", "wf_obj_instances", "=", "[", "]", "for", "wfi", "in", "wf_instances", ":", "role", "=", "wfi", ".", "current_actor", "if", "self", ".", "task_type", "==", "\"A\"", "else", "None", "keys", "=", "self", ".", "get_object_keys", "(", "role", ")", "wf_obj_instances", ".", "extend", "(", "[", "WFInstance", "(", "wf", "=", "self", ".", "wf", ",", "current_actor", "=", "role", ",", "task", "=", "self", ",", "name", "=", "self", ".", "wf", ".", "name", ",", "wf_object", "=", "key", ",", "wf_object_type", "=", "self", ".", "object_type", ")", ".", "save", "(", ")", "for", "key", "in", "keys", "]", ")", "return", "wf_obj_instances" ]
Creates wf instances.

Args:
    roles (list): role list

Returns:
    (list): wf instances
[ "Creates", "wf", "instances", ".", "Args", ":", "roles", "(", "list", ")", ":", "role", "list" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/models/workflow_manager.py#L286-L338
zetaops/zengine
zengine/models/workflow_manager.py
Task.create_tasks
def create_tasks(self):
    """
    will create a WFInstance per object
    and per TaskInvitation for each role and WFInstance
    """
    roles = self.get_roles()

    if self.task_type in ["A", "D"]:
        instances = self.create_wf_instances(roles=roles)
        self.create_task_invitation(instances)
    elif self.task_type in ["C", "B"]:
        instances = self.create_wf_instances()
        self.create_task_invitation(instances, roles)
python
[ "def", "create_tasks", "(", "self", ")", ":", "roles", "=", "self", ".", "get_roles", "(", ")", "if", "self", ".", "task_type", "in", "[", "\"A\"", ",", "\"D\"", "]", ":", "instances", "=", "self", ".", "create_wf_instances", "(", "roles", "=", "roles", ")", "self", ".", "create_task_invitation", "(", "instances", ")", "elif", "self", ".", "task_type", "in", "[", "\"C\"", ",", "\"B\"", "]", ":", "instances", "=", "self", ".", "create_wf_instances", "(", ")", "self", ".", "create_task_invitation", "(", "instances", ",", "roles", ")" ]
will create a WFInstance per object and per TaskInvitation for each role and WFInstance
[ "will", "create", "a", "WFInstance", "per", "object", "and", "per", "TaskInvitation", "for", "each", "role", "and", "WFInstance" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/models/workflow_manager.py#L354-L367
zetaops/zengine
zengine/models/workflow_manager.py
Task.get_object_query_dict
def get_object_query_dict(self):
    """returns objects keys according to self.object_query_code
    which can be json encoded queryset filter dict or key=value set
    in the following format: ```"key=val, key2 = val2 , key3= value with spaces"```

    Returns:
        (dict): Queryset filtering dicqt
    """
    if isinstance(self.object_query_code, dict):
        # _DATE_ _DATETIME_
        return self.object_query_code
    else:
        # comma separated, key=value pairs. wrapping spaces will be ignored
        # eg: "key=val, key2 = val2 , key3= value with spaces"
        return dict(pair.split('=') for pair in self.object_query_code.split(','))
python
[ "def", "get_object_query_dict", "(", "self", ")", ":", "if", "isinstance", "(", "self", ".", "object_query_code", ",", "dict", ")", ":", "# _DATE_ _DATETIME_", "return", "self", ".", "object_query_code", "else", ":", "# comma separated, key=value pairs. wrapping spaces will be ignored", "# eg: \"key=val, key2 = val2 , key3= value with spaces\"", "return", "dict", "(", "pair", ".", "split", "(", "'='", ")", "for", "pair", "in", "self", ".", "object_query_code", ".", "split", "(", "','", ")", ")" ]
returns objects keys according to self.object_query_code
which can be json encoded queryset filter dict or key=value set
in the following format: ```"key=val, key2 = val2 , key3= value with spaces"```

Returns:
    (dict): Queryset filtering dicqt
[ "returns", "objects", "keys", "according", "to", "self", ".", "object_query_code", "which", "can", "be", "json", "encoded", "queryset", "filter", "dict", "or", "key", "=", "value", "set", "in", "the", "following", "format", ":", "key", "=", "val", "key2", "=", "val2", "key3", "=", "value", "with", "spaces" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/models/workflow_manager.py#L369-L383
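A small sketch of the "key=val, key2 = val2" format described in the docstring above. The explicit whitespace stripping is an addition for illustration; the stored function splits on '=' and ',' only.

# Sketch of parsing the comma-separated key=value format mentioned above.
# parse_query_code is an invented helper name; stripping is illustrative.
def parse_query_code(code):
    pairs = (pair.split('=', 1) for pair in code.split(','))
    return {key.strip(): value.strip() for key, value in pairs}

print(parse_query_code("key=val, key2 = val2 , key3= value with spaces"))
# {'key': 'val', 'key2': 'val2', 'key3': 'value with spaces'}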
zetaops/zengine
zengine/models/workflow_manager.py
Task.get_object_keys
def get_object_keys(self, wfi_role=None):
    """returns object keys according to task definition
    which can be explicitly selected one object (self.object_key) or
    result of a queryset filter.

    Returns:
        list of object keys
    """
    if self.object_key:
        return [self.object_key]
    if self.object_query_code:
        model = model_registry.get_model(self.object_type)
        return [m.key for m in self.get_model_objects(model, wfi_role,
                                                      **self.get_object_query_dict())]
python
[ "def", "get_object_keys", "(", "self", ",", "wfi_role", "=", "None", ")", ":", "if", "self", ".", "object_key", ":", "return", "[", "self", ".", "object_key", "]", "if", "self", ".", "object_query_code", ":", "model", "=", "model_registry", ".", "get_model", "(", "self", ".", "object_type", ")", "return", "[", "m", ".", "key", "for", "m", "in", "self", ".", "get_model_objects", "(", "model", ",", "wfi_role", ",", "*", "*", "self", ".", "get_object_query_dict", "(", ")", ")", "]" ]
returns object keys according to task definition
which can be explicitly selected one object (self.object_key) or
result of a queryset filter.

Returns:
    list of object keys
[ "returns", "object", "keys", "according", "to", "task", "definition", "which", "can", "be", "explicitly", "selected", "one", "object", "(", "self", ".", "object_key", ")", "or", "result", "of", "a", "queryset", "filter", "." ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/models/workflow_manager.py#L385-L398
zetaops/zengine
zengine/models/workflow_manager.py
Task.get_model_objects
def get_model_objects(model, wfi_role=None, **kwargs):
    """
    Fetches model objects by filtering with kwargs

    If wfi_role is specified, then we expect kwargs contains a
    filter value starting with role, e.g. {'user': 'role.program.user'}

    We replace this `role` key with role instance parameter `wfi_role` and try to get object
    that filter value 'role.program.user' points by iterating `getattribute`. At the end
    filter argument becomes {'user': user}.

    Args:
        model (Model): Model class
        wfi_role (Role): role instance of wf instance
        **kwargs: filter arguments

    Returns:
        (list): list of model object instances
    """
    query_dict = {}
    for k, v in kwargs.items():
        if isinstance(v, list):
            query_dict[k] = [str(x) for x in v]
        else:
            parse = str(v).split('.')
            if parse[0] == 'role' and wfi_role:
                query_dict[k] = wfi_role
                for i in range(1, len(parse)):
                    query_dict[k] = query_dict[k].__getattribute__(parse[i])
            else:
                query_dict[k] = parse[0]

    return model.objects.all(**query_dict)
python
[ "def", "get_model_objects", "(", "model", ",", "wfi_role", "=", "None", ",", "*", "*", "kwargs", ")", ":", "query_dict", "=", "{", "}", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "list", ")", ":", "query_dict", "[", "k", "]", "=", "[", "str", "(", "x", ")", "for", "x", "in", "v", "]", "else", ":", "parse", "=", "str", "(", "v", ")", ".", "split", "(", "'.'", ")", "if", "parse", "[", "0", "]", "==", "'role'", "and", "wfi_role", ":", "query_dict", "[", "k", "]", "=", "wfi_role", "for", "i", "in", "range", "(", "1", ",", "len", "(", "parse", ")", ")", ":", "query_dict", "[", "k", "]", "=", "query_dict", "[", "k", "]", ".", "__getattribute__", "(", "parse", "[", "i", "]", ")", "else", ":", "query_dict", "[", "k", "]", "=", "parse", "[", "0", "]", "return", "model", ".", "objects", ".", "all", "(", "*", "*", "query_dict", ")" ]
Fetches model objects by filtering with kwargs

If wfi_role is specified, then we expect kwargs contains a
filter value starting with role, e.g. {'user': 'role.program.user'}

We replace this `role` key with role instance parameter `wfi_role` and try to get object
that filter value 'role.program.user' points by iterating `getattribute`. At the end
filter argument becomes {'user': user}.

Args:
    model (Model): Model class
    wfi_role (Role): role instance of wf instance
    **kwargs: filter arguments

Returns:
    (list): list of model object instances
[ "Fetches", "model", "objects", "by", "filtering", "with", "kwargs" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/models/workflow_manager.py#L401-L435
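The dotted 'role.program.user' lookup described above is plain attribute chaining; a self-contained sketch of that resolution step follows. The Role, Program and User classes are invented stand-ins, not zengine models.

# Illustration of resolving a dotted path like 'role.program.user' via
# chained attribute access. All classes here are made up for the example.
class User:
    name = "alice"

class Program:
    user = User()

class Role:
    program = Program()

def resolve(root, dotted):
    value = root
    for part in dotted.split('.')[1:]:  # skip the leading 'role'
        value = getattr(value, part)
    return value

print(resolve(Role(), 'role.program.user').name)  # alice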
zetaops/zengine
zengine/models/workflow_manager.py
Task.get_roles
def get_roles(self):
    """
    Returns:
        Role instances according to task definition.
    """
    if self.role.exist:
        # return explicitly selected role
        return [self.role]
    else:
        roles = []
        if self.role_query_code:
            # use given "role_query_code"
            roles = RoleModel.objects.filter(**self.role_query_code)
        elif self.unit.exist:
            # get roles from selected unit or sub-units of it
            if self.recursive_units:
                # this returns a list, we're converting it to a Role generator!
                roles = (RoleModel.objects.get(k) for k in UnitModel.get_role_keys(self.unit.key))
            else:
                roles = RoleModel.objects.filter(unit=self.unit)
        elif self.get_roles_from:
            # get roles from selected predefined "get_roles_from" method
            return ROLE_GETTER_METHODS[self.get_roles_from](RoleModel)

        if self.abstract_role.exist and roles:
            # apply abstract_role filtering on roles we got
            if isinstance(roles, (list, types.GeneratorType)):
                roles = [a for a in roles if a.abstract_role.key == self.abstract_role.key]
            else:
                roles = roles.filter(abstract_role=self.abstract_role)
        else:
            roles = RoleModel.objects.filter(abstract_role=self.abstract_role)
        return roles
python
[ "def", "get_roles", "(", "self", ")", ":", "if", "self", ".", "role", ".", "exist", ":", "# return explicitly selected role", "return", "[", "self", ".", "role", "]", "else", ":", "roles", "=", "[", "]", "if", "self", ".", "role_query_code", ":", "# use given \"role_query_code\"", "roles", "=", "RoleModel", ".", "objects", ".", "filter", "(", "*", "*", "self", ".", "role_query_code", ")", "elif", "self", ".", "unit", ".", "exist", ":", "# get roles from selected unit or sub-units of it", "if", "self", ".", "recursive_units", ":", "# this returns a list, we're converting it to a Role generator!", "roles", "=", "(", "RoleModel", ".", "objects", ".", "get", "(", "k", ")", "for", "k", "in", "UnitModel", ".", "get_role_keys", "(", "self", ".", "unit", ".", "key", ")", ")", "else", ":", "roles", "=", "RoleModel", ".", "objects", ".", "filter", "(", "unit", "=", "self", ".", "unit", ")", "elif", "self", ".", "get_roles_from", ":", "# get roles from selected predefined \"get_roles_from\" method", "return", "ROLE_GETTER_METHODS", "[", "self", ".", "get_roles_from", "]", "(", "RoleModel", ")", "if", "self", ".", "abstract_role", ".", "exist", "and", "roles", ":", "# apply abstract_role filtering on roles we got", "if", "isinstance", "(", "roles", ",", "(", "list", ",", "types", ".", "GeneratorType", ")", ")", ":", "roles", "=", "[", "a", "for", "a", "in", "roles", "if", "a", ".", "abstract_role", ".", "key", "==", "self", ".", "abstract_role", ".", "key", "]", "else", ":", "roles", "=", "roles", ".", "filter", "(", "abstract_role", "=", "self", ".", "abstract_role", ")", "else", ":", "roles", "=", "RoleModel", ".", "objects", ".", "filter", "(", "abstract_role", "=", "self", ".", "abstract_role", ")", "return", "roles" ]
Returns: Role instances according to task definition.
[ "Returns", ":", "Role", "instances", "according", "to", "task", "definition", "." ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/models/workflow_manager.py#L437-L471
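A minimal illustration of the role-resolution precedence that the get_roles record above encodes (explicit role, then role_query_code, then unit membership, then a registered getter, with an optional abstract-role filter at the end). This is a sketch over plain dicts and lists, not zengine's RoleModel/UnitModel API; every name below (resolve_roles, role_query, role_getters and the field names) is a hypothetical stand-in, and the recursive-unit branch is deliberately omitted.

def resolve_roles(task, all_roles, role_getters):
    """task: dict with optional keys role, role_query, unit, get_roles_from, abstract_role."""
    if task.get("role"):                              # explicitly selected role wins outright
        return [task["role"]]
    if task.get("role_query"):                        # arbitrary filter, e.g. {"name": "approver"}
        roles = [r for r in all_roles
                 if all(r.get(k) == v for k, v in task["role_query"].items())]
    elif task.get("unit"):                            # everyone attached to a unit
        roles = [r for r in all_roles if r["unit"] == task["unit"]]
    elif task.get("get_roles_from"):                  # predefined getter method
        return role_getters[task["get_roles_from"]](all_roles)
    else:
        roles = []
    if task.get("abstract_role"):                     # optional abstract-role narrowing
        pool = roles if roles else all_roles
        roles = [r for r in pool if r["abstract_role"] == task["abstract_role"]]
    return roles

roles = [{"name": "approver", "unit": "HR", "abstract_role": "manager"},
         {"name": "clerk", "unit": "HR", "abstract_role": "staff"}]
print(resolve_roles({"unit": "HR", "abstract_role": "manager"}, roles, {}))
# -> [{'name': 'approver', 'unit': 'HR', 'abstract_role': 'manager'}]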
zetaops/zengine
zengine/models/workflow_manager.py
Task.post_save
def post_save(self): """can be removed when a proper task manager admin interface implemented""" if self.run: self.run = False self.create_tasks() self.save()
python
def post_save(self): """can be removed when a proper task manager admin interface implemented""" if self.run: self.run = False self.create_tasks() self.save()
[ "def", "post_save", "(", "self", ")", ":", "if", "self", ".", "run", ":", "self", ".", "run", "=", "False", "self", ".", "create_tasks", "(", ")", "self", ".", "save", "(", ")" ]
can be removed when a proper task manager admin interface is implemented
[ "can", "be", "removed", "when", "a", "proper", "task", "manager", "admin", "interface", "is", "implemented" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/models/workflow_manager.py#L553-L558
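The post_save record above is a one-shot trigger: a boolean run flag is consumed, task invitations are created, and the record is persisted again. A toy sketch of that pattern, with Task, create_tasks and save as stand-ins for zengine's model machinery:

class Task:
    def __init__(self):
        self.run = False                 # manual trigger, normally set from an admin screen
        self.saved = 0

    def create_tasks(self):
        print("creating task invitations ...")

    def save(self):
        self.saved += 1                  # stands in for persisting the record

    def post_save(self):
        if self.run:
            self.run = False             # consume the flag before doing the work
            self.create_tasks()
            self.save()                  # persist the cleared flag

t = Task()
t.run = True
t.post_save()                            # triggers create_tasks() exactly once
t.post_save()                            # no-op: the flag was already consumed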
zetaops/zengine
zengine/models/workflow_manager.py
TaskInvitation.delete_other_invitations
def delete_other_invitations(self): """ When one person use an invitation, we should delete other invitations """ # TODO: Signal logged-in users to remove the task from their task list self.objects.filter(instance=self.instance).exclude(key=self.key).delete()
python
def delete_other_invitations(self): """ When one person use an invitation, we should delete other invitations """ # TODO: Signal logged-in users to remove the task from their task list self.objects.filter(instance=self.instance).exclude(key=self.key).delete()
[ "def", "delete_other_invitations", "(", "self", ")", ":", "# TODO: Signal logged-in users to remove the task from their task list", "self", ".", "objects", ".", "filter", "(", "instance", "=", "self", ".", "instance", ")", ".", "exclude", "(", "key", "=", "self", ".", "key", ")", ".", "delete", "(", ")" ]
When one person uses an invitation, we should delete other invitations
[ "When", "one", "person", "uses", "an", "invitation", "we", "should", "delete", "other", "invitations" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/models/workflow_manager.py#L664-L669
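The delete_other_invitations record above keeps the used invitation and removes its siblings for the same workflow instance. A sketch of the same filter over a plain list (the field names are stand-ins, not zengine's schema):

def surviving_invitations(invitations, instance_key, used_key):
    """Keep the used invitation plus anything belonging to other workflow instances."""
    return [inv for inv in invitations
            if inv["instance"] != instance_key or inv["key"] == used_key]

invitations = [
    {"key": "a", "instance": "wf-1"},   # the invitation that was used
    {"key": "b", "instance": "wf-1"},   # sibling invitation: removed
    {"key": "c", "instance": "wf-2"},   # different workflow instance: untouched
]
print(surviving_invitations(invitations, "wf-1", "a"))
# -> [{'key': 'a', 'instance': 'wf-1'}, {'key': 'c', 'instance': 'wf-2'}]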
zetaops/zengine
zengine/models/workflow_manager.py
WFCache.save
def save(self, wf_state): """ write wf state to DB through MQ >> Worker >> _zops_sync_wf_cache Args: wf_state dict: wf state """ self.wf_state = wf_state self.wf_state['role_id'] = self.current.role_id self.set(self.wf_state) if self.wf_state['name'] not in settings.EPHEMERAL_WORKFLOWS: self.publish(job='_zops_sync_wf_cache', token=self.db_key)
python
def save(self, wf_state): """ write wf state to DB through MQ >> Worker >> _zops_sync_wf_cache Args: wf_state dict: wf state """ self.wf_state = wf_state self.wf_state['role_id'] = self.current.role_id self.set(self.wf_state) if self.wf_state['name'] not in settings.EPHEMERAL_WORKFLOWS: self.publish(job='_zops_sync_wf_cache', token=self.db_key)
[ "def", "save", "(", "self", ",", "wf_state", ")", ":", "self", ".", "wf_state", "=", "wf_state", "self", ".", "wf_state", "[", "'role_id'", "]", "=", "self", ".", "current", ".", "role_id", "self", ".", "set", "(", "self", ".", "wf_state", ")", "if", "self", ".", "wf_state", "[", "'name'", "]", "not", "in", "settings", ".", "EPHEMERAL_WORKFLOWS", ":", "self", ".", "publish", "(", "job", "=", "'_zops_sync_wf_cache'", ",", "token", "=", "self", ".", "db_key", ")" ]
write wf state to DB through MQ >> Worker >> _zops_sync_wf_cache Args: wf_state dict: wf state
[ "write", "wf", "state", "to", "DB", "through", "MQ", ">>", "Worker", ">>", "_zops_sync_wf_cache" ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/models/workflow_manager.py#L743-L755
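WFCache.save above is a write-through pattern: update the fast cache immediately, then enqueue a background job that syncs the state to the database, unless the workflow is ephemeral. A minimal sketch with a plain dict as the cache and queue.Queue standing in for the message broker; the EPHEMERAL_WORKFLOWS contents and the class details are assumptions:

import json
import queue

EPHEMERAL_WORKFLOWS = {"login", "logout"}      # assumption: placeholder workflow names
sync_jobs = queue.Queue()                      # stands in for the MQ channel to the worker

class WFCache:
    def __init__(self, db_key, role_id, store):
        self.db_key = db_key
        self.role_id = role_id
        self.store = store                     # a dict used as the fast cache

    def save(self, wf_state):
        wf_state["role_id"] = self.role_id     # tag the state with the acting role
        self.store[self.db_key] = json.dumps(wf_state)
        if wf_state["name"] not in EPHEMERAL_WORKFLOWS:
            # defer the expensive DB write to a worker instead of doing it inline
            sync_jobs.put(("_zops_sync_wf_cache", self.db_key))

cache = WFCache("wf:42", role_id="role-7", store={})
cache.save({"name": "leave_request", "step": "approve"})
print(sync_jobs.get())                          # ('_zops_sync_wf_cache', 'wf:42')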
zetaops/zengine
zengine/client_queue.py
ClientQueue.send_to_default_exchange
def send_to_default_exchange(self, sess_id, message=None): """ Send messages through RabbitMQ's default exchange, which will be delivered through routing_key (sess_id). This method only used for un-authenticated users, i.e. login process. Args: sess_id string: Session id message dict: Message object. """ msg = json.dumps(message, cls=ZEngineJSONEncoder) log.debug("Sending following message to %s queue through default exchange:\n%s" % ( sess_id, msg)) self.get_channel().publish(exchange='', routing_key=sess_id, body=msg)
python
def send_to_default_exchange(self, sess_id, message=None): """ Send messages through RabbitMQ's default exchange, which will be delivered through routing_key (sess_id). This method only used for un-authenticated users, i.e. login process. Args: sess_id string: Session id message dict: Message object. """ msg = json.dumps(message, cls=ZEngineJSONEncoder) log.debug("Sending following message to %s queue through default exchange:\n%s" % ( sess_id, msg)) self.get_channel().publish(exchange='', routing_key=sess_id, body=msg)
[ "def", "send_to_default_exchange", "(", "self", ",", "sess_id", ",", "message", "=", "None", ")", ":", "msg", "=", "json", ".", "dumps", "(", "message", ",", "cls", "=", "ZEngineJSONEncoder", ")", "log", ".", "debug", "(", "\"Sending following message to %s queue through default exchange:\\n%s\"", "%", "(", "sess_id", ",", "msg", ")", ")", "self", ".", "get_channel", "(", ")", ".", "publish", "(", "exchange", "=", "''", ",", "routing_key", "=", "sess_id", ",", "body", "=", "msg", ")" ]
Send messages through RabbitMQ's default exchange, which will be delivered through routing_key (sess_id). This method is only used for un-authenticated users, i.e. the login process. Args: sess_id string: Session id message dict: Message object.
[ "Send", "messages", "through", "RabbitMQ", "s", "default", "exchange", "which", "will", "be", "delivered", "through", "routing_key", "(", "sess_id", ")", "." ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/client_queue.py#L59-L73
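The record above publishes to RabbitMQ's default exchange so that the routing key (the session id) selects the destination queue. A sketch of the same pattern written directly against the pika client rather than zengine's channel wrapper; the connection parameters and the queue declaration are assumptions:

import json
import pika

def send_to_session(sess_id, message):
    connection = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
    channel = connection.channel()
    channel.queue_declare(queue=sess_id)             # ensure the per-session queue exists
    channel.basic_publish(exchange="",               # '' is the default exchange
                          routing_key=sess_id,       # routed to the queue named sess_id
                          body=json.dumps(message))
    connection.close()

# send_to_session("sess-3f2a", {"cmd": "login_ok"})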
zetaops/zengine
zengine/client_queue.py
ClientQueue.send_to_prv_exchange
def send_to_prv_exchange(self, user_id, message=None): """ Send messages through logged in users private exchange. Args: user_id string: User key message dict: Message object """ exchange = 'prv_%s' % user_id.lower() msg = json.dumps(message, cls=ZEngineJSONEncoder) log.debug("Sending following users \"%s\" exchange:\n%s " % (exchange, msg)) self.get_channel().publish(exchange=exchange, routing_key='', body=msg)
python
def send_to_prv_exchange(self, user_id, message=None): """ Send messages through logged in users private exchange. Args: user_id string: User key message dict: Message object """ exchange = 'prv_%s' % user_id.lower() msg = json.dumps(message, cls=ZEngineJSONEncoder) log.debug("Sending following users \"%s\" exchange:\n%s " % (exchange, msg)) self.get_channel().publish(exchange=exchange, routing_key='', body=msg)
[ "def", "send_to_prv_exchange", "(", "self", ",", "user_id", ",", "message", "=", "None", ")", ":", "exchange", "=", "'prv_%s'", "%", "user_id", ".", "lower", "(", ")", "msg", "=", "json", ".", "dumps", "(", "message", ",", "cls", "=", "ZEngineJSONEncoder", ")", "log", ".", "debug", "(", "\"Sending following users \\\"%s\\\" exchange:\\n%s \"", "%", "(", "exchange", ",", "msg", ")", ")", "self", ".", "get_channel", "(", ")", ".", "publish", "(", "exchange", "=", "exchange", ",", "routing_key", "=", "''", ",", "body", "=", "msg", ")" ]
Send messages through logged in users private exchange. Args: user_id string: User key message dict: Message object
[ "Send", "messages", "through", "logged", "in", "users", "private", "exchange", "." ]
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/client_queue.py#L75-L87
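The companion method above targets a per-user exchange named prv_<user_id> with an empty routing key, so every queue bound to that exchange (every open session of the user) receives the message. A pika-based sketch; the fanout exchange type and the connection details are assumptions, not taken from zengine:

import json
import pika

def send_to_user(user_id, message):
    exchange = "prv_%s" % user_id.lower()
    connection = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
    channel = connection.channel()
    channel.exchange_declare(exchange=exchange, exchange_type="fanout")  # one exchange per user
    channel.basic_publish(exchange=exchange, routing_key="", body=json.dumps(message))
    connection.close()

# send_to_user("USER-1", {"cmd": "notification", "title": "Task assigned"})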
cimm-kzn/CGRtools
CGRtools/algorithms/compose.py
Compose.compose
def compose(self, other): """ compose 2 graphs to CGR :param other: Molecule or CGR Container :return: CGRContainer """ if not isinstance(other, Compose): raise TypeError('CGRContainer or MoleculeContainer [sub]class expected') cgr = self._get_subclass('CGRContainer') common = self._node.keys() & other if not common: if not (isinstance(self, cgr) or isinstance(other, cgr)): return cgr() | self | other return self | other unique_reactant = self._node.keys() - common unique_product = other._node.keys() - common h = cgr() atoms = h._node bonds = [] common_adj = {n: {} for n in common} common_bonds = [] r_atoms = {} r_skin = defaultdict(list) if isinstance(self, cgr): for n in unique_reactant: h.add_atom(self._node[n], n) for m, bond in self._adj[n].items(): if m not in atoms: if m in common: # bond to common atoms is broken bond r_bond = bond._reactant if r_bond is None: # skip None>None continue r_skin[n].append(m) bond = DynBond.__new__(DynBond) bond.__init_copy__(r_bond, None) bonds.append((n, m, bond)) for n in common: r_atoms[n] = self._node[n]._reactant for m, bond in self._adj[n].items(): if m not in r_atoms and m in common: tmp = [bond._reactant, None] common_adj[n][m] = common_adj[m][n] = tmp common_bonds.append((n, m, tmp)) else: for n in unique_reactant: atom = DynAtom.__new__(DynAtom) # add unique atom into CGR atom.__init_copy__(self._node[n], self._node[n]) h.add_atom(atom, n) for m, r_bond in self._adj[n].items(): # unique atom neighbors if m not in atoms: # bond not analyzed yet bond = DynBond.__new__(DynBond) if m in common: # bond to common atoms r_skin[n].append(m) bond.__init_copy__(r_bond, None) else: # bond static bond.__init_copy__(r_bond, r_bond) bonds.append((n, m, bond)) for n in common: r_atoms[n] = self._node[n] for m, bond in self._adj[n].items(): if m not in r_atoms and m in common: # analyze only common atoms bonds tmp = [bond, None] # reactant state only common_adj[n][m] = common_adj[m][n] = tmp common_bonds.append((n, m, tmp)) p_atoms = {} p_skin = defaultdict(list) if isinstance(other, cgr): for n in unique_product: h.add_atom(other._node[n], n) for m, bond in other._adj[n].items(): if m not in atoms: if m in common: # bond to common atoms is new bond p_bond = bond._product if p_bond is None: # skip None>None continue p_skin[n].append(m) bond = DynBond.__new__(DynBond) bond.__init_copy__(None, p_bond) bonds.append((n, m, bond)) for n in common: p_atoms[n] = other._node[n]._product n_bonds = common_adj[n] for m, bond in other._adj[n].items(): if m in n_bonds: n_bonds[m][1] = bond._product elif m not in p_atoms and m in common: # new bond of reaction p_bond = bond._product if p_bond is None: # skip None>None continue bond = DynBond.__new__(DynBond) bond.__init_copy__(None, p_bond) bonds.append((n, m, bond)) else: for n in unique_product: atom = DynAtom.__new__(DynAtom) atom.__init_copy__(other._node[n], other._node[n]) h.add_atom(atom, n) for m, p_bond in other._adj[n].items(): if m not in atoms: bond = DynBond.__new__(DynBond) if m in common: p_skin[n].append(m) bond.__init_copy__(None, p_bond) else: bond.__init_copy__(p_bond, p_bond) bonds.append((n, m, bond)) for n in common: p_atoms[n] = other._node[n] n_bonds = common_adj[n] for m, p_bond in other._adj[n].items(): if m in n_bonds: # set product state of changed bond n_bonds[m][1] = p_bond elif m not in p_atoms and m in common: # new bond of reaction bond = DynBond.__new__(DynBond) bond.__init_copy__(None, p_bond) bonds.append((n, m, bond)) for n, r_atom in r_atoms.items(): # prepare common DynAtom's p_atom 
= p_atoms[n] if r_atom.element != p_atom.element or r_atom.isotope != p_atom.isotope: raise ValueError('atom-to-atom mapping invalid') atom = DynAtom.__new__(DynAtom) atom.__init_copy__(r_atom, p_atom) h.add_atom(atom, n) for n, m, (r_bond, p_bond) in common_bonds: if r_bond is p_bond is None: # skip None>None continue bond = DynBond.__new__(DynBond) bond.__init_copy__(r_bond, p_bond) h.add_bond(n, m, bond) for n, m, bond in bonds: h.add_bond(n, m, bond) return h
python
def compose(self, other): """ compose 2 graphs to CGR :param other: Molecule or CGR Container :return: CGRContainer """ if not isinstance(other, Compose): raise TypeError('CGRContainer or MoleculeContainer [sub]class expected') cgr = self._get_subclass('CGRContainer') common = self._node.keys() & other if not common: if not (isinstance(self, cgr) or isinstance(other, cgr)): return cgr() | self | other return self | other unique_reactant = self._node.keys() - common unique_product = other._node.keys() - common h = cgr() atoms = h._node bonds = [] common_adj = {n: {} for n in common} common_bonds = [] r_atoms = {} r_skin = defaultdict(list) if isinstance(self, cgr): for n in unique_reactant: h.add_atom(self._node[n], n) for m, bond in self._adj[n].items(): if m not in atoms: if m in common: # bond to common atoms is broken bond r_bond = bond._reactant if r_bond is None: # skip None>None continue r_skin[n].append(m) bond = DynBond.__new__(DynBond) bond.__init_copy__(r_bond, None) bonds.append((n, m, bond)) for n in common: r_atoms[n] = self._node[n]._reactant for m, bond in self._adj[n].items(): if m not in r_atoms and m in common: tmp = [bond._reactant, None] common_adj[n][m] = common_adj[m][n] = tmp common_bonds.append((n, m, tmp)) else: for n in unique_reactant: atom = DynAtom.__new__(DynAtom) # add unique atom into CGR atom.__init_copy__(self._node[n], self._node[n]) h.add_atom(atom, n) for m, r_bond in self._adj[n].items(): # unique atom neighbors if m not in atoms: # bond not analyzed yet bond = DynBond.__new__(DynBond) if m in common: # bond to common atoms r_skin[n].append(m) bond.__init_copy__(r_bond, None) else: # bond static bond.__init_copy__(r_bond, r_bond) bonds.append((n, m, bond)) for n in common: r_atoms[n] = self._node[n] for m, bond in self._adj[n].items(): if m not in r_atoms and m in common: # analyze only common atoms bonds tmp = [bond, None] # reactant state only common_adj[n][m] = common_adj[m][n] = tmp common_bonds.append((n, m, tmp)) p_atoms = {} p_skin = defaultdict(list) if isinstance(other, cgr): for n in unique_product: h.add_atom(other._node[n], n) for m, bond in other._adj[n].items(): if m not in atoms: if m in common: # bond to common atoms is new bond p_bond = bond._product if p_bond is None: # skip None>None continue p_skin[n].append(m) bond = DynBond.__new__(DynBond) bond.__init_copy__(None, p_bond) bonds.append((n, m, bond)) for n in common: p_atoms[n] = other._node[n]._product n_bonds = common_adj[n] for m, bond in other._adj[n].items(): if m in n_bonds: n_bonds[m][1] = bond._product elif m not in p_atoms and m in common: # new bond of reaction p_bond = bond._product if p_bond is None: # skip None>None continue bond = DynBond.__new__(DynBond) bond.__init_copy__(None, p_bond) bonds.append((n, m, bond)) else: for n in unique_product: atom = DynAtom.__new__(DynAtom) atom.__init_copy__(other._node[n], other._node[n]) h.add_atom(atom, n) for m, p_bond in other._adj[n].items(): if m not in atoms: bond = DynBond.__new__(DynBond) if m in common: p_skin[n].append(m) bond.__init_copy__(None, p_bond) else: bond.__init_copy__(p_bond, p_bond) bonds.append((n, m, bond)) for n in common: p_atoms[n] = other._node[n] n_bonds = common_adj[n] for m, p_bond in other._adj[n].items(): if m in n_bonds: # set product state of changed bond n_bonds[m][1] = p_bond elif m not in p_atoms and m in common: # new bond of reaction bond = DynBond.__new__(DynBond) bond.__init_copy__(None, p_bond) bonds.append((n, m, bond)) for n, r_atom in r_atoms.items(): # prepare common DynAtom's p_atom 
= p_atoms[n] if r_atom.element != p_atom.element or r_atom.isotope != p_atom.isotope: raise ValueError('atom-to-atom mapping invalid') atom = DynAtom.__new__(DynAtom) atom.__init_copy__(r_atom, p_atom) h.add_atom(atom, n) for n, m, (r_bond, p_bond) in common_bonds: if r_bond is p_bond is None: # skip None>None continue bond = DynBond.__new__(DynBond) bond.__init_copy__(r_bond, p_bond) h.add_bond(n, m, bond) for n, m, bond in bonds: h.add_bond(n, m, bond) return h
[ "def", "compose", "(", "self", ",", "other", ")", ":", "if", "not", "isinstance", "(", "other", ",", "Compose", ")", ":", "raise", "TypeError", "(", "'CGRContainer or MoleculeContainer [sub]class expected'", ")", "cgr", "=", "self", ".", "_get_subclass", "(", "'CGRContainer'", ")", "common", "=", "self", ".", "_node", ".", "keys", "(", ")", "&", "other", "if", "not", "common", ":", "if", "not", "(", "isinstance", "(", "self", ",", "cgr", ")", "or", "isinstance", "(", "other", ",", "cgr", ")", ")", ":", "return", "cgr", "(", ")", "|", "self", "|", "other", "return", "self", "|", "other", "unique_reactant", "=", "self", ".", "_node", ".", "keys", "(", ")", "-", "common", "unique_product", "=", "other", ".", "_node", ".", "keys", "(", ")", "-", "common", "h", "=", "cgr", "(", ")", "atoms", "=", "h", ".", "_node", "bonds", "=", "[", "]", "common_adj", "=", "{", "n", ":", "{", "}", "for", "n", "in", "common", "}", "common_bonds", "=", "[", "]", "r_atoms", "=", "{", "}", "r_skin", "=", "defaultdict", "(", "list", ")", "if", "isinstance", "(", "self", ",", "cgr", ")", ":", "for", "n", "in", "unique_reactant", ":", "h", ".", "add_atom", "(", "self", ".", "_node", "[", "n", "]", ",", "n", ")", "for", "m", ",", "bond", "in", "self", ".", "_adj", "[", "n", "]", ".", "items", "(", ")", ":", "if", "m", "not", "in", "atoms", ":", "if", "m", "in", "common", ":", "# bond to common atoms is broken bond", "r_bond", "=", "bond", ".", "_reactant", "if", "r_bond", "is", "None", ":", "# skip None>None", "continue", "r_skin", "[", "n", "]", ".", "append", "(", "m", ")", "bond", "=", "DynBond", ".", "__new__", "(", "DynBond", ")", "bond", ".", "__init_copy__", "(", "r_bond", ",", "None", ")", "bonds", ".", "append", "(", "(", "n", ",", "m", ",", "bond", ")", ")", "for", "n", "in", "common", ":", "r_atoms", "[", "n", "]", "=", "self", ".", "_node", "[", "n", "]", ".", "_reactant", "for", "m", ",", "bond", "in", "self", ".", "_adj", "[", "n", "]", ".", "items", "(", ")", ":", "if", "m", "not", "in", "r_atoms", "and", "m", "in", "common", ":", "tmp", "=", "[", "bond", ".", "_reactant", ",", "None", "]", "common_adj", "[", "n", "]", "[", "m", "]", "=", "common_adj", "[", "m", "]", "[", "n", "]", "=", "tmp", "common_bonds", ".", "append", "(", "(", "n", ",", "m", ",", "tmp", ")", ")", "else", ":", "for", "n", "in", "unique_reactant", ":", "atom", "=", "DynAtom", ".", "__new__", "(", "DynAtom", ")", "# add unique atom into CGR", "atom", ".", "__init_copy__", "(", "self", ".", "_node", "[", "n", "]", ",", "self", ".", "_node", "[", "n", "]", ")", "h", ".", "add_atom", "(", "atom", ",", "n", ")", "for", "m", ",", "r_bond", "in", "self", ".", "_adj", "[", "n", "]", ".", "items", "(", ")", ":", "# unique atom neighbors", "if", "m", "not", "in", "atoms", ":", "# bond not analyzed yet", "bond", "=", "DynBond", ".", "__new__", "(", "DynBond", ")", "if", "m", "in", "common", ":", "# bond to common atoms", "r_skin", "[", "n", "]", ".", "append", "(", "m", ")", "bond", ".", "__init_copy__", "(", "r_bond", ",", "None", ")", "else", ":", "# bond static", "bond", ".", "__init_copy__", "(", "r_bond", ",", "r_bond", ")", "bonds", ".", "append", "(", "(", "n", ",", "m", ",", "bond", ")", ")", "for", "n", "in", "common", ":", "r_atoms", "[", "n", "]", "=", "self", ".", "_node", "[", "n", "]", "for", "m", ",", "bond", "in", "self", ".", "_adj", "[", "n", "]", ".", "items", "(", ")", ":", "if", "m", "not", "in", "r_atoms", "and", "m", "in", "common", ":", "# analyze only common atoms bonds", "tmp", "=", "[", "bond", ",", "None", "]", 
"# reactant state only", "common_adj", "[", "n", "]", "[", "m", "]", "=", "common_adj", "[", "m", "]", "[", "n", "]", "=", "tmp", "common_bonds", ".", "append", "(", "(", "n", ",", "m", ",", "tmp", ")", ")", "p_atoms", "=", "{", "}", "p_skin", "=", "defaultdict", "(", "list", ")", "if", "isinstance", "(", "other", ",", "cgr", ")", ":", "for", "n", "in", "unique_product", ":", "h", ".", "add_atom", "(", "other", ".", "_node", "[", "n", "]", ",", "n", ")", "for", "m", ",", "bond", "in", "other", ".", "_adj", "[", "n", "]", ".", "items", "(", ")", ":", "if", "m", "not", "in", "atoms", ":", "if", "m", "in", "common", ":", "# bond to common atoms is new bond", "p_bond", "=", "bond", ".", "_product", "if", "p_bond", "is", "None", ":", "# skip None>None", "continue", "p_skin", "[", "n", "]", ".", "append", "(", "m", ")", "bond", "=", "DynBond", ".", "__new__", "(", "DynBond", ")", "bond", ".", "__init_copy__", "(", "None", ",", "p_bond", ")", "bonds", ".", "append", "(", "(", "n", ",", "m", ",", "bond", ")", ")", "for", "n", "in", "common", ":", "p_atoms", "[", "n", "]", "=", "other", ".", "_node", "[", "n", "]", ".", "_product", "n_bonds", "=", "common_adj", "[", "n", "]", "for", "m", ",", "bond", "in", "other", ".", "_adj", "[", "n", "]", ".", "items", "(", ")", ":", "if", "m", "in", "n_bonds", ":", "n_bonds", "[", "m", "]", "[", "1", "]", "=", "bond", ".", "_product", "elif", "m", "not", "in", "p_atoms", "and", "m", "in", "common", ":", "# new bond of reaction", "p_bond", "=", "bond", ".", "_product", "if", "p_bond", "is", "None", ":", "# skip None>None", "continue", "bond", "=", "DynBond", ".", "__new__", "(", "DynBond", ")", "bond", ".", "__init_copy__", "(", "None", ",", "p_bond", ")", "bonds", ".", "append", "(", "(", "n", ",", "m", ",", "bond", ")", ")", "else", ":", "for", "n", "in", "unique_product", ":", "atom", "=", "DynAtom", ".", "__new__", "(", "DynAtom", ")", "atom", ".", "__init_copy__", "(", "other", ".", "_node", "[", "n", "]", ",", "other", ".", "_node", "[", "n", "]", ")", "h", ".", "add_atom", "(", "atom", ",", "n", ")", "for", "m", ",", "p_bond", "in", "other", ".", "_adj", "[", "n", "]", ".", "items", "(", ")", ":", "if", "m", "not", "in", "atoms", ":", "bond", "=", "DynBond", ".", "__new__", "(", "DynBond", ")", "if", "m", "in", "common", ":", "p_skin", "[", "n", "]", ".", "append", "(", "m", ")", "bond", ".", "__init_copy__", "(", "None", ",", "p_bond", ")", "else", ":", "bond", ".", "__init_copy__", "(", "p_bond", ",", "p_bond", ")", "bonds", ".", "append", "(", "(", "n", ",", "m", ",", "bond", ")", ")", "for", "n", "in", "common", ":", "p_atoms", "[", "n", "]", "=", "other", ".", "_node", "[", "n", "]", "n_bonds", "=", "common_adj", "[", "n", "]", "for", "m", ",", "p_bond", "in", "other", ".", "_adj", "[", "n", "]", ".", "items", "(", ")", ":", "if", "m", "in", "n_bonds", ":", "# set product state of changed bond", "n_bonds", "[", "m", "]", "[", "1", "]", "=", "p_bond", "elif", "m", "not", "in", "p_atoms", "and", "m", "in", "common", ":", "# new bond of reaction", "bond", "=", "DynBond", ".", "__new__", "(", "DynBond", ")", "bond", ".", "__init_copy__", "(", "None", ",", "p_bond", ")", "bonds", ".", "append", "(", "(", "n", ",", "m", ",", "bond", ")", ")", "for", "n", ",", "r_atom", "in", "r_atoms", ".", "items", "(", ")", ":", "# prepare common DynAtom's", "p_atom", "=", "p_atoms", "[", "n", "]", "if", "r_atom", ".", "element", "!=", "p_atom", ".", "element", "or", "r_atom", ".", "isotope", "!=", "p_atom", ".", "isotope", ":", "raise", "ValueError", "(", 
"'atom-to-atom mapping invalid'", ")", "atom", "=", "DynAtom", ".", "__new__", "(", "DynAtom", ")", "atom", ".", "__init_copy__", "(", "r_atom", ",", "p_atom", ")", "h", ".", "add_atom", "(", "atom", ",", "n", ")", "for", "n", ",", "m", ",", "(", "r_bond", ",", "p_bond", ")", "in", "common_bonds", ":", "if", "r_bond", "is", "p_bond", "is", "None", ":", "# skip None>None", "continue", "bond", "=", "DynBond", ".", "__new__", "(", "DynBond", ")", "bond", ".", "__init_copy__", "(", "r_bond", ",", "p_bond", ")", "h", ".", "add_bond", "(", "n", ",", "m", ",", "bond", ")", "for", "n", ",", "m", ",", "bond", "in", "bonds", ":", "h", ".", "add_bond", "(", "n", ",", "m", ",", "bond", ")", "return", "h" ]
compose 2 graphs to CGR :param other: Molecule or CGR Container :return: CGRContainer
[ "compose", "2", "graphs", "to", "CGR" ]
train
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/algorithms/compose.py#L30-L172
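Compose.compose above classifies atoms as reactant-only, product-only or common, and stores each bond as a reactant/product pair so that broken, formed and unchanged bonds can be told apart. A greatly simplified sketch of that idea on bare dictionaries (atom id -> element, frozenset of atom ids -> bond order); it ignores CGRtools' DynAtom/DynBond objects, charges and the skin bookkeeping:

def compose(reactant, product):
    r_atoms, r_bonds = reactant
    p_atoms, p_bonds = product
    common = r_atoms.keys() & p_atoms.keys()
    for n in common:                                   # atom-to-atom mapping sanity check
        if r_atoms[n] != p_atoms[n]:
            raise ValueError("atom-to-atom mapping invalid")
    atoms = {**r_atoms, **p_atoms}                     # union of both atom sets
    dyn_bonds = {}
    for pair, order in r_bonds.items():
        if pair <= common:                             # both atoms mapped: may change or break
            dyn_bonds[pair] = (order, p_bonds.get(pair))
        elif pair & common:                            # bond to a reactant-only atom: broken
            dyn_bonds[pair] = (order, None)
        else:                                          # purely reactant-side: static
            dyn_bonds[pair] = (order, order)
    for pair, order in p_bonds.items():
        if pair in dyn_bonds:
            continue
        if pair & common:                              # bond formed in the products
            dyn_bonds[pair] = (None, order)
        else:                                          # purely product-side: static
            dyn_bonds[pair] = (order, order)
    return atoms, dyn_bonds

# H2 + Br2 -> 2 HBr with mapping 1,2 = H and 3,4 = Br:
reactant = ({1: "H", 2: "H", 3: "Br", 4: "Br"},
            {frozenset({1, 2}): 1, frozenset({3, 4}): 1})
product = ({1: "H", 2: "H", 3: "Br", 4: "Br"},
           {frozenset({1, 3}): 1, frozenset({2, 4}): 1})
atoms, dyn_bonds = compose(reactant, product)
print(dyn_bonds)  # H-H and Br-Br become (1, None); the new H-Br bonds become (None, 1)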
cimm-kzn/CGRtools
CGRtools/algorithms/compose.py
CGRCompose.decompose
def decompose(self): """ decompose CGR to pair of Molecules, which represents reactants and products state of reaction :return: tuple of two molecules """ mc = self._get_subclass('MoleculeContainer') reactants = mc() products = mc() for n, atom in self.atoms(): reactants.add_atom(atom._reactant, n) products.add_atom(atom._product, n) for n, m, bond in self.bonds(): if bond._reactant is not None: reactants.add_bond(n, m, bond._reactant) if bond._product is not None: products.add_bond(n, m, bond._product) return reactants, products
python
def decompose(self): """ decompose CGR to pair of Molecules, which represents reactants and products state of reaction :return: tuple of two molecules """ mc = self._get_subclass('MoleculeContainer') reactants = mc() products = mc() for n, atom in self.atoms(): reactants.add_atom(atom._reactant, n) products.add_atom(atom._product, n) for n, m, bond in self.bonds(): if bond._reactant is not None: reactants.add_bond(n, m, bond._reactant) if bond._product is not None: products.add_bond(n, m, bond._product) return reactants, products
[ "def", "decompose", "(", "self", ")", ":", "mc", "=", "self", ".", "_get_subclass", "(", "'MoleculeContainer'", ")", "reactants", "=", "mc", "(", ")", "products", "=", "mc", "(", ")", "for", "n", ",", "atom", "in", "self", ".", "atoms", "(", ")", ":", "reactants", ".", "add_atom", "(", "atom", ".", "_reactant", ",", "n", ")", "products", ".", "add_atom", "(", "atom", ".", "_product", ",", "n", ")", "for", "n", ",", "m", ",", "bond", "in", "self", ".", "bonds", "(", ")", ":", "if", "bond", ".", "_reactant", "is", "not", "None", ":", "reactants", ".", "add_bond", "(", "n", ",", "m", ",", "bond", ".", "_reactant", ")", "if", "bond", ".", "_product", "is", "not", "None", ":", "products", ".", "add_bond", "(", "n", ",", "m", ",", "bond", ".", "_product", ")", "return", "reactants", ",", "products" ]
decompose CGR to pair of Molecules, which represents reactants and products state of reaction :return: tuple of two molecules
[ "decompose", "CGR", "to", "pair", "of", "Molecules", "which", "represents", "reactants", "and", "products", "state", "of", "reaction" ]
train
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/algorithms/compose.py#L182-L201
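decompose above is the inverse operation: each dynamic bond is split back into its reactant and product states, dropping the missing side. The same simplified dictionary representation from the previous sketch is assumed here:

def decompose(cgr):
    atoms, dyn_bonds = cgr
    reactant_bonds = {pair: r for pair, (r, p) in dyn_bonds.items() if r is not None}
    product_bonds = {pair: p for pair, (r, p) in dyn_bonds.items() if p is not None}
    return (dict(atoms), reactant_bonds), (dict(atoms), product_bonds)

cgr = ({1: "H", 2: "H", 3: "Br", 4: "Br"},
       {frozenset({1, 2}): (1, None), frozenset({3, 4}): (1, None),
        frozenset({1, 3}): (None, 1), frozenset({2, 4}): (None, 1)})
reactants, products = decompose(cgr)
print(reactants[1])  # {frozenset({1, 2}): 1, frozenset({3, 4}): 1}
print(products[1])   # {frozenset({1, 3}): 1, frozenset({2, 4}): 1}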
rnwolf/jira-metrics-extract
jira_metrics_extract/cycletime.py
CycleTimeQueries.cycle_data
def cycle_data(self, verbose=False, result_cycle=None, result_size=None, result_edges=None,changelog=True): """Get data from JIRA for cycle/flow times and story points size change. Build a numerically indexed data frame with the following 'fixed' columns: `key`, 'url', 'issue_type', `summary`, `status`, and `resolution` from JIRA, as well as the value of any fields set in the `fields` dict in `settings`. If `known_values` is set (a dict of lists, with field names as keys and a list of known values for each field as values) and a field in `fields` contains a list of values, only the first value in the list of known values will be used. If 'query_attribute' is set in `settings`, a column with this name will be added, and populated with the `value` key, if any, from each criteria block under `queries` in settings. In addition, `cycle_time` will be set to the time delta between the first `accepted`-type column and the first `complete` column, or None. The remaining columns are the names of the items in the configured cycle, in order. Each cell contains the last date/time stamp when the relevant status was set. If an item moves backwards through the cycle, subsequent date/time stamps in the cycle are erased. """ cycle_names = [s['name'] for s in self.settings['cycle']] accepted_steps = set(s['name'] for s in self.settings['cycle'] if s['type'] == StatusTypes.accepted) completed_steps = set(s['name'] for s in self.settings['cycle'] if s['type'] == StatusTypes.complete) series = { 'key': {'data': [], 'dtype': str}, 'url': {'data': [], 'dtype': str}, 'issue_type': {'data': [], 'dtype': str}, 'summary': {'data': [], 'dtype': str}, 'status': {'data': [], 'dtype': str}, 'resolution': {'data': [], 'dtype': str}, 'cycle_time': {'data': [], 'dtype': 'timedelta64[ns]'}, 'completed_timestamp': {'data': [], 'dtype': 'datetime64[ns]'}, 'created_timestamp': {'data': [], 'dtype': 'datetime64[ns]'} } if sys.platform.startswith('win'): buffer = open("cycledata.tmp", "w+",1) # Opens a file for writing only in binary format. Overwrites the file if the file exists. 
# buffering value is 1 # Windows users seem to have a problem with spooled file else: buffer = tempfile.SpooledTemporaryFile(max_size=50000, mode='w+t') #issuelinks = open("issuelinks.csv", "w+", 1) #df_edges = pd.DataFrame() #df_edges = pd.DataFrame(columns=['Source', 'OutwardLink', 'Target', 'Inwardlink','LinkType']) #df_edges.to_csv(issuelinks, columns=['Source', 'OutwardLink', 'Target', 'Inwardlink','LinkType'], header=True, index=None, sep='\t',encoding='utf-8') df_size_history = pd.DataFrame( columns=['key','fromDate','toDate','size']) df_size_history.to_csv(buffer, columns=['key', 'fromDate', 'toDate', 'size'], header=True, index=None, sep='\t',encoding='utf-8') for cycle_name in cycle_names: series[cycle_name] = {'data': [], 'dtype': 'datetime64[ns]'} for name in self.fields.keys(): series[name] = {'data': [], 'dtype': 'object'} if self.settings['query_attribute']: series[self.settings['query_attribute']] = {'data': [], 'dtype': str} for criteria in self.settings['queries']: for issue in self.find_issues(criteria, order='updatedDate DESC', verbose=verbose, changelog=changelog): # Deal with the differences in strings between Python 2 & 3 if (sys.version_info > (3, 0)): # Python 3 code in this block item = { 'key': issue.key, 'url': "%s/browse/%s" % (self.jira._options['server'], issue.key,), 'issue_type': issue.fields.issuetype.name, 'summary': issue.fields.summary, # .encode('utf-8'), 'status': issue.fields.status.name, 'resolution': issue.fields.resolution.name if issue.fields.resolution else None, 'cycle_time': None, 'completed_timestamp': None, 'created_timestamp': issue.fields.created[:19] } else: # Python 2 code in this block item = { 'key': issue.key, 'url': "%s/browse/%s" % (self.jira._options['server'], issue.key,), 'issue_type': issue.fields.issuetype.name, 'summary': issue.fields.summary.encode('utf-8'), 'status': issue.fields.status.name, 'resolution': issue.fields.resolution.name if issue.fields.resolution else None, 'cycle_time': None, 'completed_timestamp': None, 'created_timestamp': issue.fields.created[:19] } for name, field_name in self.fields.items(): item[name] = self.resolve_field_value(issue, name, field_name) if self.settings['query_attribute']: item[self.settings['query_attribute']] = criteria.get('value', None) for cycle_name in cycle_names: item[cycle_name] = None # Get the relationships for this issue edges = [] # Source, Target, Inward Link, Outward Link, Type issuelinks = issue.fields.issuelinks # It is seems that having an Epic Parent does not record an Epic Link, just the name "Epic Name" # Creating Epic relationship requires more work. Also each Jira instance will have different customfields for Epic data # Remove this code. 
#issueEpic = issue.fields.customfield_10008 if issue.fields.customfield_10008 else None # Epic Link #if issueEpic is not None: # data = {'Source':issueEpic, 'Target':issue.key, 'InwardLink':'Belongs to Epic', 'OutwardLink':'Issue in Epic', 'LinkType':'EpicIssue'} # edges.append(data) for link in issuelinks: inwardissue = None outwardissue = None try: inwardissue = link.inwardIssue.key except: outwardissue = link.outwardIssue.key if inwardissue is not None: data = {'LinkID':link.id,'Source':inwardissue, 'Target':issue.key, 'InwardLink':link.type.inward, 'OutwardLink': link.type.outward, 'LinkType':link.type.name} else: data = {'LinkID':link.id,'Source':issue.key, 'Target': outwardissue, 'InwardLink':link.type.inward, 'OutwardLink':link.type.outward, 'LinkType':link.type.name} edges.append(data) if len(edges)>0: try: df_edges except NameError: #print('Not found') df_edges = pd.DataFrame(edges) else: df_links = pd.DataFrame(edges) df_edges=df_edges.append(df_links) # = pd.DataFrame(edges) # Got all the relationships for this issue rows = [] try: for snapshot in self.iter_size_changes(issue): data= {'key':snapshot.key,'fromDate':snapshot.date,'size':snapshot.size} rows.append(data) df = pd.DataFrame(rows) # Create the toDate column df_toDate=df['fromDate'].shift(-1) df_toDate.loc[len(df_toDate)-1] = datetime.datetime.now(pytz.utc) df['toDate'] = df_toDate except: df = pd.DataFrame(columns = ['key', 'fromDate', 'toDate', 'size']) # Round Down datetimes to full dates df['fromDate'] = df['fromDate'].apply(lambda dt: datetime.datetime(dt.year, dt.month, dt.day)) df['toDate'] = df['toDate'].apply(lambda dt: datetime.datetime(dt.year, dt.month, dt.day)) # If we only have one row of size changes and current issue has a size then it must have been created with a size value at creation. # This size will not be recorded in the size_change record. # Hence update the single row we have with the current issue size. # Get Story Points size changes history #If condition is met update the size cell if getattr(item, 'StoryPoints', None) is not None and (df.shape[0]==1): #if (item['StoryPoints'] is not None ) and (len(df)==1): df.loc[df.index[0], 'size'] = item['StoryPoints'] # Append to csv file df.to_csv(buffer, columns=['key', 'fromDate', 'toDate', 'size'], header=None, mode='a', sep='\t', date_format='%Y-%m-%d',encoding='utf-8') #print(rows) # If the first column in item lifecycle was scipted put the created data in it. 
if item[cycle_names[0]] is None: item[cycle_names[0]] = dateutil.parser.parse(item['created_timestamp']) #item['created_timestamp'] # Figure out why the first Column does not have created date #print(dateutil.parser.parse(item['created_timestamp'])) # Record date of status changes for snapshot in self.iter_changes(issue, True): snapshot_cycle_step = self.settings['cycle_lookup'].get(snapshot.status.lower(), None) if snapshot_cycle_step is None: if verbose: print(issue.key, "transitioned to unknown JIRA status", snapshot.status) continue snapshot_cycle_step_name = snapshot_cycle_step['name'] # Keep the first time we entered a step if item[snapshot_cycle_step_name] is None: item[snapshot_cycle_step_name] = snapshot.date # Wipe any subsequent dates, in case this was a move backwards found_cycle_name = False for cycle_name in cycle_names: if not found_cycle_name and cycle_name == snapshot_cycle_step_name: found_cycle_name = True continue elif found_cycle_name and item[cycle_name] is not None: if verbose: print(issue.key, "moved backwards to", snapshot_cycle_step_name, "wiping date for subsequent step", cycle_name) item[cycle_name] = None # Wipe timestamps if items have moved backwards; calculate cycle time previous_timestamp = None accepted_timestamp = None completed_timestamp = None for cycle_name in cycle_names: if item[cycle_name] is not None: previous_timestamp = item[cycle_name] if accepted_timestamp is None and previous_timestamp is not None and cycle_name in accepted_steps: accepted_timestamp = previous_timestamp if completed_timestamp is None and previous_timestamp is not None and cycle_name in completed_steps: completed_timestamp = previous_timestamp if accepted_timestamp is not None and completed_timestamp is not None: item['cycle_time'] = completed_timestamp - accepted_timestamp item['completed_timestamp'] = completed_timestamp for k, v in item.items(): series[k]['data'].append(v) data = {} for k, v in series.items(): data[k] = pd.Series(v['data'], dtype=v['dtype']) result_cycle = pd.DataFrame(data, columns=['key', 'url', 'issue_type', 'summary', 'status', 'resolution'] + sorted(self.fields.keys()) + ([self.settings['query_attribute']] if self.settings['query_attribute'] else []) + ['cycle_time', 'completed_timestamp'] + cycle_names ) result_size = pd.DataFrame() buffer.seek(0) result_size = result_size.from_csv(buffer, sep='\t') buffer.close() try: df_edges except NameError: # print('Not found') df_edges = pd.DataFrame() try: df_edges = df_edges[['Source', 'OutwardLink', 'Target', 'InwardLink','LinkType','LinkID']] # Specify dataframe sort order #df_edges.to_csv("myedges.csv", sep='\t', index=False,encoding='utf-8') except KeyError: print('Info: No issue edges found.') result_edges=df_edges # There maybe no result_size data is we might not have any change history try: result_size.set_index('key') except KeyError: result_size = pd.DataFrame(index= ['key'],columns = ['fromDate', 'toDate', 'size']) result_size['toDate'] = pd.to_datetime(result_size['toDate'], format=('%Y-%m-%d')) result_size['fromDate'] = pd.to_datetime(result_size['fromDate'], format=('%Y-%m-%d')) return result_cycle, result_size, result_edges
python
def cycle_data(self, verbose=False, result_cycle=None, result_size=None, result_edges=None,changelog=True): """Get data from JIRA for cycle/flow times and story points size change. Build a numerically indexed data frame with the following 'fixed' columns: `key`, 'url', 'issue_type', `summary`, `status`, and `resolution` from JIRA, as well as the value of any fields set in the `fields` dict in `settings`. If `known_values` is set (a dict of lists, with field names as keys and a list of known values for each field as values) and a field in `fields` contains a list of values, only the first value in the list of known values will be used. If 'query_attribute' is set in `settings`, a column with this name will be added, and populated with the `value` key, if any, from each criteria block under `queries` in settings. In addition, `cycle_time` will be set to the time delta between the first `accepted`-type column and the first `complete` column, or None. The remaining columns are the names of the items in the configured cycle, in order. Each cell contains the last date/time stamp when the relevant status was set. If an item moves backwards through the cycle, subsequent date/time stamps in the cycle are erased. """ cycle_names = [s['name'] for s in self.settings['cycle']] accepted_steps = set(s['name'] for s in self.settings['cycle'] if s['type'] == StatusTypes.accepted) completed_steps = set(s['name'] for s in self.settings['cycle'] if s['type'] == StatusTypes.complete) series = { 'key': {'data': [], 'dtype': str}, 'url': {'data': [], 'dtype': str}, 'issue_type': {'data': [], 'dtype': str}, 'summary': {'data': [], 'dtype': str}, 'status': {'data': [], 'dtype': str}, 'resolution': {'data': [], 'dtype': str}, 'cycle_time': {'data': [], 'dtype': 'timedelta64[ns]'}, 'completed_timestamp': {'data': [], 'dtype': 'datetime64[ns]'}, 'created_timestamp': {'data': [], 'dtype': 'datetime64[ns]'} } if sys.platform.startswith('win'): buffer = open("cycledata.tmp", "w+",1) # Opens a file for writing only in binary format. Overwrites the file if the file exists. 
# buffering value is 1 # Windows users seem to have a problem with spooled file else: buffer = tempfile.SpooledTemporaryFile(max_size=50000, mode='w+t') #issuelinks = open("issuelinks.csv", "w+", 1) #df_edges = pd.DataFrame() #df_edges = pd.DataFrame(columns=['Source', 'OutwardLink', 'Target', 'Inwardlink','LinkType']) #df_edges.to_csv(issuelinks, columns=['Source', 'OutwardLink', 'Target', 'Inwardlink','LinkType'], header=True, index=None, sep='\t',encoding='utf-8') df_size_history = pd.DataFrame( columns=['key','fromDate','toDate','size']) df_size_history.to_csv(buffer, columns=['key', 'fromDate', 'toDate', 'size'], header=True, index=None, sep='\t',encoding='utf-8') for cycle_name in cycle_names: series[cycle_name] = {'data': [], 'dtype': 'datetime64[ns]'} for name in self.fields.keys(): series[name] = {'data': [], 'dtype': 'object'} if self.settings['query_attribute']: series[self.settings['query_attribute']] = {'data': [], 'dtype': str} for criteria in self.settings['queries']: for issue in self.find_issues(criteria, order='updatedDate DESC', verbose=verbose, changelog=changelog): # Deal with the differences in strings between Python 2 & 3 if (sys.version_info > (3, 0)): # Python 3 code in this block item = { 'key': issue.key, 'url': "%s/browse/%s" % (self.jira._options['server'], issue.key,), 'issue_type': issue.fields.issuetype.name, 'summary': issue.fields.summary, # .encode('utf-8'), 'status': issue.fields.status.name, 'resolution': issue.fields.resolution.name if issue.fields.resolution else None, 'cycle_time': None, 'completed_timestamp': None, 'created_timestamp': issue.fields.created[:19] } else: # Python 2 code in this block item = { 'key': issue.key, 'url': "%s/browse/%s" % (self.jira._options['server'], issue.key,), 'issue_type': issue.fields.issuetype.name, 'summary': issue.fields.summary.encode('utf-8'), 'status': issue.fields.status.name, 'resolution': issue.fields.resolution.name if issue.fields.resolution else None, 'cycle_time': None, 'completed_timestamp': None, 'created_timestamp': issue.fields.created[:19] } for name, field_name in self.fields.items(): item[name] = self.resolve_field_value(issue, name, field_name) if self.settings['query_attribute']: item[self.settings['query_attribute']] = criteria.get('value', None) for cycle_name in cycle_names: item[cycle_name] = None # Get the relationships for this issue edges = [] # Source, Target, Inward Link, Outward Link, Type issuelinks = issue.fields.issuelinks # It is seems that having an Epic Parent does not record an Epic Link, just the name "Epic Name" # Creating Epic relationship requires more work. Also each Jira instance will have different customfields for Epic data # Remove this code. 
#issueEpic = issue.fields.customfield_10008 if issue.fields.customfield_10008 else None # Epic Link #if issueEpic is not None: # data = {'Source':issueEpic, 'Target':issue.key, 'InwardLink':'Belongs to Epic', 'OutwardLink':'Issue in Epic', 'LinkType':'EpicIssue'} # edges.append(data) for link in issuelinks: inwardissue = None outwardissue = None try: inwardissue = link.inwardIssue.key except: outwardissue = link.outwardIssue.key if inwardissue is not None: data = {'LinkID':link.id,'Source':inwardissue, 'Target':issue.key, 'InwardLink':link.type.inward, 'OutwardLink': link.type.outward, 'LinkType':link.type.name} else: data = {'LinkID':link.id,'Source':issue.key, 'Target': outwardissue, 'InwardLink':link.type.inward, 'OutwardLink':link.type.outward, 'LinkType':link.type.name} edges.append(data) if len(edges)>0: try: df_edges except NameError: #print('Not found') df_edges = pd.DataFrame(edges) else: df_links = pd.DataFrame(edges) df_edges=df_edges.append(df_links) # = pd.DataFrame(edges) # Got all the relationships for this issue rows = [] try: for snapshot in self.iter_size_changes(issue): data= {'key':snapshot.key,'fromDate':snapshot.date,'size':snapshot.size} rows.append(data) df = pd.DataFrame(rows) # Create the toDate column df_toDate=df['fromDate'].shift(-1) df_toDate.loc[len(df_toDate)-1] = datetime.datetime.now(pytz.utc) df['toDate'] = df_toDate except: df = pd.DataFrame(columns = ['key', 'fromDate', 'toDate', 'size']) # Round Down datetimes to full dates df['fromDate'] = df['fromDate'].apply(lambda dt: datetime.datetime(dt.year, dt.month, dt.day)) df['toDate'] = df['toDate'].apply(lambda dt: datetime.datetime(dt.year, dt.month, dt.day)) # If we only have one row of size changes and current issue has a size then it must have been created with a size value at creation. # This size will not be recorded in the size_change record. # Hence update the single row we have with the current issue size. # Get Story Points size changes history #If condition is met update the size cell if getattr(item, 'StoryPoints', None) is not None and (df.shape[0]==1): #if (item['StoryPoints'] is not None ) and (len(df)==1): df.loc[df.index[0], 'size'] = item['StoryPoints'] # Append to csv file df.to_csv(buffer, columns=['key', 'fromDate', 'toDate', 'size'], header=None, mode='a', sep='\t', date_format='%Y-%m-%d',encoding='utf-8') #print(rows) # If the first column in item lifecycle was scipted put the created data in it. 
if item[cycle_names[0]] is None: item[cycle_names[0]] = dateutil.parser.parse(item['created_timestamp']) #item['created_timestamp'] # Figure out why the first Column does not have created date #print(dateutil.parser.parse(item['created_timestamp'])) # Record date of status changes for snapshot in self.iter_changes(issue, True): snapshot_cycle_step = self.settings['cycle_lookup'].get(snapshot.status.lower(), None) if snapshot_cycle_step is None: if verbose: print(issue.key, "transitioned to unknown JIRA status", snapshot.status) continue snapshot_cycle_step_name = snapshot_cycle_step['name'] # Keep the first time we entered a step if item[snapshot_cycle_step_name] is None: item[snapshot_cycle_step_name] = snapshot.date # Wipe any subsequent dates, in case this was a move backwards found_cycle_name = False for cycle_name in cycle_names: if not found_cycle_name and cycle_name == snapshot_cycle_step_name: found_cycle_name = True continue elif found_cycle_name and item[cycle_name] is not None: if verbose: print(issue.key, "moved backwards to", snapshot_cycle_step_name, "wiping date for subsequent step", cycle_name) item[cycle_name] = None # Wipe timestamps if items have moved backwards; calculate cycle time previous_timestamp = None accepted_timestamp = None completed_timestamp = None for cycle_name in cycle_names: if item[cycle_name] is not None: previous_timestamp = item[cycle_name] if accepted_timestamp is None and previous_timestamp is not None and cycle_name in accepted_steps: accepted_timestamp = previous_timestamp if completed_timestamp is None and previous_timestamp is not None and cycle_name in completed_steps: completed_timestamp = previous_timestamp if accepted_timestamp is not None and completed_timestamp is not None: item['cycle_time'] = completed_timestamp - accepted_timestamp item['completed_timestamp'] = completed_timestamp for k, v in item.items(): series[k]['data'].append(v) data = {} for k, v in series.items(): data[k] = pd.Series(v['data'], dtype=v['dtype']) result_cycle = pd.DataFrame(data, columns=['key', 'url', 'issue_type', 'summary', 'status', 'resolution'] + sorted(self.fields.keys()) + ([self.settings['query_attribute']] if self.settings['query_attribute'] else []) + ['cycle_time', 'completed_timestamp'] + cycle_names ) result_size = pd.DataFrame() buffer.seek(0) result_size = result_size.from_csv(buffer, sep='\t') buffer.close() try: df_edges except NameError: # print('Not found') df_edges = pd.DataFrame() try: df_edges = df_edges[['Source', 'OutwardLink', 'Target', 'InwardLink','LinkType','LinkID']] # Specify dataframe sort order #df_edges.to_csv("myedges.csv", sep='\t', index=False,encoding='utf-8') except KeyError: print('Info: No issue edges found.') result_edges=df_edges # There maybe no result_size data is we might not have any change history try: result_size.set_index('key') except KeyError: result_size = pd.DataFrame(index= ['key'],columns = ['fromDate', 'toDate', 'size']) result_size['toDate'] = pd.to_datetime(result_size['toDate'], format=('%Y-%m-%d')) result_size['fromDate'] = pd.to_datetime(result_size['fromDate'], format=('%Y-%m-%d')) return result_cycle, result_size, result_edges
[ "def", "cycle_data", "(", "self", ",", "verbose", "=", "False", ",", "result_cycle", "=", "None", ",", "result_size", "=", "None", ",", "result_edges", "=", "None", ",", "changelog", "=", "True", ")", ":", "cycle_names", "=", "[", "s", "[", "'name'", "]", "for", "s", "in", "self", ".", "settings", "[", "'cycle'", "]", "]", "accepted_steps", "=", "set", "(", "s", "[", "'name'", "]", "for", "s", "in", "self", ".", "settings", "[", "'cycle'", "]", "if", "s", "[", "'type'", "]", "==", "StatusTypes", ".", "accepted", ")", "completed_steps", "=", "set", "(", "s", "[", "'name'", "]", "for", "s", "in", "self", ".", "settings", "[", "'cycle'", "]", "if", "s", "[", "'type'", "]", "==", "StatusTypes", ".", "complete", ")", "series", "=", "{", "'key'", ":", "{", "'data'", ":", "[", "]", ",", "'dtype'", ":", "str", "}", ",", "'url'", ":", "{", "'data'", ":", "[", "]", ",", "'dtype'", ":", "str", "}", ",", "'issue_type'", ":", "{", "'data'", ":", "[", "]", ",", "'dtype'", ":", "str", "}", ",", "'summary'", ":", "{", "'data'", ":", "[", "]", ",", "'dtype'", ":", "str", "}", ",", "'status'", ":", "{", "'data'", ":", "[", "]", ",", "'dtype'", ":", "str", "}", ",", "'resolution'", ":", "{", "'data'", ":", "[", "]", ",", "'dtype'", ":", "str", "}", ",", "'cycle_time'", ":", "{", "'data'", ":", "[", "]", ",", "'dtype'", ":", "'timedelta64[ns]'", "}", ",", "'completed_timestamp'", ":", "{", "'data'", ":", "[", "]", ",", "'dtype'", ":", "'datetime64[ns]'", "}", ",", "'created_timestamp'", ":", "{", "'data'", ":", "[", "]", ",", "'dtype'", ":", "'datetime64[ns]'", "}", "}", "if", "sys", ".", "platform", ".", "startswith", "(", "'win'", ")", ":", "buffer", "=", "open", "(", "\"cycledata.tmp\"", ",", "\"w+\"", ",", "1", ")", "# Opens a file for writing only in binary format. 
Overwrites the file if the file exists.", "# buffering value is 1", "# Windows users seem to have a problem with spooled file", "else", ":", "buffer", "=", "tempfile", ".", "SpooledTemporaryFile", "(", "max_size", "=", "50000", ",", "mode", "=", "'w+t'", ")", "#issuelinks = open(\"issuelinks.csv\", \"w+\", 1)", "#df_edges = pd.DataFrame()", "#df_edges = pd.DataFrame(columns=['Source', 'OutwardLink', 'Target', 'Inwardlink','LinkType'])", "#df_edges.to_csv(issuelinks, columns=['Source', 'OutwardLink', 'Target', 'Inwardlink','LinkType'], header=True, index=None, sep='\\t',encoding='utf-8')", "df_size_history", "=", "pd", ".", "DataFrame", "(", "columns", "=", "[", "'key'", ",", "'fromDate'", ",", "'toDate'", ",", "'size'", "]", ")", "df_size_history", ".", "to_csv", "(", "buffer", ",", "columns", "=", "[", "'key'", ",", "'fromDate'", ",", "'toDate'", ",", "'size'", "]", ",", "header", "=", "True", ",", "index", "=", "None", ",", "sep", "=", "'\\t'", ",", "encoding", "=", "'utf-8'", ")", "for", "cycle_name", "in", "cycle_names", ":", "series", "[", "cycle_name", "]", "=", "{", "'data'", ":", "[", "]", ",", "'dtype'", ":", "'datetime64[ns]'", "}", "for", "name", "in", "self", ".", "fields", ".", "keys", "(", ")", ":", "series", "[", "name", "]", "=", "{", "'data'", ":", "[", "]", ",", "'dtype'", ":", "'object'", "}", "if", "self", ".", "settings", "[", "'query_attribute'", "]", ":", "series", "[", "self", ".", "settings", "[", "'query_attribute'", "]", "]", "=", "{", "'data'", ":", "[", "]", ",", "'dtype'", ":", "str", "}", "for", "criteria", "in", "self", ".", "settings", "[", "'queries'", "]", ":", "for", "issue", "in", "self", ".", "find_issues", "(", "criteria", ",", "order", "=", "'updatedDate DESC'", ",", "verbose", "=", "verbose", ",", "changelog", "=", "changelog", ")", ":", "# Deal with the differences in strings between Python 2 & 3", "if", "(", "sys", ".", "version_info", ">", "(", "3", ",", "0", ")", ")", ":", "# Python 3 code in this block", "item", "=", "{", "'key'", ":", "issue", ".", "key", ",", "'url'", ":", "\"%s/browse/%s\"", "%", "(", "self", ".", "jira", ".", "_options", "[", "'server'", "]", ",", "issue", ".", "key", ",", ")", ",", "'issue_type'", ":", "issue", ".", "fields", ".", "issuetype", ".", "name", ",", "'summary'", ":", "issue", ".", "fields", ".", "summary", ",", "# .encode('utf-8'),", "'status'", ":", "issue", ".", "fields", ".", "status", ".", "name", ",", "'resolution'", ":", "issue", ".", "fields", ".", "resolution", ".", "name", "if", "issue", ".", "fields", ".", "resolution", "else", "None", ",", "'cycle_time'", ":", "None", ",", "'completed_timestamp'", ":", "None", ",", "'created_timestamp'", ":", "issue", ".", "fields", ".", "created", "[", ":", "19", "]", "}", "else", ":", "# Python 2 code in this block", "item", "=", "{", "'key'", ":", "issue", ".", "key", ",", "'url'", ":", "\"%s/browse/%s\"", "%", "(", "self", ".", "jira", ".", "_options", "[", "'server'", "]", ",", "issue", ".", "key", ",", ")", ",", "'issue_type'", ":", "issue", ".", "fields", ".", "issuetype", ".", "name", ",", "'summary'", ":", "issue", ".", "fields", ".", "summary", ".", "encode", "(", "'utf-8'", ")", ",", "'status'", ":", "issue", ".", "fields", ".", "status", ".", "name", ",", "'resolution'", ":", "issue", ".", "fields", ".", "resolution", ".", "name", "if", "issue", ".", "fields", ".", "resolution", "else", "None", ",", "'cycle_time'", ":", "None", ",", "'completed_timestamp'", ":", "None", ",", "'created_timestamp'", ":", "issue", ".", "fields", ".", "created", "[", ":", 
"19", "]", "}", "for", "name", ",", "field_name", "in", "self", ".", "fields", ".", "items", "(", ")", ":", "item", "[", "name", "]", "=", "self", ".", "resolve_field_value", "(", "issue", ",", "name", ",", "field_name", ")", "if", "self", ".", "settings", "[", "'query_attribute'", "]", ":", "item", "[", "self", ".", "settings", "[", "'query_attribute'", "]", "]", "=", "criteria", ".", "get", "(", "'value'", ",", "None", ")", "for", "cycle_name", "in", "cycle_names", ":", "item", "[", "cycle_name", "]", "=", "None", "# Get the relationships for this issue", "edges", "=", "[", "]", "# Source, Target, Inward Link, Outward Link, Type", "issuelinks", "=", "issue", ".", "fields", ".", "issuelinks", "# It is seems that having an Epic Parent does not record an Epic Link, just the name \"Epic Name\"", "# Creating Epic relationship requires more work. Also each Jira instance will have different customfields for Epic data", "# Remove this code.", "#issueEpic = issue.fields.customfield_10008 if issue.fields.customfield_10008 else None # Epic Link", "#if issueEpic is not None:", "# data = {'Source':issueEpic, 'Target':issue.key, 'InwardLink':'Belongs to Epic', 'OutwardLink':'Issue in Epic', 'LinkType':'EpicIssue'}", "# edges.append(data)", "for", "link", "in", "issuelinks", ":", "inwardissue", "=", "None", "outwardissue", "=", "None", "try", ":", "inwardissue", "=", "link", ".", "inwardIssue", ".", "key", "except", ":", "outwardissue", "=", "link", ".", "outwardIssue", ".", "key", "if", "inwardissue", "is", "not", "None", ":", "data", "=", "{", "'LinkID'", ":", "link", ".", "id", ",", "'Source'", ":", "inwardissue", ",", "'Target'", ":", "issue", ".", "key", ",", "'InwardLink'", ":", "link", ".", "type", ".", "inward", ",", "'OutwardLink'", ":", "link", ".", "type", ".", "outward", ",", "'LinkType'", ":", "link", ".", "type", ".", "name", "}", "else", ":", "data", "=", "{", "'LinkID'", ":", "link", ".", "id", ",", "'Source'", ":", "issue", ".", "key", ",", "'Target'", ":", "outwardissue", ",", "'InwardLink'", ":", "link", ".", "type", ".", "inward", ",", "'OutwardLink'", ":", "link", ".", "type", ".", "outward", ",", "'LinkType'", ":", "link", ".", "type", ".", "name", "}", "edges", ".", "append", "(", "data", ")", "if", "len", "(", "edges", ")", ">", "0", ":", "try", ":", "df_edges", "except", "NameError", ":", "#print('Not found')", "df_edges", "=", "pd", ".", "DataFrame", "(", "edges", ")", "else", ":", "df_links", "=", "pd", ".", "DataFrame", "(", "edges", ")", "df_edges", "=", "df_edges", ".", "append", "(", "df_links", ")", "# = pd.DataFrame(edges)", "# Got all the relationships for this issue", "rows", "=", "[", "]", "try", ":", "for", "snapshot", "in", "self", ".", "iter_size_changes", "(", "issue", ")", ":", "data", "=", "{", "'key'", ":", "snapshot", ".", "key", ",", "'fromDate'", ":", "snapshot", ".", "date", ",", "'size'", ":", "snapshot", ".", "size", "}", "rows", ".", "append", "(", "data", ")", "df", "=", "pd", ".", "DataFrame", "(", "rows", ")", "# Create the toDate column", "df_toDate", "=", "df", "[", "'fromDate'", "]", ".", "shift", "(", "-", "1", ")", "df_toDate", ".", "loc", "[", "len", "(", "df_toDate", ")", "-", "1", "]", "=", "datetime", ".", "datetime", ".", "now", "(", "pytz", ".", "utc", ")", "df", "[", "'toDate'", "]", "=", "df_toDate", "except", ":", "df", "=", "pd", ".", "DataFrame", "(", "columns", "=", "[", "'key'", ",", "'fromDate'", ",", "'toDate'", ",", "'size'", "]", ")", "# Round Down datetimes to full dates", "df", "[", "'fromDate'", "]", "=", "df", "[", 
"'fromDate'", "]", ".", "apply", "(", "lambda", "dt", ":", "datetime", ".", "datetime", "(", "dt", ".", "year", ",", "dt", ".", "month", ",", "dt", ".", "day", ")", ")", "df", "[", "'toDate'", "]", "=", "df", "[", "'toDate'", "]", ".", "apply", "(", "lambda", "dt", ":", "datetime", ".", "datetime", "(", "dt", ".", "year", ",", "dt", ".", "month", ",", "dt", ".", "day", ")", ")", "# If we only have one row of size changes and current issue has a size then it must have been created with a size value at creation.", "# This size will not be recorded in the size_change record.", "# Hence update the single row we have with the current issue size.", "# Get Story Points size changes history", "#If condition is met update the size cell", "if", "getattr", "(", "item", ",", "'StoryPoints'", ",", "None", ")", "is", "not", "None", "and", "(", "df", ".", "shape", "[", "0", "]", "==", "1", ")", ":", "#if (item['StoryPoints'] is not None ) and (len(df)==1):", "df", ".", "loc", "[", "df", ".", "index", "[", "0", "]", ",", "'size'", "]", "=", "item", "[", "'StoryPoints'", "]", "# Append to csv file", "df", ".", "to_csv", "(", "buffer", ",", "columns", "=", "[", "'key'", ",", "'fromDate'", ",", "'toDate'", ",", "'size'", "]", ",", "header", "=", "None", ",", "mode", "=", "'a'", ",", "sep", "=", "'\\t'", ",", "date_format", "=", "'%Y-%m-%d'", ",", "encoding", "=", "'utf-8'", ")", "#print(rows)", "# If the first column in item lifecycle was scipted put the created data in it.", "if", "item", "[", "cycle_names", "[", "0", "]", "]", "is", "None", ":", "item", "[", "cycle_names", "[", "0", "]", "]", "=", "dateutil", ".", "parser", ".", "parse", "(", "item", "[", "'created_timestamp'", "]", ")", "#item['created_timestamp']", "# Figure out why the first Column does not have created date", "#print(dateutil.parser.parse(item['created_timestamp']))", "# Record date of status changes", "for", "snapshot", "in", "self", ".", "iter_changes", "(", "issue", ",", "True", ")", ":", "snapshot_cycle_step", "=", "self", ".", "settings", "[", "'cycle_lookup'", "]", ".", "get", "(", "snapshot", ".", "status", ".", "lower", "(", ")", ",", "None", ")", "if", "snapshot_cycle_step", "is", "None", ":", "if", "verbose", ":", "print", "(", "issue", ".", "key", ",", "\"transitioned to unknown JIRA status\"", ",", "snapshot", ".", "status", ")", "continue", "snapshot_cycle_step_name", "=", "snapshot_cycle_step", "[", "'name'", "]", "# Keep the first time we entered a step", "if", "item", "[", "snapshot_cycle_step_name", "]", "is", "None", ":", "item", "[", "snapshot_cycle_step_name", "]", "=", "snapshot", ".", "date", "# Wipe any subsequent dates, in case this was a move backwards", "found_cycle_name", "=", "False", "for", "cycle_name", "in", "cycle_names", ":", "if", "not", "found_cycle_name", "and", "cycle_name", "==", "snapshot_cycle_step_name", ":", "found_cycle_name", "=", "True", "continue", "elif", "found_cycle_name", "and", "item", "[", "cycle_name", "]", "is", "not", "None", ":", "if", "verbose", ":", "print", "(", "issue", ".", "key", ",", "\"moved backwards to\"", ",", "snapshot_cycle_step_name", ",", "\"wiping date for subsequent step\"", ",", "cycle_name", ")", "item", "[", "cycle_name", "]", "=", "None", "# Wipe timestamps if items have moved backwards; calculate cycle time", "previous_timestamp", "=", "None", "accepted_timestamp", "=", "None", "completed_timestamp", "=", "None", "for", "cycle_name", "in", "cycle_names", ":", "if", "item", "[", "cycle_name", "]", "is", "not", "None", ":", "previous_timestamp", "=", "item", 
"[", "cycle_name", "]", "if", "accepted_timestamp", "is", "None", "and", "previous_timestamp", "is", "not", "None", "and", "cycle_name", "in", "accepted_steps", ":", "accepted_timestamp", "=", "previous_timestamp", "if", "completed_timestamp", "is", "None", "and", "previous_timestamp", "is", "not", "None", "and", "cycle_name", "in", "completed_steps", ":", "completed_timestamp", "=", "previous_timestamp", "if", "accepted_timestamp", "is", "not", "None", "and", "completed_timestamp", "is", "not", "None", ":", "item", "[", "'cycle_time'", "]", "=", "completed_timestamp", "-", "accepted_timestamp", "item", "[", "'completed_timestamp'", "]", "=", "completed_timestamp", "for", "k", ",", "v", "in", "item", ".", "items", "(", ")", ":", "series", "[", "k", "]", "[", "'data'", "]", ".", "append", "(", "v", ")", "data", "=", "{", "}", "for", "k", ",", "v", "in", "series", ".", "items", "(", ")", ":", "data", "[", "k", "]", "=", "pd", ".", "Series", "(", "v", "[", "'data'", "]", ",", "dtype", "=", "v", "[", "'dtype'", "]", ")", "result_cycle", "=", "pd", ".", "DataFrame", "(", "data", ",", "columns", "=", "[", "'key'", ",", "'url'", ",", "'issue_type'", ",", "'summary'", ",", "'status'", ",", "'resolution'", "]", "+", "sorted", "(", "self", ".", "fields", ".", "keys", "(", ")", ")", "+", "(", "[", "self", ".", "settings", "[", "'query_attribute'", "]", "]", "if", "self", ".", "settings", "[", "'query_attribute'", "]", "else", "[", "]", ")", "+", "[", "'cycle_time'", ",", "'completed_timestamp'", "]", "+", "cycle_names", ")", "result_size", "=", "pd", ".", "DataFrame", "(", ")", "buffer", ".", "seek", "(", "0", ")", "result_size", "=", "result_size", ".", "from_csv", "(", "buffer", ",", "sep", "=", "'\\t'", ")", "buffer", ".", "close", "(", ")", "try", ":", "df_edges", "except", "NameError", ":", "# print('Not found')", "df_edges", "=", "pd", ".", "DataFrame", "(", ")", "try", ":", "df_edges", "=", "df_edges", "[", "[", "'Source'", ",", "'OutwardLink'", ",", "'Target'", ",", "'InwardLink'", ",", "'LinkType'", ",", "'LinkID'", "]", "]", "# Specify dataframe sort order", "#df_edges.to_csv(\"myedges.csv\", sep='\\t', index=False,encoding='utf-8')", "except", "KeyError", ":", "print", "(", "'Info: No issue edges found.'", ")", "result_edges", "=", "df_edges", "# There maybe no result_size data is we might not have any change history", "try", ":", "result_size", ".", "set_index", "(", "'key'", ")", "except", "KeyError", ":", "result_size", "=", "pd", ".", "DataFrame", "(", "index", "=", "[", "'key'", "]", ",", "columns", "=", "[", "'fromDate'", ",", "'toDate'", ",", "'size'", "]", ")", "result_size", "[", "'toDate'", "]", "=", "pd", ".", "to_datetime", "(", "result_size", "[", "'toDate'", "]", ",", "format", "=", "(", "'%Y-%m-%d'", ")", ")", "result_size", "[", "'fromDate'", "]", "=", "pd", ".", "to_datetime", "(", "result_size", "[", "'fromDate'", "]", ",", "format", "=", "(", "'%Y-%m-%d'", ")", ")", "return", "result_cycle", ",", "result_size", ",", "result_edges" ]
Get data from JIRA for cycle/flow times and story points size change. Build a numerically indexed data frame with the following 'fixed' columns: `key`, 'url', 'issue_type', `summary`, `status`, and `resolution` from JIRA, as well as the value of any fields set in the `fields` dict in `settings`. If `known_values` is set (a dict of lists, with field names as keys and a list of known values for each field as values) and a field in `fields` contains a list of values, only the first value in the list of known values will be used. If 'query_attribute' is set in `settings`, a column with this name will be added, and populated with the `value` key, if any, from each criteria block under `queries` in settings. In addition, `cycle_time` will be set to the time delta between the first `accepted`-type column and the first `complete` column, or None. The remaining columns are the names of the items in the configured cycle, in order. Each cell contains the last date/time stamp when the relevant status was set. If an item moves backwards through the cycle, subsequent date/time stamps in the cycle are erased.
[ "Get", "data", "from", "JIRA", "for", "cycle", "/", "flow", "times", "and", "story", "points", "size", "change", "." ]
train
https://github.com/rnwolf/jira-metrics-extract/blob/56443211b3e1200f3def79173a21e0232332ae17/jira_metrics_extract/cycletime.py#L86-L357
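Note: the `cycle_time` column described in the record above is the delta between the first accepted-type timestamp and the first complete-type timestamp. A minimal sketch of that calculation; the step names `Committed`, `Build`, `Done` are hypothetical and not taken from this record:

import pandas as pd

# One issue's per-step timestamps; in the real data these come from JIRA change history.
row = pd.Series({
    'Committed': pd.Timestamp('2018-01-02'),   # accepted-type step
    'Build': pd.Timestamp('2018-01-05'),
    'Done': pd.Timestamp('2018-01-09'),        # complete-type step
})

accepted_steps = ['Committed']
completed_steps = ['Done']

# First non-null timestamp found in each group of steps.
accepted = next((row[c] for c in accepted_steps if pd.notnull(row[c])), None)
completed = next((row[c] for c in completed_steps if pd.notnull(row[c])), None)

cycle_time = completed - accepted if accepted is not None and completed is not None else None
print(cycle_time)  # Timedelta('7 days 00:00:00')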
rnwolf/jira-metrics-extract
jira_metrics_extract/cycletime.py
CycleTimeQueries.size_history
def size_history(self,size_data): """Return the a DataFrame, indexed by day, with columns containing story size for each issue. In addition, columns are soted by Jira Issue key. First by Project and then by id number. """ def my_merge(df1, df2): # http://stackoverflow.com/questions/34411495/pandas-merge-several-dataframes res = pd.merge(df1, df2, how='outer', left_index=True, right_index=True) cols = sorted(res.columns) pairs = [] for col1, col2 in zip(cols[:-1], cols[1:]): if col1.endswith('_x') and col2.endswith('_y'): pairs.append((col1, col2)) for col1, col2 in pairs: res[col1[:-2]] = res[col1].combine_first(res[col2]) res = res.drop([col1, col2], axis=1) return res dfs_key = [] # Group the dataframe by regiment, and for each regiment, for name, group in size_data.groupby('key'): dfs = [] for row in group.itertuples(): # print(row.Index, row.fromDate,row.toDate, row.size) dates = pd.date_range(start=row.fromDate, end=row.toDate) sizes = [row.size] * len(dates) data = {'date': dates, 'size': sizes} df2 = pd.DataFrame(data, columns=['date', 'size']) pd.to_datetime(df2['date'], format=('%Y-%m-%d')) df2.set_index(['date'], inplace=True) dfs.append(df2) # df_final = reduce(lambda left,right: pd.merge(left,right), dfs) df_key = (reduce(my_merge, dfs)) df_key.columns = [name if x == 'size' else x for x in df_key.columns] dfs_key.append(df_key) df_all = (reduce(my_merge, dfs_key)) # Sort the columns based on Jira Project code and issue number mykeys = df_all.columns.values.tolist() mykeys.sort(key=lambda x: x.split('-')[0] + '-' + str(int(x.split('-')[1])).zfill(6)) df_all = df_all[mykeys] # Reindex to make sure we have all dates start, end = df_all.index.min(), df_all.index.max() df_all = df_all.reindex(pd.date_range(start, end, freq='D'), method='ffill') return df_all
python
def size_history(self,size_data): """Return the a DataFrame, indexed by day, with columns containing story size for each issue. In addition, columns are soted by Jira Issue key. First by Project and then by id number. """ def my_merge(df1, df2): # http://stackoverflow.com/questions/34411495/pandas-merge-several-dataframes res = pd.merge(df1, df2, how='outer', left_index=True, right_index=True) cols = sorted(res.columns) pairs = [] for col1, col2 in zip(cols[:-1], cols[1:]): if col1.endswith('_x') and col2.endswith('_y'): pairs.append((col1, col2)) for col1, col2 in pairs: res[col1[:-2]] = res[col1].combine_first(res[col2]) res = res.drop([col1, col2], axis=1) return res dfs_key = [] # Group the dataframe by regiment, and for each regiment, for name, group in size_data.groupby('key'): dfs = [] for row in group.itertuples(): # print(row.Index, row.fromDate,row.toDate, row.size) dates = pd.date_range(start=row.fromDate, end=row.toDate) sizes = [row.size] * len(dates) data = {'date': dates, 'size': sizes} df2 = pd.DataFrame(data, columns=['date', 'size']) pd.to_datetime(df2['date'], format=('%Y-%m-%d')) df2.set_index(['date'], inplace=True) dfs.append(df2) # df_final = reduce(lambda left,right: pd.merge(left,right), dfs) df_key = (reduce(my_merge, dfs)) df_key.columns = [name if x == 'size' else x for x in df_key.columns] dfs_key.append(df_key) df_all = (reduce(my_merge, dfs_key)) # Sort the columns based on Jira Project code and issue number mykeys = df_all.columns.values.tolist() mykeys.sort(key=lambda x: x.split('-')[0] + '-' + str(int(x.split('-')[1])).zfill(6)) df_all = df_all[mykeys] # Reindex to make sure we have all dates start, end = df_all.index.min(), df_all.index.max() df_all = df_all.reindex(pd.date_range(start, end, freq='D'), method='ffill') return df_all
[ "def", "size_history", "(", "self", ",", "size_data", ")", ":", "def", "my_merge", "(", "df1", ",", "df2", ")", ":", "# http://stackoverflow.com/questions/34411495/pandas-merge-several-dataframes", "res", "=", "pd", ".", "merge", "(", "df1", ",", "df2", ",", "how", "=", "'outer'", ",", "left_index", "=", "True", ",", "right_index", "=", "True", ")", "cols", "=", "sorted", "(", "res", ".", "columns", ")", "pairs", "=", "[", "]", "for", "col1", ",", "col2", "in", "zip", "(", "cols", "[", ":", "-", "1", "]", ",", "cols", "[", "1", ":", "]", ")", ":", "if", "col1", ".", "endswith", "(", "'_x'", ")", "and", "col2", ".", "endswith", "(", "'_y'", ")", ":", "pairs", ".", "append", "(", "(", "col1", ",", "col2", ")", ")", "for", "col1", ",", "col2", "in", "pairs", ":", "res", "[", "col1", "[", ":", "-", "2", "]", "]", "=", "res", "[", "col1", "]", ".", "combine_first", "(", "res", "[", "col2", "]", ")", "res", "=", "res", ".", "drop", "(", "[", "col1", ",", "col2", "]", ",", "axis", "=", "1", ")", "return", "res", "dfs_key", "=", "[", "]", "# Group the dataframe by regiment, and for each regiment,", "for", "name", ",", "group", "in", "size_data", ".", "groupby", "(", "'key'", ")", ":", "dfs", "=", "[", "]", "for", "row", "in", "group", ".", "itertuples", "(", ")", ":", "# print(row.Index, row.fromDate,row.toDate, row.size)", "dates", "=", "pd", ".", "date_range", "(", "start", "=", "row", ".", "fromDate", ",", "end", "=", "row", ".", "toDate", ")", "sizes", "=", "[", "row", ".", "size", "]", "*", "len", "(", "dates", ")", "data", "=", "{", "'date'", ":", "dates", ",", "'size'", ":", "sizes", "}", "df2", "=", "pd", ".", "DataFrame", "(", "data", ",", "columns", "=", "[", "'date'", ",", "'size'", "]", ")", "pd", ".", "to_datetime", "(", "df2", "[", "'date'", "]", ",", "format", "=", "(", "'%Y-%m-%d'", ")", ")", "df2", ".", "set_index", "(", "[", "'date'", "]", ",", "inplace", "=", "True", ")", "dfs", ".", "append", "(", "df2", ")", "# df_final = reduce(lambda left,right: pd.merge(left,right), dfs)", "df_key", "=", "(", "reduce", "(", "my_merge", ",", "dfs", ")", ")", "df_key", ".", "columns", "=", "[", "name", "if", "x", "==", "'size'", "else", "x", "for", "x", "in", "df_key", ".", "columns", "]", "dfs_key", ".", "append", "(", "df_key", ")", "df_all", "=", "(", "reduce", "(", "my_merge", ",", "dfs_key", ")", ")", "# Sort the columns based on Jira Project code and issue number", "mykeys", "=", "df_all", ".", "columns", ".", "values", ".", "tolist", "(", ")", "mykeys", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", ".", "split", "(", "'-'", ")", "[", "0", "]", "+", "'-'", "+", "str", "(", "int", "(", "x", ".", "split", "(", "'-'", ")", "[", "1", "]", ")", ")", ".", "zfill", "(", "6", ")", ")", "df_all", "=", "df_all", "[", "mykeys", "]", "# Reindex to make sure we have all dates", "start", ",", "end", "=", "df_all", ".", "index", ".", "min", "(", ")", ",", "df_all", ".", "index", ".", "max", "(", ")", "df_all", "=", "df_all", ".", "reindex", "(", "pd", ".", "date_range", "(", "start", ",", "end", ",", "freq", "=", "'D'", ")", ",", "method", "=", "'ffill'", ")", "return", "df_all" ]
Return a DataFrame, indexed by day, with columns containing story size for each issue. In addition, columns are sorted by Jira Issue key. First by Project and then by id number.
[ "Return", "a", "DataFrame", "indexed", "by", "day", "with", "columns", "containing", "story", "size", "for", "each", "issue", "." ]
train
https://github.com/rnwolf/jira-metrics-extract/blob/56443211b3e1200f3def79173a21e0232332ae17/jira_metrics_extract/cycletime.py#L359-L407
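Note: the column ordering described above ("First by Project and then by id number") comes from a composite sort key that zero-pads the numeric part of the issue key. A standalone sketch with made-up keys; the lambda mirrors the sort key used in size_history:

keys = ['ABC-10', 'XY-1', 'ABC-2', 'ABC-100']  # made-up issue keys
keys.sort(key=lambda x: x.split('-')[0] + '-' + str(int(x.split('-')[1])).zfill(6))
print(keys)  # ['ABC-2', 'ABC-10', 'ABC-100', 'XY-1'] -- numeric order within each project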
rnwolf/jira-metrics-extract
jira_metrics_extract/cycletime.py
CycleTimeQueries.cfd
def cfd(self, cycle_data,size_history= None, pointscolumn= None, stacked = True ): """Return the data to build a cumulative flow diagram: a DataFrame, indexed by day, with columns containing cumulative counts for each of the items in the configured cycle. In addition, a column called `cycle_time` contains the approximate average cycle time of that day based on the first "accepted" status and the first "complete" status. If stacked = True then return dataframe suitable for plotting as stacked area chart else return for platting as non-staked or line chart. """ # Define helper function def cumulativeColumnStates(df,stacked): """ Calculate the column sums, were the incoming matrix columns represents items in workflow states States progress from left to right. We what to zero out items, other than right most value to avoid counting items in prior states. :param df: :return: pandas dataframe row with sum of column items """ # Helper functions to return the right most cells in 2D array def last_number(lst): if all(map(lambda x: x == 0, lst)): return 0 elif lst[-1] != 0: return len(lst) - 1 else: return last_number(lst[:-1]) def fill_others(lst): new_lst = [0] * len(lst) new_lst[last_number(lst)] = lst[last_number(lst)] return new_lst df_zeroed = df.fillna(value=0) # ,inplace = True Get rid of non numeric items. Make a ?deep? copy if stacked: df_result = df_zeroed.apply(lambda x: fill_others(x.values.tolist()), axis=1) else: df_result = df_zeroed sum_row = df_result[df.columns].sum() # Sum Columns return pd.DataFrame(data=sum_row).T # Transpose into row dataframe and return # Helper function to return the right most cells in 2D array def keeprightmoststate(df): """ Incoming matrix columns represents items in workflow states States progress from left to right. We what to zero out items, other than right most value. :param df: :return: pandas dataframe row with sum of column items """ def last_number(lst): if all(map(lambda x: x == 0, lst)): return 0 elif lst[-1] != 0: return len(lst) - 1 else: return last_number(lst[:-1]) def fill_others(lst): new_lst = [0] * len(lst) new_lst[last_number(lst)] = lst[last_number(lst)] return new_lst df_zeroed = df.fillna(value=0) # ,inplace = True Get rid of non numeric items. Make a ?deep? copy df_result = df_zeroed.apply(lambda x: fill_others(x.values.tolist()), axis=1) return df_result # Define helper function def hide_greater_than_date(cell, adate): """ Helper function to compare date values in cells """ result = False try: celldatetime = datetime.date(cell.year, cell.month, cell.day) except: return True if celldatetime > adate: return True return False # We have a date value in cell and it is less than or equal to input date # Helper function def appendDFToCSV(df, csvFilePath, sep="\t",date_format='%Y-%m-%d', encoding='utf-8'): import os if not os.path.isfile(csvFilePath): df.to_csv(csvFilePath, mode='a', index=False, sep=sep, date_format=date_format, encoding=encoding) elif len(df.columns) != len(pd.read_csv(csvFilePath, nrows=1, sep=sep).columns): raise Exception( "Columns do not match!! Dataframe has " + str(len(df.columns)) + " columns. 
CSV file has " + str( len(pd.read_csv(csvFilePath, nrows=1, sep=sep).columns)) + " columns.") elif not (df.columns == pd.read_csv(csvFilePath, nrows=1, sep=sep).columns).all(): raise Exception("Columns and column order of dataframe and csv file do not match!!") else: df.to_csv(csvFilePath, mode='a', index=False, sep=sep, header=False, date_format=date_format, encoding=encoding) #print(pointscolumn) # List of all state change columns that may have date value in them cycle_names = [s['name'] for s in self.settings['cycle']] # Create list of columns that we want to return in our results dataFrame slice_columns = list(self.settings['none_sized_statuses']) # Make a COPY of the list so that we dont modify the reference. if pointscolumn: for size_state in self.settings['sized_statuses']: # states_to_size: sizedStateName = size_state + 'Sized' slice_columns.append(sizedStateName) # Check that it works if we use all columns as sized. slice_columns = [] for size_state in cycle_names: sizedStateName = size_state + 'Sized' slice_columns.append(sizedStateName) else: slice_columns = cycle_names # Build a dataframe of just the "date" columns df = cycle_data[cycle_names].copy() # Strip out times from all dates df = pd.DataFrame( np.array(df.values, dtype='<M8[ns]').astype('<M8[D]').astype('<M8[ns]'), columns=df.columns, index=df.index ) # No history provided this thus we return dataframe with just column headers. if size_history is None: return df # Get a list of dates that a issue changed state state_changes_on_dates_set = set() for state in cycle_names: state_changes_on_dates_set = state_changes_on_dates_set.union(set(df[state])) # How many unique days did a issue stage state # Remove non timestamp vlaues and sort the list state_changes_on_dates = filter(lambda x: type(x.date()) == datetime.date, sorted(list(state_changes_on_dates_set))) # Replace missing NaT values (happens if a status is skipped) with the subsequent timestamp df = df.fillna(method='bfill', axis=1) if pointscolumn: storypoints = cycle_data[pointscolumn] # As at today ids = cycle_data['key'] # create blank results dataframe df_results = pd.DataFrame() # For each date on which we had a issue state change we want to count and sum the totals for each of the given states # 'Open','Analysis','Backlog','In Process','Done','Withdrawn' timenowstr = datetime.datetime.now().strftime('-run-%Y-%m-%d_%H-%M-%S') for date_index,statechangedate in enumerate(state_changes_on_dates): if date_index%10 == 0: # Print out Progress every tenth pass #print("CFD state change {} of {} ".format(date_index,len(state_changes_on_dates))) if type(statechangedate.date()) == datetime.date: # filterdate.year,filterdate.month,filterdate.day filterdate = datetime.date(statechangedate.year, statechangedate.month, statechangedate.day) # statechangedate.datetime() # Apply function to each cell and only make it visible if issue was in state on or after the filter date df_filtered = df.applymap(lambda x: 0 if hide_greater_than_date(x, filterdate) else 1) if stacked: df_filtered=keeprightmoststate(df_filtered) if pointscolumn and (size_history is not None): # For debug #if filterdate.isoformat() == '2016-11-22': # size_history.loc[filterdate.isoformat()].to_csv("debug-size-history.csv") storypoints_series_on = size_history.loc[filterdate.isoformat()].T df_size_on_day = pd.Series.to_frame(storypoints_series_on) df_size_on_day.columns = [pointscolumn] # Make sure get size data in the same sequence as ids. 
left = pd.Series.to_frame(ids) right = df_size_on_day result = left.join(right, on=['key']) # http://pandas.pydata.org/pandas-docs/stable/merging.html\ df_countable = pd.concat([result, df_filtered], axis=1) # for debuging and analytics append the days state to file df_countable['date'] = filterdate.isoformat() if stacked: file_name = "daily-cfd-stacked-run-at"+ timenowstr + ".csv" else: file_name = "daily-cfd-run-at" + timenowstr + ".csv" appendDFToCSV(df_countable, file_name ) else: df_countable = df_filtered # Because we size issues with Story Points we need to add some additional columns # for each state based on size not just count if pointscolumn: for size_state in self.settings['sized_statuses']: #states_to_size: sizedStateName = size_state + 'Sized' df_countable[sizedStateName] = df_countable.apply( lambda row: (row[pointscolumn] * row[size_state] ), axis=1) # For debugging write dataframe to sheet for current day. #file_name="countable-cfd-for-day-"+ filterdate.isoformat()+timenowstr+".csv" #df_countable.to_csv(file_name, sep='\t', encoding='utf-8', quoting=csv.QUOTE_ALL) df_slice = df_countable.loc[:,slice_columns].copy() df_sub_sum = cumulativeColumnStates(df_slice,stacked) final_table = df_sub_sum.rename(index={0: filterdate}) # append to results df_results = df_results.append(final_table) df_results.sort_index(inplace=True) df= df_results # Count number of times each date occurs, preserving column order #df = pd.concat({col: df[col].value_counts() for col in df}, axis=1)[cycle_names] # Fill missing dates with 0 and run a cumulative sum #df = df.fillna(0).cumsum(axis=0) # Reindex to make sure we have all dates start, end = df.index.min(), df.index.max() try: # If we have no change history we will not have any data in the df and will get a ValueError on reindex df = df.reindex(pd.date_range(start, end, freq='D'), method='ffill') except ValueError: pass return df
python
def cfd(self, cycle_data,size_history= None, pointscolumn= None, stacked = True ): """Return the data to build a cumulative flow diagram: a DataFrame, indexed by day, with columns containing cumulative counts for each of the items in the configured cycle. In addition, a column called `cycle_time` contains the approximate average cycle time of that day based on the first "accepted" status and the first "complete" status. If stacked = True then return dataframe suitable for plotting as stacked area chart else return for platting as non-staked or line chart. """ # Define helper function def cumulativeColumnStates(df,stacked): """ Calculate the column sums, were the incoming matrix columns represents items in workflow states States progress from left to right. We what to zero out items, other than right most value to avoid counting items in prior states. :param df: :return: pandas dataframe row with sum of column items """ # Helper functions to return the right most cells in 2D array def last_number(lst): if all(map(lambda x: x == 0, lst)): return 0 elif lst[-1] != 0: return len(lst) - 1 else: return last_number(lst[:-1]) def fill_others(lst): new_lst = [0] * len(lst) new_lst[last_number(lst)] = lst[last_number(lst)] return new_lst df_zeroed = df.fillna(value=0) # ,inplace = True Get rid of non numeric items. Make a ?deep? copy if stacked: df_result = df_zeroed.apply(lambda x: fill_others(x.values.tolist()), axis=1) else: df_result = df_zeroed sum_row = df_result[df.columns].sum() # Sum Columns return pd.DataFrame(data=sum_row).T # Transpose into row dataframe and return # Helper function to return the right most cells in 2D array def keeprightmoststate(df): """ Incoming matrix columns represents items in workflow states States progress from left to right. We what to zero out items, other than right most value. :param df: :return: pandas dataframe row with sum of column items """ def last_number(lst): if all(map(lambda x: x == 0, lst)): return 0 elif lst[-1] != 0: return len(lst) - 1 else: return last_number(lst[:-1]) def fill_others(lst): new_lst = [0] * len(lst) new_lst[last_number(lst)] = lst[last_number(lst)] return new_lst df_zeroed = df.fillna(value=0) # ,inplace = True Get rid of non numeric items. Make a ?deep? copy df_result = df_zeroed.apply(lambda x: fill_others(x.values.tolist()), axis=1) return df_result # Define helper function def hide_greater_than_date(cell, adate): """ Helper function to compare date values in cells """ result = False try: celldatetime = datetime.date(cell.year, cell.month, cell.day) except: return True if celldatetime > adate: return True return False # We have a date value in cell and it is less than or equal to input date # Helper function def appendDFToCSV(df, csvFilePath, sep="\t",date_format='%Y-%m-%d', encoding='utf-8'): import os if not os.path.isfile(csvFilePath): df.to_csv(csvFilePath, mode='a', index=False, sep=sep, date_format=date_format, encoding=encoding) elif len(df.columns) != len(pd.read_csv(csvFilePath, nrows=1, sep=sep).columns): raise Exception( "Columns do not match!! Dataframe has " + str(len(df.columns)) + " columns. 
CSV file has " + str( len(pd.read_csv(csvFilePath, nrows=1, sep=sep).columns)) + " columns.") elif not (df.columns == pd.read_csv(csvFilePath, nrows=1, sep=sep).columns).all(): raise Exception("Columns and column order of dataframe and csv file do not match!!") else: df.to_csv(csvFilePath, mode='a', index=False, sep=sep, header=False, date_format=date_format, encoding=encoding) #print(pointscolumn) # List of all state change columns that may have date value in them cycle_names = [s['name'] for s in self.settings['cycle']] # Create list of columns that we want to return in our results dataFrame slice_columns = list(self.settings['none_sized_statuses']) # Make a COPY of the list so that we dont modify the reference. if pointscolumn: for size_state in self.settings['sized_statuses']: # states_to_size: sizedStateName = size_state + 'Sized' slice_columns.append(sizedStateName) # Check that it works if we use all columns as sized. slice_columns = [] for size_state in cycle_names: sizedStateName = size_state + 'Sized' slice_columns.append(sizedStateName) else: slice_columns = cycle_names # Build a dataframe of just the "date" columns df = cycle_data[cycle_names].copy() # Strip out times from all dates df = pd.DataFrame( np.array(df.values, dtype='<M8[ns]').astype('<M8[D]').astype('<M8[ns]'), columns=df.columns, index=df.index ) # No history provided this thus we return dataframe with just column headers. if size_history is None: return df # Get a list of dates that a issue changed state state_changes_on_dates_set = set() for state in cycle_names: state_changes_on_dates_set = state_changes_on_dates_set.union(set(df[state])) # How many unique days did a issue stage state # Remove non timestamp vlaues and sort the list state_changes_on_dates = filter(lambda x: type(x.date()) == datetime.date, sorted(list(state_changes_on_dates_set))) # Replace missing NaT values (happens if a status is skipped) with the subsequent timestamp df = df.fillna(method='bfill', axis=1) if pointscolumn: storypoints = cycle_data[pointscolumn] # As at today ids = cycle_data['key'] # create blank results dataframe df_results = pd.DataFrame() # For each date on which we had a issue state change we want to count and sum the totals for each of the given states # 'Open','Analysis','Backlog','In Process','Done','Withdrawn' timenowstr = datetime.datetime.now().strftime('-run-%Y-%m-%d_%H-%M-%S') for date_index,statechangedate in enumerate(state_changes_on_dates): if date_index%10 == 0: # Print out Progress every tenth pass #print("CFD state change {} of {} ".format(date_index,len(state_changes_on_dates))) if type(statechangedate.date()) == datetime.date: # filterdate.year,filterdate.month,filterdate.day filterdate = datetime.date(statechangedate.year, statechangedate.month, statechangedate.day) # statechangedate.datetime() # Apply function to each cell and only make it visible if issue was in state on or after the filter date df_filtered = df.applymap(lambda x: 0 if hide_greater_than_date(x, filterdate) else 1) if stacked: df_filtered=keeprightmoststate(df_filtered) if pointscolumn and (size_history is not None): # For debug #if filterdate.isoformat() == '2016-11-22': # size_history.loc[filterdate.isoformat()].to_csv("debug-size-history.csv") storypoints_series_on = size_history.loc[filterdate.isoformat()].T df_size_on_day = pd.Series.to_frame(storypoints_series_on) df_size_on_day.columns = [pointscolumn] # Make sure get size data in the same sequence as ids. 
left = pd.Series.to_frame(ids) right = df_size_on_day result = left.join(right, on=['key']) # http://pandas.pydata.org/pandas-docs/stable/merging.html\ df_countable = pd.concat([result, df_filtered], axis=1) # for debuging and analytics append the days state to file df_countable['date'] = filterdate.isoformat() if stacked: file_name = "daily-cfd-stacked-run-at"+ timenowstr + ".csv" else: file_name = "daily-cfd-run-at" + timenowstr + ".csv" appendDFToCSV(df_countable, file_name ) else: df_countable = df_filtered # Because we size issues with Story Points we need to add some additional columns # for each state based on size not just count if pointscolumn: for size_state in self.settings['sized_statuses']: #states_to_size: sizedStateName = size_state + 'Sized' df_countable[sizedStateName] = df_countable.apply( lambda row: (row[pointscolumn] * row[size_state] ), axis=1) # For debugging write dataframe to sheet for current day. #file_name="countable-cfd-for-day-"+ filterdate.isoformat()+timenowstr+".csv" #df_countable.to_csv(file_name, sep='\t', encoding='utf-8', quoting=csv.QUOTE_ALL) df_slice = df_countable.loc[:,slice_columns].copy() df_sub_sum = cumulativeColumnStates(df_slice,stacked) final_table = df_sub_sum.rename(index={0: filterdate}) # append to results df_results = df_results.append(final_table) df_results.sort_index(inplace=True) df= df_results # Count number of times each date occurs, preserving column order #df = pd.concat({col: df[col].value_counts() for col in df}, axis=1)[cycle_names] # Fill missing dates with 0 and run a cumulative sum #df = df.fillna(0).cumsum(axis=0) # Reindex to make sure we have all dates start, end = df.index.min(), df.index.max() try: # If we have no change history we will not have any data in the df and will get a ValueError on reindex df = df.reindex(pd.date_range(start, end, freq='D'), method='ffill') except ValueError: pass return df
[ "def", "cfd", "(", "self", ",", "cycle_data", ",", "size_history", "=", "None", ",", "pointscolumn", "=", "None", ",", "stacked", "=", "True", ")", ":", "# Define helper function", "def", "cumulativeColumnStates", "(", "df", ",", "stacked", ")", ":", "\"\"\"\n Calculate the column sums, were the incoming matrix columns represents items in workflow states\n States progress from left to right.\n We what to zero out items, other than right most value to avoid counting items in prior states.\n :param df:\n :return: pandas dataframe row with sum of column items\n \"\"\"", "# Helper functions to return the right most cells in 2D array", "def", "last_number", "(", "lst", ")", ":", "if", "all", "(", "map", "(", "lambda", "x", ":", "x", "==", "0", ",", "lst", ")", ")", ":", "return", "0", "elif", "lst", "[", "-", "1", "]", "!=", "0", ":", "return", "len", "(", "lst", ")", "-", "1", "else", ":", "return", "last_number", "(", "lst", "[", ":", "-", "1", "]", ")", "def", "fill_others", "(", "lst", ")", ":", "new_lst", "=", "[", "0", "]", "*", "len", "(", "lst", ")", "new_lst", "[", "last_number", "(", "lst", ")", "]", "=", "lst", "[", "last_number", "(", "lst", ")", "]", "return", "new_lst", "df_zeroed", "=", "df", ".", "fillna", "(", "value", "=", "0", ")", "# ,inplace = True Get rid of non numeric items. Make a ?deep? copy", "if", "stacked", ":", "df_result", "=", "df_zeroed", ".", "apply", "(", "lambda", "x", ":", "fill_others", "(", "x", ".", "values", ".", "tolist", "(", ")", ")", ",", "axis", "=", "1", ")", "else", ":", "df_result", "=", "df_zeroed", "sum_row", "=", "df_result", "[", "df", ".", "columns", "]", ".", "sum", "(", ")", "# Sum Columns", "return", "pd", ".", "DataFrame", "(", "data", "=", "sum_row", ")", ".", "T", "# Transpose into row dataframe and return", "# Helper function to return the right most cells in 2D array", "def", "keeprightmoststate", "(", "df", ")", ":", "\"\"\"\n Incoming matrix columns represents items in workflow states\n States progress from left to right.\n We what to zero out items, other than right most value.\n :param df:\n :return: pandas dataframe row with sum of column items\n \"\"\"", "def", "last_number", "(", "lst", ")", ":", "if", "all", "(", "map", "(", "lambda", "x", ":", "x", "==", "0", ",", "lst", ")", ")", ":", "return", "0", "elif", "lst", "[", "-", "1", "]", "!=", "0", ":", "return", "len", "(", "lst", ")", "-", "1", "else", ":", "return", "last_number", "(", "lst", "[", ":", "-", "1", "]", ")", "def", "fill_others", "(", "lst", ")", ":", "new_lst", "=", "[", "0", "]", "*", "len", "(", "lst", ")", "new_lst", "[", "last_number", "(", "lst", ")", "]", "=", "lst", "[", "last_number", "(", "lst", ")", "]", "return", "new_lst", "df_zeroed", "=", "df", ".", "fillna", "(", "value", "=", "0", ")", "# ,inplace = True Get rid of non numeric items. Make a ?deep? 
copy", "df_result", "=", "df_zeroed", ".", "apply", "(", "lambda", "x", ":", "fill_others", "(", "x", ".", "values", ".", "tolist", "(", ")", ")", ",", "axis", "=", "1", ")", "return", "df_result", "# Define helper function", "def", "hide_greater_than_date", "(", "cell", ",", "adate", ")", ":", "\"\"\" Helper function to compare date values in cells\n \"\"\"", "result", "=", "False", "try", ":", "celldatetime", "=", "datetime", ".", "date", "(", "cell", ".", "year", ",", "cell", ".", "month", ",", "cell", ".", "day", ")", "except", ":", "return", "True", "if", "celldatetime", ">", "adate", ":", "return", "True", "return", "False", "# We have a date value in cell and it is less than or equal to input date", "# Helper function", "def", "appendDFToCSV", "(", "df", ",", "csvFilePath", ",", "sep", "=", "\"\\t\"", ",", "date_format", "=", "'%Y-%m-%d'", ",", "encoding", "=", "'utf-8'", ")", ":", "import", "os", "if", "not", "os", ".", "path", ".", "isfile", "(", "csvFilePath", ")", ":", "df", ".", "to_csv", "(", "csvFilePath", ",", "mode", "=", "'a'", ",", "index", "=", "False", ",", "sep", "=", "sep", ",", "date_format", "=", "date_format", ",", "encoding", "=", "encoding", ")", "elif", "len", "(", "df", ".", "columns", ")", "!=", "len", "(", "pd", ".", "read_csv", "(", "csvFilePath", ",", "nrows", "=", "1", ",", "sep", "=", "sep", ")", ".", "columns", ")", ":", "raise", "Exception", "(", "\"Columns do not match!! Dataframe has \"", "+", "str", "(", "len", "(", "df", ".", "columns", ")", ")", "+", "\" columns. CSV file has \"", "+", "str", "(", "len", "(", "pd", ".", "read_csv", "(", "csvFilePath", ",", "nrows", "=", "1", ",", "sep", "=", "sep", ")", ".", "columns", ")", ")", "+", "\" columns.\"", ")", "elif", "not", "(", "df", ".", "columns", "==", "pd", ".", "read_csv", "(", "csvFilePath", ",", "nrows", "=", "1", ",", "sep", "=", "sep", ")", ".", "columns", ")", ".", "all", "(", ")", ":", "raise", "Exception", "(", "\"Columns and column order of dataframe and csv file do not match!!\"", ")", "else", ":", "df", ".", "to_csv", "(", "csvFilePath", ",", "mode", "=", "'a'", ",", "index", "=", "False", ",", "sep", "=", "sep", ",", "header", "=", "False", ",", "date_format", "=", "date_format", ",", "encoding", "=", "encoding", ")", "#print(pointscolumn)", "# List of all state change columns that may have date value in them", "cycle_names", "=", "[", "s", "[", "'name'", "]", "for", "s", "in", "self", ".", "settings", "[", "'cycle'", "]", "]", "# Create list of columns that we want to return in our results dataFrame", "slice_columns", "=", "list", "(", "self", ".", "settings", "[", "'none_sized_statuses'", "]", ")", "# Make a COPY of the list so that we dont modify the reference.", "if", "pointscolumn", ":", "for", "size_state", "in", "self", ".", "settings", "[", "'sized_statuses'", "]", ":", "# states_to_size:", "sizedStateName", "=", "size_state", "+", "'Sized'", "slice_columns", ".", "append", "(", "sizedStateName", ")", "# Check that it works if we use all columns as sized.", "slice_columns", "=", "[", "]", "for", "size_state", "in", "cycle_names", ":", "sizedStateName", "=", "size_state", "+", "'Sized'", "slice_columns", ".", "append", "(", "sizedStateName", ")", "else", ":", "slice_columns", "=", "cycle_names", "# Build a dataframe of just the \"date\" columns", "df", "=", "cycle_data", "[", "cycle_names", "]", ".", "copy", "(", ")", "# Strip out times from all dates", "df", "=", "pd", ".", "DataFrame", "(", "np", ".", "array", "(", "df", ".", "values", ",", "dtype", "=", "'<M8[ns]'", ")", ".", 
"astype", "(", "'<M8[D]'", ")", ".", "astype", "(", "'<M8[ns]'", ")", ",", "columns", "=", "df", ".", "columns", ",", "index", "=", "df", ".", "index", ")", "# No history provided this thus we return dataframe with just column headers.", "if", "size_history", "is", "None", ":", "return", "df", "# Get a list of dates that a issue changed state", "state_changes_on_dates_set", "=", "set", "(", ")", "for", "state", "in", "cycle_names", ":", "state_changes_on_dates_set", "=", "state_changes_on_dates_set", ".", "union", "(", "set", "(", "df", "[", "state", "]", ")", ")", "# How many unique days did a issue stage state", "# Remove non timestamp vlaues and sort the list", "state_changes_on_dates", "=", "filter", "(", "lambda", "x", ":", "type", "(", "x", ".", "date", "(", ")", ")", "==", "datetime", ".", "date", ",", "sorted", "(", "list", "(", "state_changes_on_dates_set", ")", ")", ")", "# Replace missing NaT values (happens if a status is skipped) with the subsequent timestamp", "df", "=", "df", ".", "fillna", "(", "method", "=", "'bfill'", ",", "axis", "=", "1", ")", "if", "pointscolumn", ":", "storypoints", "=", "cycle_data", "[", "pointscolumn", "]", "# As at today", "ids", "=", "cycle_data", "[", "'key'", "]", "# create blank results dataframe", "df_results", "=", "pd", ".", "DataFrame", "(", ")", "# For each date on which we had a issue state change we want to count and sum the totals for each of the given states", "# 'Open','Analysis','Backlog','In Process','Done','Withdrawn'", "timenowstr", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "'-run-%Y-%m-%d_%H-%M-%S'", ")", "for", "date_index", ",", "statechangedate", "in", "enumerate", "(", "state_changes_on_dates", ")", ":", "if", "date_index", "%", "10", "==", "0", ":", "# Print out Progress every tenth", "pass", "#print(\"CFD state change {} of {} \".format(date_index,len(state_changes_on_dates)))", "if", "type", "(", "statechangedate", ".", "date", "(", ")", ")", "==", "datetime", ".", "date", ":", "# filterdate.year,filterdate.month,filterdate.day", "filterdate", "=", "datetime", ".", "date", "(", "statechangedate", ".", "year", ",", "statechangedate", ".", "month", ",", "statechangedate", ".", "day", ")", "# statechangedate.datetime()", "# Apply function to each cell and only make it visible if issue was in state on or after the filter date", "df_filtered", "=", "df", ".", "applymap", "(", "lambda", "x", ":", "0", "if", "hide_greater_than_date", "(", "x", ",", "filterdate", ")", "else", "1", ")", "if", "stacked", ":", "df_filtered", "=", "keeprightmoststate", "(", "df_filtered", ")", "if", "pointscolumn", "and", "(", "size_history", "is", "not", "None", ")", ":", "# For debug", "#if filterdate.isoformat() == '2016-11-22':", "# size_history.loc[filterdate.isoformat()].to_csv(\"debug-size-history.csv\")", "storypoints_series_on", "=", "size_history", ".", "loc", "[", "filterdate", ".", "isoformat", "(", ")", "]", ".", "T", "df_size_on_day", "=", "pd", ".", "Series", ".", "to_frame", "(", "storypoints_series_on", ")", "df_size_on_day", ".", "columns", "=", "[", "pointscolumn", "]", "# Make sure get size data in the same sequence as ids.", "left", "=", "pd", ".", "Series", ".", "to_frame", "(", "ids", ")", "right", "=", "df_size_on_day", "result", "=", "left", ".", "join", "(", "right", ",", "on", "=", "[", "'key'", "]", ")", "# http://pandas.pydata.org/pandas-docs/stable/merging.html\\", "df_countable", "=", "pd", ".", "concat", "(", "[", "result", ",", "df_filtered", "]", ",", "axis", "=", "1", ")", "# for 
debuging and analytics append the days state to file", "df_countable", "[", "'date'", "]", "=", "filterdate", ".", "isoformat", "(", ")", "if", "stacked", ":", "file_name", "=", "\"daily-cfd-stacked-run-at\"", "+", "timenowstr", "+", "\".csv\"", "else", ":", "file_name", "=", "\"daily-cfd-run-at\"", "+", "timenowstr", "+", "\".csv\"", "appendDFToCSV", "(", "df_countable", ",", "file_name", ")", "else", ":", "df_countable", "=", "df_filtered", "# Because we size issues with Story Points we need to add some additional columns", "# for each state based on size not just count", "if", "pointscolumn", ":", "for", "size_state", "in", "self", ".", "settings", "[", "'sized_statuses'", "]", ":", "#states_to_size:", "sizedStateName", "=", "size_state", "+", "'Sized'", "df_countable", "[", "sizedStateName", "]", "=", "df_countable", ".", "apply", "(", "lambda", "row", ":", "(", "row", "[", "pointscolumn", "]", "*", "row", "[", "size_state", "]", ")", ",", "axis", "=", "1", ")", "# For debugging write dataframe to sheet for current day.", "#file_name=\"countable-cfd-for-day-\"+ filterdate.isoformat()+timenowstr+\".csv\"", "#df_countable.to_csv(file_name, sep='\\t', encoding='utf-8', quoting=csv.QUOTE_ALL)", "df_slice", "=", "df_countable", ".", "loc", "[", ":", ",", "slice_columns", "]", ".", "copy", "(", ")", "df_sub_sum", "=", "cumulativeColumnStates", "(", "df_slice", ",", "stacked", ")", "final_table", "=", "df_sub_sum", ".", "rename", "(", "index", "=", "{", "0", ":", "filterdate", "}", ")", "# append to results", "df_results", "=", "df_results", ".", "append", "(", "final_table", ")", "df_results", ".", "sort_index", "(", "inplace", "=", "True", ")", "df", "=", "df_results", "# Count number of times each date occurs, preserving column order", "#df = pd.concat({col: df[col].value_counts() for col in df}, axis=1)[cycle_names]", "# Fill missing dates with 0 and run a cumulative sum", "#df = df.fillna(0).cumsum(axis=0)", "# Reindex to make sure we have all dates", "start", ",", "end", "=", "df", ".", "index", ".", "min", "(", ")", ",", "df", ".", "index", ".", "max", "(", ")", "try", ":", "# If we have no change history we will not have any data in the df and will get a ValueError on reindex", "df", "=", "df", ".", "reindex", "(", "pd", ".", "date_range", "(", "start", ",", "end", ",", "freq", "=", "'D'", ")", ",", "method", "=", "'ffill'", ")", "except", "ValueError", ":", "pass", "return", "df" ]
Return the data to build a cumulative flow diagram: a DataFrame, indexed by day, with columns containing cumulative counts for each of the items in the configured cycle. In addition, a column called `cycle_time` contains the approximate average cycle time of that day based on the first "accepted" status and the first "complete" status. If stacked = True then return a dataframe suitable for plotting as a stacked area chart, else return one suitable for plotting as a non-stacked or line chart.
[ "Return", "the", "data", "to", "build", "a", "cumulative", "flow", "diagram", ":", "a", "DataFrame", "indexed", "by", "day", "with", "columns", "containing", "cumulative", "counts", "for", "each", "of", "the", "items", "in", "the", "configured", "cycle", "." ]
train
https://github.com/rnwolf/jira-metrics-extract/blob/56443211b3e1200f3def79173a21e0232332ae17/jira_metrics_extract/cycletime.py#L410-L641
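Note: for the stacked variant of the CFD, cfd() counts each issue only in its right-most (latest) workflow state. A small sketch of that "keep right-most state" helper, using hypothetical 0/1 rows rather than real data:

def last_number(lst):
    # Index of the right-most non-zero entry (0 if the row is all zeros).
    if all(x == 0 for x in lst):
        return 0
    elif lst[-1] != 0:
        return len(lst) - 1
    else:
        return last_number(lst[:-1])

def fill_others(lst):
    # Zero everything except the right-most non-zero entry.
    new_lst = [0] * len(lst)
    new_lst[last_number(lst)] = lst[last_number(lst)]
    return new_lst

print(fill_others([1, 1, 1, 0]))  # [0, 0, 1, 0] -> counted only in the third state
print(fill_others([0, 0, 0, 0]))  # [0, 0, 0, 0]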
rnwolf/jira-metrics-extract
jira_metrics_extract/cycletime.py
CycleTimeQueries.histogram
def histogram(self, cycle_data, bins=10): """Return histogram data for the cycle times in `cycle_data`. Returns a dictionary with keys `bin_values` and `bin_edges` of numpy arrays """ values, edges = np.histogram(cycle_data['cycle_time'].astype('timedelta64[D]').dropna(), bins=bins) index = [] for i, v in enumerate(edges): if i == 0: continue index.append("%.01f to %.01f" % (edges[i - 1], edges[i],)) return pd.Series(values, name="Items", index=index)
python
def histogram(self, cycle_data, bins=10): """Return histogram data for the cycle times in `cycle_data`. Returns a dictionary with keys `bin_values` and `bin_edges` of numpy arrays """ values, edges = np.histogram(cycle_data['cycle_time'].astype('timedelta64[D]').dropna(), bins=bins) index = [] for i, v in enumerate(edges): if i == 0: continue index.append("%.01f to %.01f" % (edges[i - 1], edges[i],)) return pd.Series(values, name="Items", index=index)
[ "def", "histogram", "(", "self", ",", "cycle_data", ",", "bins", "=", "10", ")", ":", "values", ",", "edges", "=", "np", ".", "histogram", "(", "cycle_data", "[", "'cycle_time'", "]", ".", "astype", "(", "'timedelta64[D]'", ")", ".", "dropna", "(", ")", ",", "bins", "=", "bins", ")", "index", "=", "[", "]", "for", "i", ",", "v", "in", "enumerate", "(", "edges", ")", ":", "if", "i", "==", "0", ":", "continue", "index", ".", "append", "(", "\"%.01f to %.01f\"", "%", "(", "edges", "[", "i", "-", "1", "]", ",", "edges", "[", "i", "]", ",", ")", ")", "return", "pd", ".", "Series", "(", "values", ",", "name", "=", "\"Items\"", ",", "index", "=", "index", ")" ]
Return histogram data for the cycle times in `cycle_data`. Returns a dictionary with keys `bin_values` and `bin_edges` of numpy arrays
[ "Return", "histogram", "data", "for", "the", "cycle", "times", "in", "cycle_data", ".", "Returns", "a", "dictionary", "with", "keys", "bin_values", "and", "bin_edges", "of", "numpy", "arrays" ]
train
https://github.com/rnwolf/jira-metrics-extract/blob/56443211b3e1200f3def79173a21e0232332ae17/jira_metrics_extract/cycletime.py#L644-L656
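Note: a short usage sketch of the binning performed by histogram() above, with made-up cycle times. The original casts with astype('timedelta64[D]'); the sketch uses .dt.days, which yields the same whole-day values on current pandas:

import numpy as np
import pandas as pd

cycle_times = pd.Series(pd.to_timedelta([3, 5, 8, 13, 21], unit='D'))  # made-up data
values, edges = np.histogram(cycle_times.dt.days.dropna(), bins=3)
labels = ['%.01f to %.01f' % (edges[i - 1], edges[i]) for i in range(1, len(edges))]
print(pd.Series(values, name='Items', index=labels))  # e.g. '3.0 to 9.0' -> 3 items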
rnwolf/jira-metrics-extract
jira_metrics_extract/cycletime.py
CycleTimeQueries.throughput_data
def throughput_data(self, cycle_data, frequency='1D',pointscolumn= None): """Return a data frame with columns `completed_timestamp` of the given frequency, either `count`, where count is the number of items 'sum', where sum is the sum of value specified by pointscolumn. Expected to be 'StoryPoints' completed at that timestamp (e.g. daily). """ if len(cycle_data)<1: return None # Note completed items yet, return None if pointscolumn: return cycle_data[['completed_timestamp', pointscolumn]] \ .rename(columns={pointscolumn: 'sum'}) \ .groupby('completed_timestamp').sum() \ .resample(frequency).sum() \ .fillna(0) else: return cycle_data[['completed_timestamp', 'key']] \ .rename(columns={'key': 'count'}) \ .groupby('completed_timestamp').count() \ .resample(frequency).sum() \ .fillna(0)
python
def throughput_data(self, cycle_data, frequency='1D',pointscolumn= None): """Return a data frame with columns `completed_timestamp` of the given frequency, either `count`, where count is the number of items 'sum', where sum is the sum of value specified by pointscolumn. Expected to be 'StoryPoints' completed at that timestamp (e.g. daily). """ if len(cycle_data)<1: return None # Note completed items yet, return None if pointscolumn: return cycle_data[['completed_timestamp', pointscolumn]] \ .rename(columns={pointscolumn: 'sum'}) \ .groupby('completed_timestamp').sum() \ .resample(frequency).sum() \ .fillna(0) else: return cycle_data[['completed_timestamp', 'key']] \ .rename(columns={'key': 'count'}) \ .groupby('completed_timestamp').count() \ .resample(frequency).sum() \ .fillna(0)
[ "def", "throughput_data", "(", "self", ",", "cycle_data", ",", "frequency", "=", "'1D'", ",", "pointscolumn", "=", "None", ")", ":", "if", "len", "(", "cycle_data", ")", "<", "1", ":", "return", "None", "# Note completed items yet, return None", "if", "pointscolumn", ":", "return", "cycle_data", "[", "[", "'completed_timestamp'", ",", "pointscolumn", "]", "]", ".", "rename", "(", "columns", "=", "{", "pointscolumn", ":", "'sum'", "}", ")", ".", "groupby", "(", "'completed_timestamp'", ")", ".", "sum", "(", ")", ".", "resample", "(", "frequency", ")", ".", "sum", "(", ")", ".", "fillna", "(", "0", ")", "else", ":", "return", "cycle_data", "[", "[", "'completed_timestamp'", ",", "'key'", "]", "]", ".", "rename", "(", "columns", "=", "{", "'key'", ":", "'count'", "}", ")", ".", "groupby", "(", "'completed_timestamp'", ")", ".", "count", "(", ")", ".", "resample", "(", "frequency", ")", ".", "sum", "(", ")", ".", "fillna", "(", "0", ")" ]
Return a data frame with columns `completed_timestamp` of the given frequency, and either `count`, where count is the number of items, or `sum`, where sum is the sum of the value specified by pointscolumn (expected to be 'StoryPoints'), completed at that timestamp (e.g. daily).
[ "Return", "a", "data", "frame", "with", "columns", "completed_timestamp", "of", "the", "given", "frequency", "either", "count", "where", "count", "is", "the", "number", "of", "items", "sum", "where", "sum", "is", "the", "sum", "of", "value", "specified", "by", "pointscolumn", ".", "Expected", "to", "be", "StoryPoints", "completed", "at", "that", "timestamp", "(", "e", ".", "g", ".", "daily", ")", "." ]
train
https://github.com/rnwolf/jira-metrics-extract/blob/56443211b3e1200f3def79173a21e0232332ae17/jira_metrics_extract/cycletime.py#L658-L679
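Note: a minimal sketch of the count branch of throughput_data() above (no pointscolumn), on a made-up three-row frame: it groups by completion date, resamples to daily buckets and fills the gaps with zero:

import pandas as pd

cycle_data = pd.DataFrame({
    'key': ['AB-1', 'AB-2', 'AB-3'],  # hypothetical issue keys
    'completed_timestamp': pd.to_datetime(['2018-01-01', '2018-01-01', '2018-01-04']),
})

throughput = (cycle_data[['completed_timestamp', 'key']]
              .rename(columns={'key': 'count'})
              .groupby('completed_timestamp').count()
              .resample('1D').sum()
              .fillna(0))
print(throughput)  # 2 on Jan 1, 0 on Jan 2-3, 1 on Jan 4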
rnwolf/jira-metrics-extract
jira_metrics_extract/cycletime.py
CycleTimeQueries.scatterplot
def scatterplot(self, cycle_data): """Return scatterplot data for the cycle times in `cycle_data`. Returns a data frame containing only those items in `cycle_data` where values are set for `completed_timestamp` and `cycle_time`, and with those two columns as the first two, both normalised to whole days, and with `completed_timestamp` renamed to `completed_date`. """ columns = list(cycle_data.columns) columns.remove('cycle_time') columns.remove('completed_timestamp') columns = ['completed_timestamp', 'cycle_time'] + columns data = ( cycle_data[columns] .dropna(subset=['cycle_time', 'completed_timestamp']) .rename(columns={'completed_timestamp': 'completed_date'}) ) data['cycle_time'] = data['cycle_time'].astype('timedelta64[D]') data['completed_date'] = data['completed_date'].map(pd.Timestamp.date) return data
python
def scatterplot(self, cycle_data): """Return scatterplot data for the cycle times in `cycle_data`. Returns a data frame containing only those items in `cycle_data` where values are set for `completed_timestamp` and `cycle_time`, and with those two columns as the first two, both normalised to whole days, and with `completed_timestamp` renamed to `completed_date`. """ columns = list(cycle_data.columns) columns.remove('cycle_time') columns.remove('completed_timestamp') columns = ['completed_timestamp', 'cycle_time'] + columns data = ( cycle_data[columns] .dropna(subset=['cycle_time', 'completed_timestamp']) .rename(columns={'completed_timestamp': 'completed_date'}) ) data['cycle_time'] = data['cycle_time'].astype('timedelta64[D]') data['completed_date'] = data['completed_date'].map(pd.Timestamp.date) return data
[ "def", "scatterplot", "(", "self", ",", "cycle_data", ")", ":", "columns", "=", "list", "(", "cycle_data", ".", "columns", ")", "columns", ".", "remove", "(", "'cycle_time'", ")", "columns", ".", "remove", "(", "'completed_timestamp'", ")", "columns", "=", "[", "'completed_timestamp'", ",", "'cycle_time'", "]", "+", "columns", "data", "=", "(", "cycle_data", "[", "columns", "]", ".", "dropna", "(", "subset", "=", "[", "'cycle_time'", ",", "'completed_timestamp'", "]", ")", ".", "rename", "(", "columns", "=", "{", "'completed_timestamp'", ":", "'completed_date'", "}", ")", ")", "data", "[", "'cycle_time'", "]", "=", "data", "[", "'cycle_time'", "]", ".", "astype", "(", "'timedelta64[D]'", ")", "data", "[", "'completed_date'", "]", "=", "data", "[", "'completed_date'", "]", ".", "map", "(", "pd", ".", "Timestamp", ".", "date", ")", "return", "data" ]
Return scatterplot data for the cycle times in `cycle_data`. Returns a data frame containing only those items in `cycle_data` where values are set for `completed_timestamp` and `cycle_time`, and with those two columns as the first two, both normalised to whole days, and with `completed_timestamp` renamed to `completed_date`.
[ "Return", "scatterplot", "data", "for", "the", "cycle", "times", "in", "cycle_data", ".", "Returns", "a", "data", "frame", "containing", "only", "those", "items", "in", "cycle_data", "where", "values", "are", "set", "for", "completed_timestamp", "and", "cycle_time", "and", "with", "those", "two", "columns", "as", "the", "first", "two", "both", "normalised", "to", "whole", "days", "and", "with", "completed_timestamp", "renamed", "to", "completed_date", "." ]
train
https://github.com/rnwolf/jira-metrics-extract/blob/56443211b3e1200f3def79173a21e0232332ae17/jira_metrics_extract/cycletime.py#L681-L703
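Note: a brief sketch of the normalisation done by scatterplot() above, on a made-up two-row frame: rows missing either timestamp are dropped, cycle_time is reduced to whole days (.dt.days here, where the original casts with astype('timedelta64[D]')) and the completion timestamp to a plain date:

import pandas as pd

df = pd.DataFrame({
    'key': ['AB-1', 'AB-2'],  # hypothetical issue keys
    'completed_timestamp': pd.to_datetime(['2018-01-09 14:30', None]),
    'cycle_time': pd.to_timedelta(['7 days 06:00:00', None]),
})

data = df.dropna(subset=['cycle_time', 'completed_timestamp']).copy()
data['cycle_time'] = data['cycle_time'].dt.days                       # whole days
data['completed_date'] = data['completed_timestamp'].map(pd.Timestamp.date)
print(data[['key', 'completed_date', 'cycle_time']])  # AB-1, 2018-01-09, 7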
deep-compute/logagg
logagg/nsqsender.py
NSQSender._is_ready
def _is_ready(self, topic_name): ''' Is NSQ running and have space to receive messages? ''' url = 'http://%s/stats?format=json&topic=%s' % (self.nsqd_http_address, topic_name) #Cheacking for ephmeral channels if '#' in topic_name: topic_name, tag =topic_name.split("#", 1) try: data = self.session.get(url).json() ''' data = {u'start_time': 1516164866, u'version': u'1.0.0-compat', \ u'health': u'OK', u'topics': [{u'message_count': 19019, \ u'paused': False, u'topic_name': u'test_topic', u'channels': [], \ u'depth': 19019, u'backend_depth': 9019, u'e2e_processing_latency': {u'count': 0, \ u'percentiles': None}}]} ''' topics = data.get('topics', []) topics = [t for t in topics if t['topic_name'] == topic_name] if not topics: raise Exception('topic_missing_at_nsq') topic = topics[0] depth = topic['depth'] depth += sum(c.get('depth', 0) for c in topic['channels']) self.log.debug('nsq_depth_check', topic=topic_name, depth=depth, max_depth=self.nsq_max_depth) if depth < self.nsq_max_depth: return else: raise Exception('nsq_is_full_waiting_to_clear') except: raise
python
def _is_ready(self, topic_name): ''' Is NSQ running and have space to receive messages? ''' url = 'http://%s/stats?format=json&topic=%s' % (self.nsqd_http_address, topic_name) #Cheacking for ephmeral channels if '#' in topic_name: topic_name, tag =topic_name.split("#", 1) try: data = self.session.get(url).json() ''' data = {u'start_time': 1516164866, u'version': u'1.0.0-compat', \ u'health': u'OK', u'topics': [{u'message_count': 19019, \ u'paused': False, u'topic_name': u'test_topic', u'channels': [], \ u'depth': 19019, u'backend_depth': 9019, u'e2e_processing_latency': {u'count': 0, \ u'percentiles': None}}]} ''' topics = data.get('topics', []) topics = [t for t in topics if t['topic_name'] == topic_name] if not topics: raise Exception('topic_missing_at_nsq') topic = topics[0] depth = topic['depth'] depth += sum(c.get('depth', 0) for c in topic['channels']) self.log.debug('nsq_depth_check', topic=topic_name, depth=depth, max_depth=self.nsq_max_depth) if depth < self.nsq_max_depth: return else: raise Exception('nsq_is_full_waiting_to_clear') except: raise
[ "def", "_is_ready", "(", "self", ",", "topic_name", ")", ":", "url", "=", "'http://%s/stats?format=json&topic=%s'", "%", "(", "self", ".", "nsqd_http_address", ",", "topic_name", ")", "#Cheacking for ephmeral channels", "if", "'#'", "in", "topic_name", ":", "topic_name", ",", "tag", "=", "topic_name", ".", "split", "(", "\"#\"", ",", "1", ")", "try", ":", "data", "=", "self", ".", "session", ".", "get", "(", "url", ")", ".", "json", "(", ")", "'''\n data = {u'start_time': 1516164866, u'version': u'1.0.0-compat', \\\n u'health': u'OK', u'topics': [{u'message_count': 19019, \\\n u'paused': False, u'topic_name': u'test_topic', u'channels': [], \\\n u'depth': 19019, u'backend_depth': 9019, u'e2e_processing_latency': {u'count': 0, \\\n u'percentiles': None}}]}\n '''", "topics", "=", "data", ".", "get", "(", "'topics'", ",", "[", "]", ")", "topics", "=", "[", "t", "for", "t", "in", "topics", "if", "t", "[", "'topic_name'", "]", "==", "topic_name", "]", "if", "not", "topics", ":", "raise", "Exception", "(", "'topic_missing_at_nsq'", ")", "topic", "=", "topics", "[", "0", "]", "depth", "=", "topic", "[", "'depth'", "]", "depth", "+=", "sum", "(", "c", ".", "get", "(", "'depth'", ",", "0", ")", "for", "c", "in", "topic", "[", "'channels'", "]", ")", "self", ".", "log", ".", "debug", "(", "'nsq_depth_check'", ",", "topic", "=", "topic_name", ",", "depth", "=", "depth", ",", "max_depth", "=", "self", ".", "nsq_max_depth", ")", "if", "depth", "<", "self", ".", "nsq_max_depth", ":", "return", "else", ":", "raise", "Exception", "(", "'nsq_is_full_waiting_to_clear'", ")", "except", ":", "raise" ]
Is NSQ running and have space to receive messages?
[ "Is", "NSQ", "running", "and", "have", "space", "to", "receive", "messages?" ]
train
https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/nsqsender.py#L39-L74
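Note: a sketch (with a made-up stats payload) of the depth check that _is_ready() performs against the nsqd /stats endpoint: the total backlog is the topic depth plus all channel depths, and publishing continues only while that total stays below nsq_max_depth:

# Made-up response in the shape the code above reads from nsqd's /stats?format=json.
stats = {'topics': [{'topic_name': 'test_topic', 'depth': 19019,
                     'channels': [{'depth': 12}, {'depth': 3}]}]}
nsq_max_depth = 25000  # hypothetical limit

topic = [t for t in stats['topics'] if t['topic_name'] == 'test_topic'][0]
depth = topic['depth'] + sum(c.get('depth', 0) for c in topic['channels'])
print(depth, depth < nsq_max_depth)  # 19034 True -> safe to keep publishing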
cimm-kzn/CGRtools
CGRtools/containers/query.py
QueryContainer._matcher
def _matcher(self, other): """ QueryContainer < MoleculeContainer QueryContainer < QueryContainer[more general] QueryContainer < QueryCGRContainer[more general] """ if isinstance(other, MoleculeContainer): return GraphMatcher(other, self, lambda x, y: y == x, lambda x, y: y == x) elif isinstance(other, (QueryContainer, QueryCGRContainer)): return GraphMatcher(other, self, lambda x, y: x == y, lambda x, y: x == y) raise TypeError('only query-molecule, query-query or query-cgr_query possible')
python
def _matcher(self, other): """ QueryContainer < MoleculeContainer QueryContainer < QueryContainer[more general] QueryContainer < QueryCGRContainer[more general] """ if isinstance(other, MoleculeContainer): return GraphMatcher(other, self, lambda x, y: y == x, lambda x, y: y == x) elif isinstance(other, (QueryContainer, QueryCGRContainer)): return GraphMatcher(other, self, lambda x, y: x == y, lambda x, y: x == y) raise TypeError('only query-molecule, query-query or query-cgr_query possible')
[ "def", "_matcher", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "MoleculeContainer", ")", ":", "return", "GraphMatcher", "(", "other", ",", "self", ",", "lambda", "x", ",", "y", ":", "y", "==", "x", ",", "lambda", "x", ",", "y", ":", "y", "==", "x", ")", "elif", "isinstance", "(", "other", ",", "(", "QueryContainer", ",", "QueryCGRContainer", ")", ")", ":", "return", "GraphMatcher", "(", "other", ",", "self", ",", "lambda", "x", ",", "y", ":", "x", "==", "y", ",", "lambda", "x", ",", "y", ":", "x", "==", "y", ")", "raise", "TypeError", "(", "'only query-molecule, query-query or query-cgr_query possible'", ")" ]
QueryContainer < MoleculeContainer QueryContainer < QueryContainer[more general] QueryContainer < QueryCGRContainer[more general]
[ "QueryContainer", "<", "MoleculeContainer", "QueryContainer", "<", "QueryContainer", "[", "more", "general", "]", "QueryContainer", "<", "QueryCGRContainer", "[", "more", "general", "]" ]
train
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/containers/query.py#L31-L41
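The matcher above delegates to networkx's VF2 GraphMatcher with custom comparison callbacks. A minimal sketch on plain networkx graphs (not CGRtools containers), where the callbacks compare attribute dicts much as the container compares its atoms and bonds:

import networkx as nx
from networkx.algorithms.isomorphism import GraphMatcher

# Target "molecule" and a smaller "query" pattern, with element labels as node
# attributes; the lambdas receive attribute dicts for the candidate node/edge pair.
target = nx.Graph()
target.add_nodes_from([(1, {'element': 'C'}), (2, {'element': 'O'}), (3, {'element': 'C'})])
target.add_edges_from([(1, 2, {'order': 1}), (2, 3, {'order': 1})])

query = nx.Graph()
query.add_nodes_from([(1, {'element': 'C'}), (2, {'element': 'O'})])
query.add_edge(1, 2, order=1)

gm = GraphMatcher(target, query,
                  node_match=lambda x, y: x['element'] == y['element'],
                  edge_match=lambda x, y: x['order'] == y['order'])
print(gm.subgraph_is_isomorphic())  # True: the C-O query is embedded in the target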
cimm-kzn/CGRtools
CGRtools/containers/query.py
QueryCGRContainer._matcher
def _matcher(self, other): """ QueryCGRContainer < CGRContainer QueryContainer < QueryCGRContainer[more general] """ if isinstance(other, CGRContainer): return GraphMatcher(other, self, lambda x, y: y == x, lambda x, y: y == x) elif isinstance(other, QueryCGRContainer): return GraphMatcher(other, self, lambda x, y: x == y, lambda x, y: x == y) raise TypeError('only cgr_query-cgr or cgr_query-cgr_query possible')
python
def _matcher(self, other): """ QueryCGRContainer < CGRContainer QueryContainer < QueryCGRContainer[more general] """ if isinstance(other, CGRContainer): return GraphMatcher(other, self, lambda x, y: y == x, lambda x, y: y == x) elif isinstance(other, QueryCGRContainer): return GraphMatcher(other, self, lambda x, y: x == y, lambda x, y: x == y) raise TypeError('only cgr_query-cgr or cgr_query-cgr_query possible')
[ "def", "_matcher", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "CGRContainer", ")", ":", "return", "GraphMatcher", "(", "other", ",", "self", ",", "lambda", "x", ",", "y", ":", "y", "==", "x", ",", "lambda", "x", ",", "y", ":", "y", "==", "x", ")", "elif", "isinstance", "(", "other", ",", "QueryCGRContainer", ")", ":", "return", "GraphMatcher", "(", "other", ",", "self", ",", "lambda", "x", ",", "y", ":", "x", "==", "y", ",", "lambda", "x", ",", "y", ":", "x", "==", "y", ")", "raise", "TypeError", "(", "'only cgr_query-cgr or cgr_query-cgr_query possible'", ")" ]
QueryCGRContainer < CGRContainer QueryContainer < QueryCGRContainer[more general]
[ "QueryCGRContainer", "<", "CGRContainer", "QueryContainer", "<", "QueryCGRContainer", "[", "more", "general", "]" ]
train
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/containers/query.py#L48-L57
cimm-kzn/CGRtools
CGRtools/algorithms/calculate2d.py
Calculate2D.calculate2d
def calculate2d(self, force=False, scale=1): """ recalculate 2d coordinates. currently rings can be calculated badly. :param scale: rescale calculated positions. :param force: ignore existing coordinates of atoms """ dist = {} # length forces for n, m_bond in self._adj.items(): dist[n] = {} for m in m_bond: dist[n][m] = .825 # angle forces for n, m_bond in self._adj.items(): if len(m_bond) == 2: # single-single or single-double bonds has angle = 120, other 180 (m1, b1), (m2, b2) = m_bond.items() dist[m1][m2] = dist[m2][m1] = 1.43 if b1.order + b2.order in (2, 3) else 1.7 # +.05 elif len(m_bond) == 3: m1, m2, m3 = m_bond dist[m1][m2] = dist[m1][m3] = dist[m2][m3] = dist[m3][m2] = dist[m2][m1] = dist[m3][m1] = 1.43 elif len(m_bond) == 4: # 1 # # 2 X 4 # # 3 m1, m2, m3, m4 = m_bond dist[m1][m2] = dist[m1][m4] = dist[m2][m1] = dist[m2][m3] = 1.17 dist[m3][m2] = dist[m3][m4] = dist[m4][m1] = dist[m4][m3] = 1.17 dist[m1][m3] = dist[m3][m1] = dist[m2][m4] = dist[m4][m2] = 1.7 # +.05 # cycle forces for r in self.sssr: if len(r) == 6: # 6 # # 1 5 # # 2 4 # # 3 m1, m2, m3, m4, m5, m6 = r dist[m1][m4] = dist[m4][m1] = dist[m2][m5] = dist[m5][m2] = dist[m3][m6] = dist[m6][m3] = 1.7 # +.05 if force: pos = None else: pos = {n: (atom.x or uniform(0, .01), atom.y or uniform(0, .01)) for n, atom in self.atoms()} for n, xy in kamada_kawai_layout(self, dist=dict(dist), pos=pos, scale=scale).items(): atom = self._node[n] atom.x, atom.y = xy self.flush_cache()
python
def calculate2d(self, force=False, scale=1): """ recalculate 2d coordinates. currently rings can be calculated badly. :param scale: rescale calculated positions. :param force: ignore existing coordinates of atoms """ dist = {} # length forces for n, m_bond in self._adj.items(): dist[n] = {} for m in m_bond: dist[n][m] = .825 # angle forces for n, m_bond in self._adj.items(): if len(m_bond) == 2: # single-single or single-double bonds has angle = 120, other 180 (m1, b1), (m2, b2) = m_bond.items() dist[m1][m2] = dist[m2][m1] = 1.43 if b1.order + b2.order in (2, 3) else 1.7 # +.05 elif len(m_bond) == 3: m1, m2, m3 = m_bond dist[m1][m2] = dist[m1][m3] = dist[m2][m3] = dist[m3][m2] = dist[m2][m1] = dist[m3][m1] = 1.43 elif len(m_bond) == 4: # 1 # # 2 X 4 # # 3 m1, m2, m3, m4 = m_bond dist[m1][m2] = dist[m1][m4] = dist[m2][m1] = dist[m2][m3] = 1.17 dist[m3][m2] = dist[m3][m4] = dist[m4][m1] = dist[m4][m3] = 1.17 dist[m1][m3] = dist[m3][m1] = dist[m2][m4] = dist[m4][m2] = 1.7 # +.05 # cycle forces for r in self.sssr: if len(r) == 6: # 6 # # 1 5 # # 2 4 # # 3 m1, m2, m3, m4, m5, m6 = r dist[m1][m4] = dist[m4][m1] = dist[m2][m5] = dist[m5][m2] = dist[m3][m6] = dist[m6][m3] = 1.7 # +.05 if force: pos = None else: pos = {n: (atom.x or uniform(0, .01), atom.y or uniform(0, .01)) for n, atom in self.atoms()} for n, xy in kamada_kawai_layout(self, dist=dict(dist), pos=pos, scale=scale).items(): atom = self._node[n] atom.x, atom.y = xy self.flush_cache()
[ "def", "calculate2d", "(", "self", ",", "force", "=", "False", ",", "scale", "=", "1", ")", ":", "dist", "=", "{", "}", "# length forces", "for", "n", ",", "m_bond", "in", "self", ".", "_adj", ".", "items", "(", ")", ":", "dist", "[", "n", "]", "=", "{", "}", "for", "m", "in", "m_bond", ":", "dist", "[", "n", "]", "[", "m", "]", "=", ".825", "# angle forces", "for", "n", ",", "m_bond", "in", "self", ".", "_adj", ".", "items", "(", ")", ":", "if", "len", "(", "m_bond", ")", "==", "2", ":", "# single-single or single-double bonds has angle = 120, other 180", "(", "m1", ",", "b1", ")", ",", "(", "m2", ",", "b2", ")", "=", "m_bond", ".", "items", "(", ")", "dist", "[", "m1", "]", "[", "m2", "]", "=", "dist", "[", "m2", "]", "[", "m1", "]", "=", "1.43", "if", "b1", ".", "order", "+", "b2", ".", "order", "in", "(", "2", ",", "3", ")", "else", "1.7", "# +.05", "elif", "len", "(", "m_bond", ")", "==", "3", ":", "m1", ",", "m2", ",", "m3", "=", "m_bond", "dist", "[", "m1", "]", "[", "m2", "]", "=", "dist", "[", "m1", "]", "[", "m3", "]", "=", "dist", "[", "m2", "]", "[", "m3", "]", "=", "dist", "[", "m3", "]", "[", "m2", "]", "=", "dist", "[", "m2", "]", "[", "m1", "]", "=", "dist", "[", "m3", "]", "[", "m1", "]", "=", "1.43", "elif", "len", "(", "m_bond", ")", "==", "4", ":", "# 1", "#", "# 2 X 4", "#", "# 3", "m1", ",", "m2", ",", "m3", ",", "m4", "=", "m_bond", "dist", "[", "m1", "]", "[", "m2", "]", "=", "dist", "[", "m1", "]", "[", "m4", "]", "=", "dist", "[", "m2", "]", "[", "m1", "]", "=", "dist", "[", "m2", "]", "[", "m3", "]", "=", "1.17", "dist", "[", "m3", "]", "[", "m2", "]", "=", "dist", "[", "m3", "]", "[", "m4", "]", "=", "dist", "[", "m4", "]", "[", "m1", "]", "=", "dist", "[", "m4", "]", "[", "m3", "]", "=", "1.17", "dist", "[", "m1", "]", "[", "m3", "]", "=", "dist", "[", "m3", "]", "[", "m1", "]", "=", "dist", "[", "m2", "]", "[", "m4", "]", "=", "dist", "[", "m4", "]", "[", "m2", "]", "=", "1.7", "# +.05", "# cycle forces", "for", "r", "in", "self", ".", "sssr", ":", "if", "len", "(", "r", ")", "==", "6", ":", "# 6", "#", "# 1 5", "#", "# 2 4", "#", "# 3", "m1", ",", "m2", ",", "m3", ",", "m4", ",", "m5", ",", "m6", "=", "r", "dist", "[", "m1", "]", "[", "m4", "]", "=", "dist", "[", "m4", "]", "[", "m1", "]", "=", "dist", "[", "m2", "]", "[", "m5", "]", "=", "dist", "[", "m5", "]", "[", "m2", "]", "=", "dist", "[", "m3", "]", "[", "m6", "]", "=", "dist", "[", "m6", "]", "[", "m3", "]", "=", "1.7", "# +.05", "if", "force", ":", "pos", "=", "None", "else", ":", "pos", "=", "{", "n", ":", "(", "atom", ".", "x", "or", "uniform", "(", "0", ",", ".01", ")", ",", "atom", ".", "y", "or", "uniform", "(", "0", ",", ".01", ")", ")", "for", "n", ",", "atom", "in", "self", ".", "atoms", "(", ")", "}", "for", "n", ",", "xy", "in", "kamada_kawai_layout", "(", "self", ",", "dist", "=", "dict", "(", "dist", ")", ",", "pos", "=", "pos", ",", "scale", "=", "scale", ")", ".", "items", "(", ")", ":", "atom", "=", "self", ".", "_node", "[", "n", "]", "atom", ".", "x", ",", "atom", ".", "y", "=", "xy", "self", ".", "flush_cache", "(", ")" ]
recalculate 2d coordinates. currently rings can be calculated badly. :param scale: rescale calculated positions. :param force: ignore existing coordinates of atoms
[ "recalculate", "2d", "coordinates", ".", "currently", "rings", "can", "be", "calculated", "badly", "." ]
train
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/algorithms/calculate2d.py#L24-L79
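calculate2d ultimately hands its accumulated target distances to networkx's Kamada-Kawai layout. A small sketch on a toy three-node chain, with illustrative distance values in the spirit of the 0.825 bond and 1.43/1.7 angle targets above:

import networkx as nx

# Toy 3-atom chain; dist holds ideal pairwise distances per node pair.
g = nx.Graph([(1, 2), (2, 3)])
dist = {1: {2: .825, 3: 1.43},
        2: {1: .825, 3: .825},
        3: {1: 1.43, 2: .825}}
# Optional starting coordinates play the role of existing atom x/y values.
pos = {1: (0.0, 0.0), 2: (0.5, 0.0), 3: (1.0, 0.1)}

layout = nx.kamada_kawai_layout(g, dist=dist, pos=pos, scale=1)
for n, (x, y) in layout.items():
    print(n, round(x, 3), round(y, 3))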
LordDarkula/chess_py
chess_py/pieces/knight.py
Knight.possible_moves
def possible_moves(self, position): """ Finds all possible knight moves :type: position Board :rtype: list """ for direction in [0, 1, 2, 3]: angles = self._rotate_direction_ninety_degrees(direction) for angle in angles: try: end_loc = self.location.shift(angle).shift(direction).shift(direction) if position.is_square_empty(end_loc): status = notation_const.MOVEMENT elif not position.piece_at_square(end_loc).color == self.color: status = notation_const.CAPTURE else: continue yield Move(end_loc=end_loc, piece=self, status=status, start_loc=self.location) except IndexError: pass
python
def possible_moves(self, position): """ Finds all possible knight moves :type: position Board :rtype: list """ for direction in [0, 1, 2, 3]: angles = self._rotate_direction_ninety_degrees(direction) for angle in angles: try: end_loc = self.location.shift(angle).shift(direction).shift(direction) if position.is_square_empty(end_loc): status = notation_const.MOVEMENT elif not position.piece_at_square(end_loc).color == self.color: status = notation_const.CAPTURE else: continue yield Move(end_loc=end_loc, piece=self, status=status, start_loc=self.location) except IndexError: pass
[ "def", "possible_moves", "(", "self", ",", "position", ")", ":", "for", "direction", "in", "[", "0", ",", "1", ",", "2", ",", "3", "]", ":", "angles", "=", "self", ".", "_rotate_direction_ninety_degrees", "(", "direction", ")", "for", "angle", "in", "angles", ":", "try", ":", "end_loc", "=", "self", ".", "location", ".", "shift", "(", "angle", ")", ".", "shift", "(", "direction", ")", ".", "shift", "(", "direction", ")", "if", "position", ".", "is_square_empty", "(", "end_loc", ")", ":", "status", "=", "notation_const", ".", "MOVEMENT", "elif", "not", "position", ".", "piece_at_square", "(", "end_loc", ")", ".", "color", "==", "self", ".", "color", ":", "status", "=", "notation_const", ".", "CAPTURE", "else", ":", "continue", "yield", "Move", "(", "end_loc", "=", "end_loc", ",", "piece", "=", "self", ",", "status", "=", "status", ",", "start_loc", "=", "self", ".", "location", ")", "except", "IndexError", ":", "pass" ]
Finds all possible knight moves :type: position Board :rtype: list
[ "Finds", "all", "possible", "knight", "moves", ":", "type", ":", "position", "Board", ":", "rtype", ":", "list" ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/pieces/knight.py#L57-L81
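A self-contained sketch of the same knight-move enumeration on plain (file, rank) tuples, without the library's Location/Move/Board classes; all names below are illustrative:

# Knight move sketch on 0-7 coordinates; 'board' maps occupied squares to 'w'/'b'.
OFFSETS = [(1, 2), (2, 1), (2, -1), (1, -2), (-1, -2), (-2, -1), (-2, 1), (-1, 2)]

def knight_moves(square, color, board):
    f, r = square
    for df, dr in OFFSETS:
        nf, nr = f + df, r + dr
        if not (0 <= nf < 8 and 0 <= nr < 8):
            continue  # off the board, like the IndexError branch in the record
        occupant = board.get((nf, nr))
        if occupant is None:
            yield ((nf, nr), 'MOVEMENT')
        elif occupant != color:
            yield ((nf, nr), 'CAPTURE')  # enemy piece: capture; friendly squares are skipped

print(sorted(knight_moves((0, 0), 'w', {(1, 2): 'b'})))
# [((1, 2), 'CAPTURE'), ((2, 1), 'MOVEMENT')]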
cimm-kzn/CGRtools
CGRtools/containers/cgr.py
CGRContainer.centers_list
def centers_list(self): """ get a list of lists of atoms of reaction centers """ center = set() adj = defaultdict(set) for n, atom in self.atoms(): if atom._reactant != atom._product: center.add(n) for n, m, bond in self.bonds(): if bond._reactant != bond._product: adj[n].add(m) adj[m].add(n) center.add(n) center.add(m) out = [] while center: n = center.pop() if n in adj: c = set(self.__plain_bfs(adj, n)) out.append(list(c)) center.difference_update(c) else: out.append([n]) return out
python
def centers_list(self): """ get a list of lists of atoms of reaction centers """ center = set() adj = defaultdict(set) for n, atom in self.atoms(): if atom._reactant != atom._product: center.add(n) for n, m, bond in self.bonds(): if bond._reactant != bond._product: adj[n].add(m) adj[m].add(n) center.add(n) center.add(m) out = [] while center: n = center.pop() if n in adj: c = set(self.__plain_bfs(adj, n)) out.append(list(c)) center.difference_update(c) else: out.append([n]) return out
[ "def", "centers_list", "(", "self", ")", ":", "center", "=", "set", "(", ")", "adj", "=", "defaultdict", "(", "set", ")", "for", "n", ",", "atom", "in", "self", ".", "atoms", "(", ")", ":", "if", "atom", ".", "_reactant", "!=", "atom", ".", "_product", ":", "center", ".", "add", "(", "n", ")", "for", "n", ",", "m", ",", "bond", "in", "self", ".", "bonds", "(", ")", ":", "if", "bond", ".", "_reactant", "!=", "bond", ".", "_product", ":", "adj", "[", "n", "]", ".", "add", "(", "m", ")", "adj", "[", "m", "]", ".", "add", "(", "n", ")", "center", ".", "add", "(", "n", ")", "center", ".", "add", "(", "m", ")", "out", "=", "[", "]", "while", "center", ":", "n", "=", "center", ".", "pop", "(", ")", "if", "n", "in", "adj", ":", "c", "=", "set", "(", "self", ".", "__plain_bfs", "(", "adj", ",", "n", ")", ")", "out", ".", "append", "(", "list", "(", "c", ")", ")", "center", ".", "difference_update", "(", "c", ")", "else", ":", "out", ".", "append", "(", "[", "n", "]", ")", "return", "out" ]
get a list of lists of atoms of reaction centers
[ "get", "a", "list", "of", "lists", "of", "atoms", "of", "reaction", "centers" ]
train
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/containers/cgr.py#L37-L63
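centers_list is essentially "connected components over dynamic bonds, plus isolated dynamic atoms". A standalone sketch of that grouping with a plain adjacency dict; names are illustrative:

from collections import defaultdict

def grouped_centers(changed_atoms, changed_bonds):
    """Group changed atoms into connected reaction centers (sketch)."""
    center = set(changed_atoms)
    adj = defaultdict(set)
    for n, m in changed_bonds:
        adj[n].add(m)
        adj[m].add(n)
        center.update((n, m))
    out = []
    while center:
        n = center.pop()
        if n in adj:
            # breadth-first walk over dynamic bonds only
            seen, level = set(), {n}
            while level:
                seen.update(level)
                level = {v for u in level for v in adj[u]} - seen
            out.append(sorted(seen))
            center.difference_update(seen)
        else:
            out.append([n])  # isolated dynamic atom (charge/radical change only)
    return out

print(grouped_centers({7}, [(1, 2), (2, 3), (5, 6)]))
# groups [1, 2, 3], [5, 6] and [7], in some order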
cimm-kzn/CGRtools
CGRtools/containers/cgr.py
CGRContainer.center_atoms
def center_atoms(self): """ get list of atoms of reaction center (atoms with dynamic: bonds, charges, radicals). """ nodes = set() for n, atom in self.atoms(): if atom._reactant != atom._product: nodes.add(n) for n, m, bond in self.bonds(): if bond._reactant != bond._product: nodes.add(n) nodes.add(m) return list(nodes)
python
def center_atoms(self): """ get list of atoms of reaction center (atoms with dynamic: bonds, charges, radicals). """ nodes = set() for n, atom in self.atoms(): if atom._reactant != atom._product: nodes.add(n) for n, m, bond in self.bonds(): if bond._reactant != bond._product: nodes.add(n) nodes.add(m) return list(nodes)
[ "def", "center_atoms", "(", "self", ")", ":", "nodes", "=", "set", "(", ")", "for", "n", ",", "atom", "in", "self", ".", "atoms", "(", ")", ":", "if", "atom", ".", "_reactant", "!=", "atom", ".", "_product", ":", "nodes", ".", "add", "(", "n", ")", "for", "n", ",", "m", ",", "bond", "in", "self", ".", "bonds", "(", ")", ":", "if", "bond", ".", "_reactant", "!=", "bond", ".", "_product", ":", "nodes", ".", "add", "(", "n", ")", "nodes", ".", "add", "(", "m", ")", "return", "list", "(", "nodes", ")" ]
get list of atoms of reaction center (atoms with dynamic: bonds, charges, radicals).
[ "get", "list", "of", "atoms", "of", "reaction", "center", "(", "atoms", "with", "dynamic", ":", "bonds", "charges", "radicals", ")", "." ]
train
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/containers/cgr.py#L66-L79
cimm-kzn/CGRtools
CGRtools/containers/cgr.py
CGRContainer.center_bonds
def center_bonds(self): """ get list of bonds of reaction center (bonds with dynamic orders). """ return [(n, m) for n, m, bond in self.bonds() if bond._reactant != bond._product]
python
def center_bonds(self): """ get list of bonds of reaction center (bonds with dynamic orders). """ return [(n, m) for n, m, bond in self.bonds() if bond._reactant != bond._product]
[ "def", "center_bonds", "(", "self", ")", ":", "return", "[", "(", "n", ",", "m", ")", "for", "n", ",", "m", ",", "bond", "in", "self", ".", "bonds", "(", ")", "if", "bond", ".", "_reactant", "!=", "bond", ".", "_product", "]" ]
get list of bonds of reaction center (bonds with dynamic orders).
[ "get", "list", "of", "bonds", "of", "reaction", "center", "(", "bonds", "with", "dynamic", "orders", ")", "." ]
train
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/containers/cgr.py#L82-L85
cimm-kzn/CGRtools
CGRtools/containers/cgr.py
CGRContainer.reset_query_marks
def reset_query_marks(self): """ set or reset hyb and neighbors marks to atoms. """ for i, atom in self.atoms(): neighbors = 0 hybridization = 1 p_neighbors = 0 p_hybridization = 1 # hyb 1- sp3; 2- sp2; 3- sp1; 4- aromatic for j, bond in self._adj[i].items(): isnth = self._node[j].element != 'H' order = bond.order if order: if isnth: neighbors += 1 if hybridization not in (3, 4): if order == 4: hybridization = 4 elif order == 3: hybridization = 3 elif order == 2: if hybridization == 2: hybridization = 3 else: hybridization = 2 order = bond.p_order if order: if isnth: p_neighbors += 1 if p_hybridization not in (3, 4): if order == 4: p_hybridization = 4 elif order == 3: p_hybridization = 3 elif order == 2: if p_hybridization == 2: p_hybridization = 3 else: p_hybridization = 2 atom._reactant._neighbors = neighbors atom._reactant._hybridization = hybridization atom._product._neighbors = p_neighbors atom._product._hybridization = p_hybridization atom.__dict__.clear() # flush cache self.flush_cache()
python
def reset_query_marks(self): """ set or reset hyb and neighbors marks to atoms. """ for i, atom in self.atoms(): neighbors = 0 hybridization = 1 p_neighbors = 0 p_hybridization = 1 # hyb 1- sp3; 2- sp2; 3- sp1; 4- aromatic for j, bond in self._adj[i].items(): isnth = self._node[j].element != 'H' order = bond.order if order: if isnth: neighbors += 1 if hybridization not in (3, 4): if order == 4: hybridization = 4 elif order == 3: hybridization = 3 elif order == 2: if hybridization == 2: hybridization = 3 else: hybridization = 2 order = bond.p_order if order: if isnth: p_neighbors += 1 if p_hybridization not in (3, 4): if order == 4: p_hybridization = 4 elif order == 3: p_hybridization = 3 elif order == 2: if p_hybridization == 2: p_hybridization = 3 else: p_hybridization = 2 atom._reactant._neighbors = neighbors atom._reactant._hybridization = hybridization atom._product._neighbors = p_neighbors atom._product._hybridization = p_hybridization atom.__dict__.clear() # flush cache self.flush_cache()
[ "def", "reset_query_marks", "(", "self", ")", ":", "for", "i", ",", "atom", "in", "self", ".", "atoms", "(", ")", ":", "neighbors", "=", "0", "hybridization", "=", "1", "p_neighbors", "=", "0", "p_hybridization", "=", "1", "# hyb 1- sp3; 2- sp2; 3- sp1; 4- aromatic", "for", "j", ",", "bond", "in", "self", ".", "_adj", "[", "i", "]", ".", "items", "(", ")", ":", "isnth", "=", "self", ".", "_node", "[", "j", "]", ".", "element", "!=", "'H'", "order", "=", "bond", ".", "order", "if", "order", ":", "if", "isnth", ":", "neighbors", "+=", "1", "if", "hybridization", "not", "in", "(", "3", ",", "4", ")", ":", "if", "order", "==", "4", ":", "hybridization", "=", "4", "elif", "order", "==", "3", ":", "hybridization", "=", "3", "elif", "order", "==", "2", ":", "if", "hybridization", "==", "2", ":", "hybridization", "=", "3", "else", ":", "hybridization", "=", "2", "order", "=", "bond", ".", "p_order", "if", "order", ":", "if", "isnth", ":", "p_neighbors", "+=", "1", "if", "p_hybridization", "not", "in", "(", "3", ",", "4", ")", ":", "if", "order", "==", "4", ":", "p_hybridization", "=", "4", "elif", "order", "==", "3", ":", "p_hybridization", "=", "3", "elif", "order", "==", "2", ":", "if", "p_hybridization", "==", "2", ":", "p_hybridization", "=", "3", "else", ":", "p_hybridization", "=", "2", "atom", ".", "_reactant", ".", "_neighbors", "=", "neighbors", "atom", ".", "_reactant", ".", "_hybridization", "=", "hybridization", "atom", ".", "_product", ".", "_neighbors", "=", "p_neighbors", "atom", ".", "_product", ".", "_hybridization", "=", "p_hybridization", "atom", ".", "__dict__", ".", "clear", "(", ")", "# flush cache", "self", ".", "flush_cache", "(", ")" ]
set or reset hyb and neighbors marks to atoms.
[ "set", "or", "reset", "hyb", "and", "neighbors", "marks", "to", "atoms", "." ]
train
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/containers/cgr.py#L87-L134
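The record encodes hybridization as 1 = sp3, 2 = sp2, 3 = sp, 4 = aromatic, upgraded bond by bond. A small sketch of just that encoding for a list of bond orders (the neighbor counting is omitted):

def hybridization_code(bond_orders):
    """1 = sp3, 2 = sp2, 3 = sp, 4 = aromatic (same encoding as the record)."""
    hyb = 1
    for order in bond_orders:
        if hyb in (3, 4):      # terminal states; later bonds cannot lower them
            break
        if order == 4:         # aromatic bond wins outright
            hyb = 4
        elif order == 3:       # a triple bond means sp
            hyb = 3
        elif order == 2:       # a second double bond upgrades sp2 -> sp
            hyb = 3 if hyb == 2 else 2
    return hyb

print(hybridization_code([1, 1, 1, 1]))  # 1: only single bonds -> sp3
print(hybridization_code([2, 1, 1]))     # 2: one double bond -> sp2
print(hybridization_code([2, 2]))        # 3: two double bonds -> sp
print(hybridization_code([4, 4, 1]))     # 4: aromatic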
cimm-kzn/CGRtools
CGRtools/containers/cgr.py
CGRContainer.substructure
def substructure(self, atoms, meta=False, as_view=True): """ create substructure containing atoms from nbunch list :param atoms: list of atoms numbers of substructure :param meta: if True metadata will be copied to substructure :param as_view: If True, the returned graph-view provides a read-only view of the original structure scaffold without actually copying any data """ s = super().substructure(atoms, meta, as_view) if as_view: s.reset_query_marks = frozen return s
python
def substructure(self, atoms, meta=False, as_view=True): """ create substructure containing atoms from nbunch list :param atoms: list of atoms numbers of substructure :param meta: if True metadata will be copied to substructure :param as_view: If True, the returned graph-view provides a read-only view of the original structure scaffold without actually copying any data """ s = super().substructure(atoms, meta, as_view) if as_view: s.reset_query_marks = frozen return s
[ "def", "substructure", "(", "self", ",", "atoms", ",", "meta", "=", "False", ",", "as_view", "=", "True", ")", ":", "s", "=", "super", "(", ")", ".", "substructure", "(", "atoms", ",", "meta", ",", "as_view", ")", "if", "as_view", ":", "s", ".", "reset_query_marks", "=", "frozen", "return", "s" ]
create substructure containing atoms from nbunch list :param atoms: list of atoms numbers of substructure :param meta: if True metadata will be copied to substructure :param as_view: If True, the returned graph-view provides a read-only view of the original structure scaffold without actually copying any data
[ "create", "substructure", "containing", "atoms", "from", "nbunch", "list" ]
train
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/containers/cgr.py#L136-L148
cimm-kzn/CGRtools
CGRtools/containers/cgr.py
CGRContainer._matcher
def _matcher(self, other): """ CGRContainer < CGRContainer """ if isinstance(other, CGRContainer): return GraphMatcher(other, self, lambda x, y: x == y, lambda x, y: x == y) raise TypeError('only cgr-cgr possible')
python
def _matcher(self, other): """ CGRContainer < CGRContainer """ if isinstance(other, CGRContainer): return GraphMatcher(other, self, lambda x, y: x == y, lambda x, y: x == y) raise TypeError('only cgr-cgr possible')
[ "def", "_matcher", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "CGRContainer", ")", ":", "return", "GraphMatcher", "(", "other", ",", "self", ",", "lambda", "x", ",", "y", ":", "x", "==", "y", ",", "lambda", "x", ",", "y", ":", "x", "==", "y", ")", "raise", "TypeError", "(", "'only cgr-cgr possible'", ")" ]
CGRContainer < CGRContainer
[ "CGRContainer", "<", "CGRContainer" ]
train
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/containers/cgr.py#L150-L156
cimm-kzn/CGRtools
CGRtools/containers/cgr.py
CGRContainer.__plain_bfs
def __plain_bfs(adj, source): """modified NX fast BFS node generator""" seen = set() nextlevel = {source} while nextlevel: thislevel = nextlevel nextlevel = set() for v in thislevel: if v not in seen: yield v seen.add(v) nextlevel.update(adj[v])
python
def __plain_bfs(adj, source): """modified NX fast BFS node generator""" seen = set() nextlevel = {source} while nextlevel: thislevel = nextlevel nextlevel = set() for v in thislevel: if v not in seen: yield v seen.add(v) nextlevel.update(adj[v])
[ "def", "__plain_bfs", "(", "adj", ",", "source", ")", ":", "seen", "=", "set", "(", ")", "nextlevel", "=", "{", "source", "}", "while", "nextlevel", ":", "thislevel", "=", "nextlevel", "nextlevel", "=", "set", "(", ")", "for", "v", "in", "thislevel", ":", "if", "v", "not", "in", "seen", ":", "yield", "v", "seen", ".", "add", "(", "v", ")", "nextlevel", ".", "update", "(", "adj", "[", "v", "]", ")" ]
modified NX fast BFS node generator
[ "modified", "NX", "fast", "BFS", "node", "generator" ]
train
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/containers/cgr.py#L159-L170
cocaine/cocaine-framework-python
cocaine/detail/defaults.py
DefaultOptions.token
def token(self): """ Returns authorization token provided by Cocaine. The real meaning of the token is determined by its type. For example OAUTH2 token will have "bearer" type. :return: A tuple of token type and body. """ if self._token is None: token_type = os.getenv(TOKEN_TYPE_KEY, '') token_body = os.getenv(TOKEN_BODY_KEY, '') self._token = _Token(token_type, token_body) return self._token
python
def token(self): """ Returns authorization token provided by Cocaine. The real meaning of the token is determined by its type. For example OAUTH2 token will have "bearer" type. :return: A tuple of token type and body. """ if self._token is None: token_type = os.getenv(TOKEN_TYPE_KEY, '') token_body = os.getenv(TOKEN_BODY_KEY, '') self._token = _Token(token_type, token_body) return self._token
[ "def", "token", "(", "self", ")", ":", "if", "self", ".", "_token", "is", "None", ":", "token_type", "=", "os", ".", "getenv", "(", "TOKEN_TYPE_KEY", ",", "''", ")", "token_body", "=", "os", ".", "getenv", "(", "TOKEN_BODY_KEY", ",", "''", ")", "self", ".", "_token", "=", "_Token", "(", "token_type", ",", "token_body", ")", "return", "self", ".", "_token" ]
Returns authorization token provided by Cocaine. The real meaning of the token is determined by its type. For example OAUTH2 token will have "bearer" type. :return: A tuple of token type and body.
[ "Returns", "authorization", "token", "provided", "by", "Cocaine", "." ]
train
https://github.com/cocaine/cocaine-framework-python/blob/d8a30074b6338bac4389eb996e00d404338115e4/cocaine/detail/defaults.py#L116-L129
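A standalone sketch of the same lazily cached, environment-provided token; the environment variable names and namedtuple fields below are illustrative assumptions, since the record reads them through TOKEN_TYPE_KEY / TOKEN_BODY_KEY constants defined elsewhere:

import os
from collections import namedtuple

_Token = namedtuple('Token', 'ty body')  # field names are illustrative

class Defaults:
    """Sketch of a lazily cached, environment-provided auth token."""
    def __init__(self):
        self._token = None

    @property
    def token(self):
        if self._token is None:  # read the environment only once, then cache
            self._token = _Token(os.getenv('EXAMPLE_TOKEN_TYPE', ''),
                                 os.getenv('EXAMPLE_TOKEN_BODY', ''))
        return self._token

os.environ['EXAMPLE_TOKEN_TYPE'] = 'OAUTH'      # illustrative values
os.environ['EXAMPLE_TOKEN_BODY'] = 'bearer xyz'
print(Defaults().token)  # Token(ty='OAUTH', body='bearer xyz')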
cocaine/cocaine-framework-python
cocaine/detail/logger.py
Logger._send
def _send(self): """ Send a message lazy formatted with args. External log attributes can be passed via named attribute `extra`, like in logging from the standart library. Note: * Attrs must be dict, otherwise the whole message would be skipped. * The key field in an attr is converted to string. * The value is sent as is if isinstance of (str, unicode, int, float, long, bool), otherwise we convert the value to string. """ buff = BytesIO() while True: msgs = list() try: msg = yield self.queue.get() # we need to connect first, as we issue verbosity request just after connection # and channels should strictly go in ascending order if not self._connected: yield self.connect() try: while True: msgs.append(msg) counter = next(self.counter) msgpack_pack([counter, EMIT, msg], buff) msg = self.queue.get_nowait() except queues.QueueEmpty: pass try: yield self.pipe.write(buff.getvalue()) except Exception: pass # clean the buffer or we will end up without memory buff.truncate(0) except Exception: for message in msgs: self._log_to_fallback(message)
python
def _send(self): """ Send a message lazy formatted with args. External log attributes can be passed via named attribute `extra`, like in logging from the standart library. Note: * Attrs must be dict, otherwise the whole message would be skipped. * The key field in an attr is converted to string. * The value is sent as is if isinstance of (str, unicode, int, float, long, bool), otherwise we convert the value to string. """ buff = BytesIO() while True: msgs = list() try: msg = yield self.queue.get() # we need to connect first, as we issue verbosity request just after connection # and channels should strictly go in ascending order if not self._connected: yield self.connect() try: while True: msgs.append(msg) counter = next(self.counter) msgpack_pack([counter, EMIT, msg], buff) msg = self.queue.get_nowait() except queues.QueueEmpty: pass try: yield self.pipe.write(buff.getvalue()) except Exception: pass # clean the buffer or we will end up without memory buff.truncate(0) except Exception: for message in msgs: self._log_to_fallback(message)
[ "def", "_send", "(", "self", ")", ":", "buff", "=", "BytesIO", "(", ")", "while", "True", ":", "msgs", "=", "list", "(", ")", "try", ":", "msg", "=", "yield", "self", ".", "queue", ".", "get", "(", ")", "# we need to connect first, as we issue verbosity request just after connection", "# and channels should strictly go in ascending order", "if", "not", "self", ".", "_connected", ":", "yield", "self", ".", "connect", "(", ")", "try", ":", "while", "True", ":", "msgs", ".", "append", "(", "msg", ")", "counter", "=", "next", "(", "self", ".", "counter", ")", "msgpack_pack", "(", "[", "counter", ",", "EMIT", ",", "msg", "]", ",", "buff", ")", "msg", "=", "self", ".", "queue", ".", "get_nowait", "(", ")", "except", "queues", ".", "QueueEmpty", ":", "pass", "try", ":", "yield", "self", ".", "pipe", ".", "write", "(", "buff", ".", "getvalue", "(", ")", ")", "except", "Exception", ":", "pass", "# clean the buffer or we will end up without memory", "buff", ".", "truncate", "(", "0", ")", "except", "Exception", ":", "for", "message", "in", "msgs", ":", "self", ".", "_log_to_fallback", "(", "message", ")" ]
Send a message lazily formatted with args. External log attributes can be passed via the named attribute `extra`, as in logging from the standard library. Note: * Attrs must be a dict, otherwise the whole message is skipped. * The key field in an attr is converted to a string. * The value is sent as is if it is an instance of (str, unicode, int, float, long, bool); otherwise it is converted to a string.
[ "Send", "a", "message", "lazily", "formatted", "with", "args", ".", "External", "log", "attributes", "can", "be", "passed", "via", "the", "named", "attribute", "extra", "as", "in", "logging", "from", "the", "standard", "library", "." ]
train
https://github.com/cocaine/cocaine-framework-python/blob/d8a30074b6338bac4389eb996e00d404338115e4/cocaine/detail/logger.py#L147-L186
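The sender's core trick is draining the queue into one buffer and writing a single batch. A synchronous, stdlib-only sketch of that drain-and-batch pattern (the original is a Tornado coroutine packing with msgpack); names are illustrative:

import json
import queue

def drain_batch(q):
    """Collect everything currently queued into one payload (sketch)."""
    msgs = [q.get()]            # block for at least one message
    try:
        while True:
            msgs.append(q.get_nowait())
    except queue.Empty:
        pass                    # queue drained; send what we have
    # One write per batch instead of one write per message, as in the record.
    return '\n'.join(json.dumps(m) for m in msgs).encode()

q = queue.Queue()
for i in range(3):
    q.put({'level': 'info', 'msg': 'event %d' % i})
print(drain_batch(q))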
LordDarkula/chess_py
chess_py/pieces/rook.py
Rook.moves_in_direction
def moves_in_direction(self, direction, position): """ Finds moves in a given direction :type: direction: lambda :type: position: Board :rtype: list """ current_square = self.location while True: try: current_square = direction(current_square) except IndexError: return if self.contains_opposite_color_piece(current_square, position): yield self.create_move(current_square, notation_const.CAPTURE) if not position.is_square_empty(current_square): return yield self.create_move(current_square, notation_const.MOVEMENT)
python
def moves_in_direction(self, direction, position): """ Finds moves in a given direction :type: direction: lambda :type: position: Board :rtype: list """ current_square = self.location while True: try: current_square = direction(current_square) except IndexError: return if self.contains_opposite_color_piece(current_square, position): yield self.create_move(current_square, notation_const.CAPTURE) if not position.is_square_empty(current_square): return yield self.create_move(current_square, notation_const.MOVEMENT)
[ "def", "moves_in_direction", "(", "self", ",", "direction", ",", "position", ")", ":", "current_square", "=", "self", ".", "location", "while", "True", ":", "try", ":", "current_square", "=", "direction", "(", "current_square", ")", "except", "IndexError", ":", "return", "if", "self", ".", "contains_opposite_color_piece", "(", "current_square", ",", "position", ")", ":", "yield", "self", ".", "create_move", "(", "current_square", ",", "notation_const", ".", "CAPTURE", ")", "if", "not", "position", ".", "is_square_empty", "(", "current_square", ")", ":", "return", "yield", "self", ".", "create_move", "(", "current_square", ",", "notation_const", ".", "MOVEMENT", ")" ]
Finds moves in a given direction :type: direction: lambda :type: position: Board :rtype: list
[ "Finds", "moves", "in", "a", "given", "direction" ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/pieces/rook.py#L48-L70
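A coordinate-based sketch of the same sliding-ray generation: walk one step at a time until the board edge, stop after yielding a capture on an enemy piece, stop silently on a friendly piece. Names below are illustrative, not the library API:

def slide(square, step, color, board):
    """Yield sliding-piece moves along one direction (sketch)."""
    f, r = square
    df, dr = step
    while True:
        f, r = f + df, r + dr
        if not (0 <= f < 8 and 0 <= r < 8):
            return                         # ran off the board
        occupant = board.get((f, r))
        if occupant is not None:
            if occupant != color:
                yield ((f, r), 'CAPTURE')  # enemy piece ends the ray with a capture
            return                         # friendly piece simply ends the ray
        yield ((f, r), 'MOVEMENT')

# Rook on a1 sliding up the file, blocked by a black piece on a4.
print(list(slide((0, 0), (0, 1), 'w', {(0, 3): 'b'})))
# [((0, 1), 'MOVEMENT'), ((0, 2), 'MOVEMENT'), ((0, 3), 'CAPTURE')]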
LordDarkula/chess_py
chess_py/pieces/rook.py
Rook.possible_moves
def possible_moves(self, position): """ Returns all possible rook moves. :type: position: Board :rtype: list """ for move in itertools.chain(*[self.moves_in_direction(fn, position) for fn in self.cross_fn]): yield move
python
def possible_moves(self, position): """ Returns all possible rook moves. :type: position: Board :rtype: list """ for move in itertools.chain(*[self.moves_in_direction(fn, position) for fn in self.cross_fn]): yield move
[ "def", "possible_moves", "(", "self", ",", "position", ")", ":", "for", "move", "in", "itertools", ".", "chain", "(", "*", "[", "self", ".", "moves_in_direction", "(", "fn", ",", "position", ")", "for", "fn", "in", "self", ".", "cross_fn", "]", ")", ":", "yield", "move" ]
Returns all possible rook moves. :type: position: Board :rtype: list
[ "Returns", "all", "possible", "rook", "moves", "." ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/pieces/rook.py#L72-L80
spacetelescope/synphot_refactor
synphot/utils.py
overlap_status
def overlap_status(a, b): """Check overlap between two arrays. Parameters ---------- a, b : array-like Arrays to check. Assumed to be in the same unit. Returns ------- result : {'full', 'partial', 'none'} * 'full' - ``a`` is within or same as ``b`` * 'partial' - ``a`` partially overlaps with ``b`` * 'none' - ``a`` does not overlap ``b`` """ # Get the endpoints a1, a2 = a.min(), a.max() b1, b2 = b.min(), b.max() # Do the comparison if a1 >= b1 and a2 <= b2: result = 'full' elif a2 < b1 or b2 < a1: result = 'none' else: result = 'partial' return result
python
def overlap_status(a, b): """Check overlap between two arrays. Parameters ---------- a, b : array-like Arrays to check. Assumed to be in the same unit. Returns ------- result : {'full', 'partial', 'none'} * 'full' - ``a`` is within or same as ``b`` * 'partial' - ``a`` partially overlaps with ``b`` * 'none' - ``a`` does not overlap ``b`` """ # Get the endpoints a1, a2 = a.min(), a.max() b1, b2 = b.min(), b.max() # Do the comparison if a1 >= b1 and a2 <= b2: result = 'full' elif a2 < b1 or b2 < a1: result = 'none' else: result = 'partial' return result
[ "def", "overlap_status", "(", "a", ",", "b", ")", ":", "# Get the endpoints", "a1", ",", "a2", "=", "a", ".", "min", "(", ")", ",", "a", ".", "max", "(", ")", "b1", ",", "b2", "=", "b", ".", "min", "(", ")", ",", "b", ".", "max", "(", ")", "# Do the comparison", "if", "a1", ">=", "b1", "and", "a2", "<=", "b2", ":", "result", "=", "'full'", "elif", "a2", "<", "b1", "or", "b2", "<", "a1", ":", "result", "=", "'none'", "else", ":", "result", "=", "'partial'", "return", "result" ]
Check overlap between two arrays. Parameters ---------- a, b : array-like Arrays to check. Assumed to be in the same unit. Returns ------- result : {'full', 'partial', 'none'} * 'full' - ``a`` is within or same as ``b`` * 'partial' - ``a`` partially overlaps with ``b`` * 'none' - ``a`` does not overlap ``b``
[ "Check", "overlap", "between", "two", "arrays", "." ]
train
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/utils.py#L23-L51
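A quick usage sketch showing all three return values; the endpoint comparison is repeated so the snippet runs standalone:

import numpy as np

def overlap_status(a, b):
    """Same endpoint comparison as the record, for the demo below."""
    a1, a2 = a.min(), a.max()
    b1, b2 = b.min(), b.max()
    if a1 >= b1 and a2 <= b2:
        return 'full'
    if a2 < b1 or b2 < a1:
        return 'none'
    return 'partial'

band = np.arange(3000, 4000)                          # "b": the wider range
print(overlap_status(np.arange(3200, 3800), band))    # full
print(overlap_status(np.arange(3800, 4500), band))    # partial
print(overlap_status(np.arange(5000, 6000), band))    # none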
spacetelescope/synphot_refactor
synphot/utils.py
validate_totalflux
def validate_totalflux(totalflux): """Check integrated flux for invalid values. Parameters ---------- totalflux : float Integrated flux. Raises ------ synphot.exceptions.SynphotError Input is zero, negative, or not a number. """ if totalflux <= 0.0: raise exceptions.SynphotError('Integrated flux is <= 0') elif np.isnan(totalflux): raise exceptions.SynphotError('Integrated flux is NaN') elif np.isinf(totalflux): raise exceptions.SynphotError('Integrated flux is infinite')
python
def validate_totalflux(totalflux): """Check integrated flux for invalid values. Parameters ---------- totalflux : float Integrated flux. Raises ------ synphot.exceptions.SynphotError Input is zero, negative, or not a number. """ if totalflux <= 0.0: raise exceptions.SynphotError('Integrated flux is <= 0') elif np.isnan(totalflux): raise exceptions.SynphotError('Integrated flux is NaN') elif np.isinf(totalflux): raise exceptions.SynphotError('Integrated flux is infinite')
[ "def", "validate_totalflux", "(", "totalflux", ")", ":", "if", "totalflux", "<=", "0.0", ":", "raise", "exceptions", ".", "SynphotError", "(", "'Integrated flux is <= 0'", ")", "elif", "np", ".", "isnan", "(", "totalflux", ")", ":", "raise", "exceptions", ".", "SynphotError", "(", "'Integrated flux is NaN'", ")", "elif", "np", ".", "isinf", "(", "totalflux", ")", ":", "raise", "exceptions", ".", "SynphotError", "(", "'Integrated flux is infinite'", ")" ]
Check integrated flux for invalid values. Parameters ---------- totalflux : float Integrated flux. Raises ------ synphot.exceptions.SynphotError Input is zero, negative, or not a number.
[ "Check", "integrated", "flux", "for", "invalid", "values", "." ]
train
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/utils.py#L54-L73
spacetelescope/synphot_refactor
synphot/utils.py
validate_wavelengths
def validate_wavelengths(wavelengths): """Check wavelengths for ``synphot`` compatibility. Wavelengths must satisfy these conditions: * valid unit type, if given * no zeroes * monotonic ascending or descending * no duplicate values Parameters ---------- wavelengths : array-like or `~astropy.units.quantity.Quantity` Wavelength values. Raises ------ synphot.exceptions.SynphotError Wavelengths unit type is invalid. synphot.exceptions.DuplicateWavelength Wavelength array contains duplicate entries. synphot.exceptions.UnsortedWavelength Wavelength array is not monotonic. synphot.exceptions.ZeroWavelength Negative or zero wavelength occurs in wavelength array. """ if isinstance(wavelengths, u.Quantity): units.validate_wave_unit(wavelengths.unit) wave = wavelengths.value else: wave = wavelengths if np.isscalar(wave): wave = [wave] wave = np.asarray(wave) # Check for zeroes if np.any(wave <= 0): raise exceptions.ZeroWavelength( 'Negative or zero wavelength occurs in wavelength array', rows=np.where(wave <= 0)[0]) # Check for monotonicity sorted_wave = np.sort(wave) if not np.alltrue(sorted_wave == wave): if np.alltrue(sorted_wave[::-1] == wave): pass # Monotonic descending is allowed else: raise exceptions.UnsortedWavelength( 'Wavelength array is not monotonic', rows=np.where(sorted_wave != wave)[0]) # Check for duplicate values if wave.size > 1: dw = sorted_wave[1:] - sorted_wave[:-1] if np.any(dw == 0): raise exceptions.DuplicateWavelength( 'Wavelength array contains duplicate entries', rows=np.where(dw == 0)[0])
python
def validate_wavelengths(wavelengths): """Check wavelengths for ``synphot`` compatibility. Wavelengths must satisfy these conditions: * valid unit type, if given * no zeroes * monotonic ascending or descending * no duplicate values Parameters ---------- wavelengths : array-like or `~astropy.units.quantity.Quantity` Wavelength values. Raises ------ synphot.exceptions.SynphotError Wavelengths unit type is invalid. synphot.exceptions.DuplicateWavelength Wavelength array contains duplicate entries. synphot.exceptions.UnsortedWavelength Wavelength array is not monotonic. synphot.exceptions.ZeroWavelength Negative or zero wavelength occurs in wavelength array. """ if isinstance(wavelengths, u.Quantity): units.validate_wave_unit(wavelengths.unit) wave = wavelengths.value else: wave = wavelengths if np.isscalar(wave): wave = [wave] wave = np.asarray(wave) # Check for zeroes if np.any(wave <= 0): raise exceptions.ZeroWavelength( 'Negative or zero wavelength occurs in wavelength array', rows=np.where(wave <= 0)[0]) # Check for monotonicity sorted_wave = np.sort(wave) if not np.alltrue(sorted_wave == wave): if np.alltrue(sorted_wave[::-1] == wave): pass # Monotonic descending is allowed else: raise exceptions.UnsortedWavelength( 'Wavelength array is not monotonic', rows=np.where(sorted_wave != wave)[0]) # Check for duplicate values if wave.size > 1: dw = sorted_wave[1:] - sorted_wave[:-1] if np.any(dw == 0): raise exceptions.DuplicateWavelength( 'Wavelength array contains duplicate entries', rows=np.where(dw == 0)[0])
[ "def", "validate_wavelengths", "(", "wavelengths", ")", ":", "if", "isinstance", "(", "wavelengths", ",", "u", ".", "Quantity", ")", ":", "units", ".", "validate_wave_unit", "(", "wavelengths", ".", "unit", ")", "wave", "=", "wavelengths", ".", "value", "else", ":", "wave", "=", "wavelengths", "if", "np", ".", "isscalar", "(", "wave", ")", ":", "wave", "=", "[", "wave", "]", "wave", "=", "np", ".", "asarray", "(", "wave", ")", "# Check for zeroes", "if", "np", ".", "any", "(", "wave", "<=", "0", ")", ":", "raise", "exceptions", ".", "ZeroWavelength", "(", "'Negative or zero wavelength occurs in wavelength array'", ",", "rows", "=", "np", ".", "where", "(", "wave", "<=", "0", ")", "[", "0", "]", ")", "# Check for monotonicity", "sorted_wave", "=", "np", ".", "sort", "(", "wave", ")", "if", "not", "np", ".", "alltrue", "(", "sorted_wave", "==", "wave", ")", ":", "if", "np", ".", "alltrue", "(", "sorted_wave", "[", ":", ":", "-", "1", "]", "==", "wave", ")", ":", "pass", "# Monotonic descending is allowed", "else", ":", "raise", "exceptions", ".", "UnsortedWavelength", "(", "'Wavelength array is not monotonic'", ",", "rows", "=", "np", ".", "where", "(", "sorted_wave", "!=", "wave", ")", "[", "0", "]", ")", "# Check for duplicate values", "if", "wave", ".", "size", ">", "1", ":", "dw", "=", "sorted_wave", "[", "1", ":", "]", "-", "sorted_wave", "[", ":", "-", "1", "]", "if", "np", ".", "any", "(", "dw", "==", "0", ")", ":", "raise", "exceptions", ".", "DuplicateWavelength", "(", "'Wavelength array contains duplicate entries'", ",", "rows", "=", "np", ".", "where", "(", "dw", "==", "0", ")", "[", "0", "]", ")" ]
Check wavelengths for ``synphot`` compatibility. Wavelengths must satisfy these conditions: * valid unit type, if given * no zeroes * monotonic ascending or descending * no duplicate values Parameters ---------- wavelengths : array-like or `~astropy.units.quantity.Quantity` Wavelength values. Raises ------ synphot.exceptions.SynphotError Wavelengths unit type is invalid. synphot.exceptions.DuplicateWavelength Wavelength array contains duplicate entries. synphot.exceptions.UnsortedWavelength Wavelength array is not monotonic. synphot.exceptions.ZeroWavelength Negative or zero wavelength occurs in wavelength array.
[ "Check", "wavelengths", "for", "synphot", "compatibility", "." ]
train
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/utils.py#L76-L139
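The three substantive checks are positivity, monotonicity in either direction, and no duplicates. A compact numpy sketch that reports the same problems on a plain array instead of raising the package's exception types:

import numpy as np

def check_wavelengths(wave):
    """Return the problems validate_wavelengths would raise for (sketch)."""
    wave = np.atleast_1d(np.asarray(wave, dtype=float))
    problems = []
    if np.any(wave <= 0):
        problems.append('zero or negative wavelength')
    s = np.sort(wave)
    if not (np.array_equal(s, wave) or np.array_equal(s[::-1], wave)):
        problems.append('not monotonic')        # ascending or descending both pass
    if wave.size > 1 and np.any(np.diff(s) == 0):
        problems.append('duplicate values')
    return problems

print(check_wavelengths([1000, 2000, 3000]))        # []
print(check_wavelengths([3000, 2000, 2000, 1000]))  # ['duplicate values']
print(check_wavelengths([1000, 3000, 2000]))        # ['not monotonic']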
spacetelescope/synphot_refactor
synphot/utils.py
generate_wavelengths
def generate_wavelengths(minwave=500, maxwave=26000, num=10000, delta=None, log=True, wave_unit=u.AA): """Generate wavelength array to be used for spectrum sampling. .. math:: minwave \\le \\lambda < maxwave Parameters ---------- minwave, maxwave : float Lower and upper limits of the wavelengths. These must be values in linear space regardless of ``log``. num : int The number of wavelength values. This is only used when ``delta=None``. delta : float or `None` Delta between wavelength values. When ``log=True``, this is the spacing in log space. log : bool If `True`, the wavelength values are evenly spaced in log scale. Otherwise, spacing is linear. wave_unit : str or `~astropy.units.core.Unit` Wavelength unit. Default is Angstrom. Returns ------- waveset : `~astropy.units.quantity.Quantity` Generated wavelength set. waveset_str : str Info string associated with the result. """ wave_unit = units.validate_unit(wave_unit) if delta is not None: num = None waveset_str = 'Min: {0}, Max: {1}, Num: {2}, Delta: {3}, Log: {4}'.format( minwave, maxwave, num, delta, log) # Log space if log: logmin = np.log10(minwave) logmax = np.log10(maxwave) if delta is None: waveset = np.logspace(logmin, logmax, num, endpoint=False) else: waveset = 10 ** np.arange(logmin, logmax, delta) # Linear space else: if delta is None: waveset = np.linspace(minwave, maxwave, num, endpoint=False) else: waveset = np.arange(minwave, maxwave, delta) return waveset.astype(np.float64) * wave_unit, waveset_str
python
def generate_wavelengths(minwave=500, maxwave=26000, num=10000, delta=None, log=True, wave_unit=u.AA): """Generate wavelength array to be used for spectrum sampling. .. math:: minwave \\le \\lambda < maxwave Parameters ---------- minwave, maxwave : float Lower and upper limits of the wavelengths. These must be values in linear space regardless of ``log``. num : int The number of wavelength values. This is only used when ``delta=None``. delta : float or `None` Delta between wavelength values. When ``log=True``, this is the spacing in log space. log : bool If `True`, the wavelength values are evenly spaced in log scale. Otherwise, spacing is linear. wave_unit : str or `~astropy.units.core.Unit` Wavelength unit. Default is Angstrom. Returns ------- waveset : `~astropy.units.quantity.Quantity` Generated wavelength set. waveset_str : str Info string associated with the result. """ wave_unit = units.validate_unit(wave_unit) if delta is not None: num = None waveset_str = 'Min: {0}, Max: {1}, Num: {2}, Delta: {3}, Log: {4}'.format( minwave, maxwave, num, delta, log) # Log space if log: logmin = np.log10(minwave) logmax = np.log10(maxwave) if delta is None: waveset = np.logspace(logmin, logmax, num, endpoint=False) else: waveset = 10 ** np.arange(logmin, logmax, delta) # Linear space else: if delta is None: waveset = np.linspace(minwave, maxwave, num, endpoint=False) else: waveset = np.arange(minwave, maxwave, delta) return waveset.astype(np.float64) * wave_unit, waveset_str
[ "def", "generate_wavelengths", "(", "minwave", "=", "500", ",", "maxwave", "=", "26000", ",", "num", "=", "10000", ",", "delta", "=", "None", ",", "log", "=", "True", ",", "wave_unit", "=", "u", ".", "AA", ")", ":", "wave_unit", "=", "units", ".", "validate_unit", "(", "wave_unit", ")", "if", "delta", "is", "not", "None", ":", "num", "=", "None", "waveset_str", "=", "'Min: {0}, Max: {1}, Num: {2}, Delta: {3}, Log: {4}'", ".", "format", "(", "minwave", ",", "maxwave", ",", "num", ",", "delta", ",", "log", ")", "# Log space", "if", "log", ":", "logmin", "=", "np", ".", "log10", "(", "minwave", ")", "logmax", "=", "np", ".", "log10", "(", "maxwave", ")", "if", "delta", "is", "None", ":", "waveset", "=", "np", ".", "logspace", "(", "logmin", ",", "logmax", ",", "num", ",", "endpoint", "=", "False", ")", "else", ":", "waveset", "=", "10", "**", "np", ".", "arange", "(", "logmin", ",", "logmax", ",", "delta", ")", "# Linear space", "else", ":", "if", "delta", "is", "None", ":", "waveset", "=", "np", ".", "linspace", "(", "minwave", ",", "maxwave", ",", "num", ",", "endpoint", "=", "False", ")", "else", ":", "waveset", "=", "np", ".", "arange", "(", "minwave", ",", "maxwave", ",", "delta", ")", "return", "waveset", ".", "astype", "(", "np", ".", "float64", ")", "*", "wave_unit", ",", "waveset_str" ]
Generate wavelength array to be used for spectrum sampling. .. math:: minwave \\le \\lambda < maxwave Parameters ---------- minwave, maxwave : float Lower and upper limits of the wavelengths. These must be values in linear space regardless of ``log``. num : int The number of wavelength values. This is only used when ``delta=None``. delta : float or `None` Delta between wavelength values. When ``log=True``, this is the spacing in log space. log : bool If `True`, the wavelength values are evenly spaced in log scale. Otherwise, spacing is linear. wave_unit : str or `~astropy.units.core.Unit` Wavelength unit. Default is Angstrom. Returns ------- waveset : `~astropy.units.quantity.Quantity` Generated wavelength set. waveset_str : str Info string associated with the result.
[ "Generate", "wavelength", "array", "to", "be", "used", "for", "spectrum", "sampling", "." ]
train
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/utils.py#L142-L205
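Under the hood the sampler is np.logspace / 10**np.arange in log mode (np.linspace / np.arange otherwise). A quick sketch of the two log-mode branches with the default 500-26000 range:

import numpy as np

minwave, maxwave = 500.0, 26000.0
logmin, logmax = np.log10(minwave), np.log10(maxwave)

# num-driven: 10 log-spaced samples, endpoint excluded as in the record
by_num = np.logspace(logmin, logmax, 10, endpoint=False)
# delta-driven: fixed spacing of 0.2 dex in log space
by_delta = 10 ** np.arange(logmin, logmax, 0.2)

print(np.round(by_num[:3], 1))    # first few samples, starting at 500.0
print(np.round(by_delta[:3], 1))  # 500.0, ~792.4, ~1255.9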
spacetelescope/synphot_refactor
synphot/utils.py
merge_wavelengths
def merge_wavelengths(waveset1, waveset2, threshold=1e-12): """Return the union of the two sets of wavelengths using :func:`numpy.union1d`. The merged wavelengths may sometimes contain numbers which are nearly equal but differ at levels as small as 1e-14. Having values this close together can cause problems down the line. So, here we test whether any such small differences are present, with a small difference defined as less than ``threshold``. If a small difference is present, the lower of the too-close pair is removed. Parameters ---------- waveset1, waveset2 : array-like or `None` Wavelength values, assumed to be in the same unit already. Also see :func:`~synphot.models.get_waveset`. threshold : float, optional Merged wavelength values are considered "too close together" when the difference is smaller than this number. The default is 1e-12. Returns ------- out_wavelengths : array-like or `None` Merged wavelengths. `None` if undefined. """ if waveset1 is None and waveset2 is None: out_wavelengths = None elif waveset1 is not None and waveset2 is None: out_wavelengths = waveset1 elif waveset1 is None and waveset2 is not None: out_wavelengths = waveset2 else: out_wavelengths = np.union1d(waveset1, waveset2) delta = out_wavelengths[1:] - out_wavelengths[:-1] i_good = np.where(delta > threshold) # Remove "too close together" duplicates if len(i_good[0]) < delta.size: out_wavelengths = np.append( out_wavelengths[i_good], out_wavelengths[-1]) return out_wavelengths
python
def merge_wavelengths(waveset1, waveset2, threshold=1e-12): """Return the union of the two sets of wavelengths using :func:`numpy.union1d`. The merged wavelengths may sometimes contain numbers which are nearly equal but differ at levels as small as 1e-14. Having values this close together can cause problems down the line. So, here we test whether any such small differences are present, with a small difference defined as less than ``threshold``. If a small difference is present, the lower of the too-close pair is removed. Parameters ---------- waveset1, waveset2 : array-like or `None` Wavelength values, assumed to be in the same unit already. Also see :func:`~synphot.models.get_waveset`. threshold : float, optional Merged wavelength values are considered "too close together" when the difference is smaller than this number. The default is 1e-12. Returns ------- out_wavelengths : array-like or `None` Merged wavelengths. `None` if undefined. """ if waveset1 is None and waveset2 is None: out_wavelengths = None elif waveset1 is not None and waveset2 is None: out_wavelengths = waveset1 elif waveset1 is None and waveset2 is not None: out_wavelengths = waveset2 else: out_wavelengths = np.union1d(waveset1, waveset2) delta = out_wavelengths[1:] - out_wavelengths[:-1] i_good = np.where(delta > threshold) # Remove "too close together" duplicates if len(i_good[0]) < delta.size: out_wavelengths = np.append( out_wavelengths[i_good], out_wavelengths[-1]) return out_wavelengths
[ "def", "merge_wavelengths", "(", "waveset1", ",", "waveset2", ",", "threshold", "=", "1e-12", ")", ":", "if", "waveset1", "is", "None", "and", "waveset2", "is", "None", ":", "out_wavelengths", "=", "None", "elif", "waveset1", "is", "not", "None", "and", "waveset2", "is", "None", ":", "out_wavelengths", "=", "waveset1", "elif", "waveset1", "is", "None", "and", "waveset2", "is", "not", "None", ":", "out_wavelengths", "=", "waveset2", "else", ":", "out_wavelengths", "=", "np", ".", "union1d", "(", "waveset1", ",", "waveset2", ")", "delta", "=", "out_wavelengths", "[", "1", ":", "]", "-", "out_wavelengths", "[", ":", "-", "1", "]", "i_good", "=", "np", ".", "where", "(", "delta", ">", "threshold", ")", "# Remove \"too close together\" duplicates", "if", "len", "(", "i_good", "[", "0", "]", ")", "<", "delta", ".", "size", ":", "out_wavelengths", "=", "np", ".", "append", "(", "out_wavelengths", "[", "i_good", "]", ",", "out_wavelengths", "[", "-", "1", "]", ")", "return", "out_wavelengths" ]
Return the union of the two sets of wavelengths using :func:`numpy.union1d`. The merged wavelengths may sometimes contain numbers which are nearly equal but differ at levels as small as 1e-14. Having values this close together can cause problems down the line. So, here we test whether any such small differences are present, with a small difference defined as less than ``threshold``. If a small difference is present, the lower of the too-close pair is removed. Parameters ---------- waveset1, waveset2 : array-like or `None` Wavelength values, assumed to be in the same unit already. Also see :func:`~synphot.models.get_waveset`. threshold : float, optional Merged wavelength values are considered "too close together" when the difference is smaller than this number. The default is 1e-12. Returns ------- out_wavelengths : array-like or `None` Merged wavelengths. `None` if undefined.
[ "Return", "the", "union", "of", "the", "two", "sets", "of", "wavelengths", "using", ":", "func", ":", "numpy", ".", "union1d", "." ]
train
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/utils.py#L208-L252
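The merge is np.union1d followed by dropping the lower member of any pair closer than the threshold. A small numpy sketch with an artificially close pair:

import numpy as np

def merge(w1, w2, threshold=1e-12):
    """Union of two wavelength grids with near-duplicates collapsed (sketch)."""
    out = np.union1d(w1, w2)
    delta = np.diff(out)
    good = np.where(delta > threshold)[0]
    if good.size < delta.size:
        # drop the lower member of each too-close pair, always keep the last point
        out = np.append(out[good], out[-1])
    return out

a = np.array([1.0, 2.0, 3.0])
b = np.array([1.5, 2.0 + 1e-14, 3.0])
print(merge(a, b))  # [1.  1.5 2.  3. ] -- exact 2.0 dropped, its 2+1e-14 twin kept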
spacetelescope/synphot_refactor
synphot/utils.py
download_data
def download_data(cdbs_root, verbose=True, dry_run=False): """Download CDBS data files to given root directory. Download is skipped if a data file already exists. Parameters ---------- cdbs_root : str Root directory for CDBS data files. verbose : bool Print extra information to screen. dry_run : bool Go through the logic but skip the actual download. This would return a list of files that *would have been* downloaded without network calls. Use this option for debugging or testing. Raises ------ OSError Problem with directory. Returns ------- file_list : list of str A list of downloaded files. """ from .config import conf # Avoid potential circular import if not os.path.exists(cdbs_root): os.makedirs(cdbs_root, exist_ok=True) if verbose: # pragma: no cover print('Created {}'.format(cdbs_root)) elif not os.path.isdir(cdbs_root): raise OSError('{} must be a directory'.format(cdbs_root)) host = 'http://ssb.stsci.edu/cdbs/' file_list = [] if not cdbs_root.endswith(os.sep): cdbs_root += os.sep # See https://github.com/astropy/astropy/issues/8524 for cfgitem in conf.__class__.__dict__.values(): if (not isinstance(cfgitem, ConfigItem) or not cfgitem.name.endswith('file')): continue url = cfgitem() if not url.startswith(host): if verbose: # pragma: no cover print('{} is not from {}, skipping download'.format( url, host)) continue dst = url.replace(host, cdbs_root).replace('/', os.sep) if os.path.exists(dst): if verbose: # pragma: no cover print('{} already exists, skipping download'.format(dst)) continue # Create sub-directories, if needed. subdirs = os.path.dirname(dst) os.makedirs(subdirs, exist_ok=True) if not dry_run: # pragma: no cover try: src = download_file(url) copyfile(src, dst) except Exception as exc: print('Download failed - {}'.format(str(exc))) continue file_list.append(dst) if verbose: # pragma: no cover print('{} downloaded to {}'.format(url, dst)) return file_list
python
def download_data(cdbs_root, verbose=True, dry_run=False): """Download CDBS data files to given root directory. Download is skipped if a data file already exists. Parameters ---------- cdbs_root : str Root directory for CDBS data files. verbose : bool Print extra information to screen. dry_run : bool Go through the logic but skip the actual download. This would return a list of files that *would have been* downloaded without network calls. Use this option for debugging or testing. Raises ------ OSError Problem with directory. Returns ------- file_list : list of str A list of downloaded files. """ from .config import conf # Avoid potential circular import if not os.path.exists(cdbs_root): os.makedirs(cdbs_root, exist_ok=True) if verbose: # pragma: no cover print('Created {}'.format(cdbs_root)) elif not os.path.isdir(cdbs_root): raise OSError('{} must be a directory'.format(cdbs_root)) host = 'http://ssb.stsci.edu/cdbs/' file_list = [] if not cdbs_root.endswith(os.sep): cdbs_root += os.sep # See https://github.com/astropy/astropy/issues/8524 for cfgitem in conf.__class__.__dict__.values(): if (not isinstance(cfgitem, ConfigItem) or not cfgitem.name.endswith('file')): continue url = cfgitem() if not url.startswith(host): if verbose: # pragma: no cover print('{} is not from {}, skipping download'.format( url, host)) continue dst = url.replace(host, cdbs_root).replace('/', os.sep) if os.path.exists(dst): if verbose: # pragma: no cover print('{} already exists, skipping download'.format(dst)) continue # Create sub-directories, if needed. subdirs = os.path.dirname(dst) os.makedirs(subdirs, exist_ok=True) if not dry_run: # pragma: no cover try: src = download_file(url) copyfile(src, dst) except Exception as exc: print('Download failed - {}'.format(str(exc))) continue file_list.append(dst) if verbose: # pragma: no cover print('{} downloaded to {}'.format(url, dst)) return file_list
[ "def", "download_data", "(", "cdbs_root", ",", "verbose", "=", "True", ",", "dry_run", "=", "False", ")", ":", "from", ".", "config", "import", "conf", "# Avoid potential circular import", "if", "not", "os", ".", "path", ".", "exists", "(", "cdbs_root", ")", ":", "os", ".", "makedirs", "(", "cdbs_root", ",", "exist_ok", "=", "True", ")", "if", "verbose", ":", "# pragma: no cover", "print", "(", "'Created {}'", ".", "format", "(", "cdbs_root", ")", ")", "elif", "not", "os", ".", "path", ".", "isdir", "(", "cdbs_root", ")", ":", "raise", "OSError", "(", "'{} must be a directory'", ".", "format", "(", "cdbs_root", ")", ")", "host", "=", "'http://ssb.stsci.edu/cdbs/'", "file_list", "=", "[", "]", "if", "not", "cdbs_root", ".", "endswith", "(", "os", ".", "sep", ")", ":", "cdbs_root", "+=", "os", ".", "sep", "# See https://github.com/astropy/astropy/issues/8524", "for", "cfgitem", "in", "conf", ".", "__class__", ".", "__dict__", ".", "values", "(", ")", ":", "if", "(", "not", "isinstance", "(", "cfgitem", ",", "ConfigItem", ")", "or", "not", "cfgitem", ".", "name", ".", "endswith", "(", "'file'", ")", ")", ":", "continue", "url", "=", "cfgitem", "(", ")", "if", "not", "url", ".", "startswith", "(", "host", ")", ":", "if", "verbose", ":", "# pragma: no cover", "print", "(", "'{} is not from {}, skipping download'", ".", "format", "(", "url", ",", "host", ")", ")", "continue", "dst", "=", "url", ".", "replace", "(", "host", ",", "cdbs_root", ")", ".", "replace", "(", "'/'", ",", "os", ".", "sep", ")", "if", "os", ".", "path", ".", "exists", "(", "dst", ")", ":", "if", "verbose", ":", "# pragma: no cover", "print", "(", "'{} already exists, skipping download'", ".", "format", "(", "dst", ")", ")", "continue", "# Create sub-directories, if needed.", "subdirs", "=", "os", ".", "path", ".", "dirname", "(", "dst", ")", "os", ".", "makedirs", "(", "subdirs", ",", "exist_ok", "=", "True", ")", "if", "not", "dry_run", ":", "# pragma: no cover", "try", ":", "src", "=", "download_file", "(", "url", ")", "copyfile", "(", "src", ",", "dst", ")", "except", "Exception", "as", "exc", ":", "print", "(", "'Download failed - {}'", ".", "format", "(", "str", "(", "exc", ")", ")", ")", "continue", "file_list", ".", "append", "(", "dst", ")", "if", "verbose", ":", "# pragma: no cover", "print", "(", "'{} downloaded to {}'", ".", "format", "(", "url", ",", "dst", ")", ")", "return", "file_list" ]
Download CDBS data files to given root directory. Download is skipped if a data file already exists. Parameters ---------- cdbs_root : str Root directory for CDBS data files. verbose : bool Print extra information to screen. dry_run : bool Go through the logic but skip the actual download. This would return a list of files that *would have been* downloaded without network calls. Use this option for debugging or testing. Raises ------ OSError Problem with directory. Returns ------- file_list : list of str A list of downloaded files.
[ "Download", "CDBS", "data", "files", "to", "given", "root", "directory", ".", "Download", "is", "skipped", "if", "a", "data", "file", "already", "exists", "." ]
train
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/utils.py#L255-L336
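A minimal usage sketch for the function above. The import path synphot.utils is assumed from the record's URL field and the target directory is illustrative; with dry_run=True no network access is made and the returned list names the files that would have been fetched:

# Hedged usage sketch; 'synphot.utils' and '/tmp/cdbs' are assumptions, not taken from the record.
from synphot.utils import download_data

would_download = download_data('/tmp/cdbs', verbose=False, dry_run=True)
for path in would_download:
    print(path)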
Julius2342/pyvlx
examples/demo.py
main
async def main(loop):
    """Demonstrate functionality of PyVLX."""
    pyvlx = PyVLX('pyvlx.yaml', loop=loop)
    # Alternative:
    # pyvlx = PyVLX(host="192.168.2.127", password="velux123", loop=loop)
    # Running scenes:
    await pyvlx.load_scenes()
    await pyvlx.scenes["All Windows Closed"].run()
    # Changing position of windows:
    await pyvlx.load_nodes()
    await pyvlx.nodes['Bath'].open()
    await pyvlx.nodes['Bath'].close()
    await pyvlx.nodes['Bath'].set_position(Position(position_percent=45))
    # Changing of on-off switches:
    # await pyvlx.nodes['CoffeeMaker'].set_on()
    # await pyvlx.nodes['CoffeeMaker'].set_off()
    # You can easily rename nodes:
    # await pyvlx.nodes["Window 10"].rename("Window 11")
    await pyvlx.disconnect()
python
async def main(loop):
    """Demonstrate functionality of PyVLX."""
    pyvlx = PyVLX('pyvlx.yaml', loop=loop)
    # Alternative:
    # pyvlx = PyVLX(host="192.168.2.127", password="velux123", loop=loop)
    # Running scenes:
    await pyvlx.load_scenes()
    await pyvlx.scenes["All Windows Closed"].run()
    # Changing position of windows:
    await pyvlx.load_nodes()
    await pyvlx.nodes['Bath'].open()
    await pyvlx.nodes['Bath'].close()
    await pyvlx.nodes['Bath'].set_position(Position(position_percent=45))
    # Changing of on-off switches:
    # await pyvlx.nodes['CoffeeMaker'].set_on()
    # await pyvlx.nodes['CoffeeMaker'].set_off()
    # You can easily rename nodes:
    # await pyvlx.nodes["Window 10"].rename("Window 11")
    await pyvlx.disconnect()
[ "async", "def", "main", "(", "loop", ")", ":", "pyvlx", "=", "PyVLX", "(", "'pyvlx.yaml'", ",", "loop", "=", "loop", ")", "# Alternative:", "# pyvlx = PyVLX(host=\"192.168.2.127\", password=\"velux123\", loop=loop)", "# Runing scenes:", "await", "pyvlx", ".", "load_scenes", "(", ")", "await", "pyvlx", ".", "scenes", "[", "\"All Windows Closed\"", "]", ".", "run", "(", ")", "# Changing position of windows:", "await", "pyvlx", ".", "load_nodes", "(", ")", "await", "pyvlx", ".", "nodes", "[", "'Bath'", "]", ".", "open", "(", ")", "await", "pyvlx", ".", "nodes", "[", "'Bath'", "]", ".", "close", "(", ")", "await", "pyvlx", ".", "nodes", "[", "'Bath'", "]", ".", "set_position", "(", "Position", "(", "position_percent", "=", "45", ")", ")", "# Changing of on-off switches:", "# await pyvlx.nodes['CoffeeMaker'].set_on()", "# await pyvlx.nodes['CoffeeMaker'].set_off()", "# You can easily rename nodes:", "# await pyvlx.nodes[\"Window 10\"].rename(\"Window 11\")", "await", "pyvlx", ".", "disconnect", "(", ")" ]
Demonstrate functionality of PyVLX.
[ "Demonstrate", "functionality", "of", "PyVLX", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/examples/demo.py#L7-L30
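The coroutine above still needs an event loop to drive it. A minimal sketch of how it might be invoked; the original demo script presumably contains similar boilerplate outside the function shown in the record:

# Hedged driver sketch for main(); standard asyncio boilerplate, not copied from the record.
import asyncio

if __name__ == '__main__':
    LOOP = asyncio.get_event_loop()
    LOOP.run_until_complete(main(LOOP))
    LOOP.close()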
Julius2342/pyvlx
pyvlx/frames/frame_password_enter.py
FramePasswordEnterRequest.get_payload
def get_payload(self): """Return Payload.""" if self.password is None: raise PyVLXException("password is none") if len(self.password) > self.MAX_SIZE: raise PyVLXException("password is too long") return string_to_bytes(self.password, self.MAX_SIZE)
python
def get_payload(self): """Return Payload.""" if self.password is None: raise PyVLXException("password is none") if len(self.password) > self.MAX_SIZE: raise PyVLXException("password is too long") return string_to_bytes(self.password, self.MAX_SIZE)
[ "def", "get_payload", "(", "self", ")", ":", "if", "self", ".", "password", "is", "None", ":", "raise", "PyVLXException", "(", "\"password is none\"", ")", "if", "len", "(", "self", ".", "password", ")", ">", "self", ".", "MAX_SIZE", ":", "raise", "PyVLXException", "(", "\"password is too long\"", ")", "return", "string_to_bytes", "(", "self", ".", "password", ",", "self", ".", "MAX_SIZE", ")" ]
Return Payload.
[ "Return", "Payload", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/frames/frame_password_enter.py#L22-L28
Julius2342/pyvlx
pyvlx/nodes.py
Nodes.add
def add(self, node): """Add Node, replace existing node if node with node_id is present.""" if not isinstance(node, Node): raise TypeError() for i, j in enumerate(self.__nodes): if j.node_id == node.node_id: self.__nodes[i] = node return self.__nodes.append(node)
python
def add(self, node): """Add Node, replace existing node if node with node_id is present.""" if not isinstance(node, Node): raise TypeError() for i, j in enumerate(self.__nodes): if j.node_id == node.node_id: self.__nodes[i] = node return self.__nodes.append(node)
[ "def", "add", "(", "self", ",", "node", ")", ":", "if", "not", "isinstance", "(", "node", ",", "Node", ")", ":", "raise", "TypeError", "(", ")", "for", "i", ",", "j", "in", "enumerate", "(", "self", ".", "__nodes", ")", ":", "if", "j", ".", "node_id", "==", "node", ".", "node_id", ":", "self", ".", "__nodes", "[", "i", "]", "=", "node", "return", "self", ".", "__nodes", ".", "append", "(", "node", ")" ]
Add Node, replace existing node if node with node_id is present.
[ "Add", "Node", "replace", "existing", "node", "if", "node", "with", "node_id", "is", "present", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/nodes.py#L51-L59
Julius2342/pyvlx
pyvlx/nodes.py
Nodes.load
async def load(self, node_id=None): """Load nodes from KLF 200, if no node_id is specified all nodes are loaded.""" if node_id is not None: await self._load_node(node_id=node_id) else: await self._load_all_nodes()
python
async def load(self, node_id=None): """Load nodes from KLF 200, if no node_id is specified all nodes are loaded.""" if node_id is not None: await self._load_node(node_id=node_id) else: await self._load_all_nodes()
[ "async", "def", "load", "(", "self", ",", "node_id", "=", "None", ")", ":", "if", "node_id", "is", "not", "None", ":", "await", "self", ".", "_load_node", "(", "node_id", "=", "node_id", ")", "else", ":", "await", "self", ".", "_load_all_nodes", "(", ")" ]
Load nodes from KLF 200, if no node_id is specified all nodes are loaded.
[ "Load", "nodes", "from", "KLF", "200", "if", "no", "node_id", "is", "specified", "all", "nodes", "are", "loaded", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/nodes.py#L65-L70
Julius2342/pyvlx
pyvlx/nodes.py
Nodes._load_node
async def _load_node(self, node_id): """Load single node via API.""" get_node_information = GetNodeInformation(pyvlx=self.pyvlx, node_id=node_id) await get_node_information.do_api_call() if not get_node_information.success: raise PyVLXException("Unable to retrieve node information") notification_frame = get_node_information.notification_frame node = convert_frame_to_node(self.pyvlx, notification_frame) if node is not None: self.add(node)
python
async def _load_node(self, node_id): """Load single node via API.""" get_node_information = GetNodeInformation(pyvlx=self.pyvlx, node_id=node_id) await get_node_information.do_api_call() if not get_node_information.success: raise PyVLXException("Unable to retrieve node information") notification_frame = get_node_information.notification_frame node = convert_frame_to_node(self.pyvlx, notification_frame) if node is not None: self.add(node)
[ "async", "def", "_load_node", "(", "self", ",", "node_id", ")", ":", "get_node_information", "=", "GetNodeInformation", "(", "pyvlx", "=", "self", ".", "pyvlx", ",", "node_id", "=", "node_id", ")", "await", "get_node_information", ".", "do_api_call", "(", ")", "if", "not", "get_node_information", ".", "success", ":", "raise", "PyVLXException", "(", "\"Unable to retrieve node information\"", ")", "notification_frame", "=", "get_node_information", ".", "notification_frame", "node", "=", "convert_frame_to_node", "(", "self", ".", "pyvlx", ",", "notification_frame", ")", "if", "node", "is", "not", "None", ":", "self", ".", "add", "(", "node", ")" ]
Load single node via API.
[ "Load", "single", "node", "via", "API", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/nodes.py#L72-L81
Julius2342/pyvlx
pyvlx/nodes.py
Nodes._load_all_nodes
async def _load_all_nodes(self): """Load all nodes via API.""" get_all_nodes_information = GetAllNodesInformation(pyvlx=self.pyvlx) await get_all_nodes_information.do_api_call() if not get_all_nodes_information.success: raise PyVLXException("Unable to retrieve node information") self.clear() for notification_frame in get_all_nodes_information.notification_frames: node = convert_frame_to_node(self.pyvlx, notification_frame) if node is not None: self.add(node)
python
async def _load_all_nodes(self): """Load all nodes via API.""" get_all_nodes_information = GetAllNodesInformation(pyvlx=self.pyvlx) await get_all_nodes_information.do_api_call() if not get_all_nodes_information.success: raise PyVLXException("Unable to retrieve node information") self.clear() for notification_frame in get_all_nodes_information.notification_frames: node = convert_frame_to_node(self.pyvlx, notification_frame) if node is not None: self.add(node)
[ "async", "def", "_load_all_nodes", "(", "self", ")", ":", "get_all_nodes_information", "=", "GetAllNodesInformation", "(", "pyvlx", "=", "self", ".", "pyvlx", ")", "await", "get_all_nodes_information", ".", "do_api_call", "(", ")", "if", "not", "get_all_nodes_information", ".", "success", ":", "raise", "PyVLXException", "(", "\"Unable to retrieve node information\"", ")", "self", ".", "clear", "(", ")", "for", "notification_frame", "in", "get_all_nodes_information", ".", "notification_frames", ":", "node", "=", "convert_frame_to_node", "(", "self", ".", "pyvlx", ",", "notification_frame", ")", "if", "node", "is", "not", "None", ":", "self", ".", "add", "(", "node", ")" ]
Load all nodes via API.
[ "Load", "all", "nodes", "via", "API", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/nodes.py#L83-L93
Julius2342/pyvlx
pyvlx/get_all_nodes_information.py
GetAllNodesInformation.handle_frame
async def handle_frame(self, frame): """Handle incoming API frame, return True if this was the expected frame.""" if isinstance(frame, FrameGetAllNodesInformationConfirmation): self.number_of_nodes = frame.number_of_nodes # We are still waiting for FrameGetAllNodesInformationNotification return False if isinstance(frame, FrameGetAllNodesInformationNotification): self.notification_frames.append(frame) if isinstance(frame, FrameGetAllNodesInformationFinishedNotification): if self.number_of_nodes != len(self.notification_frames): PYVLXLOG.warning("Number of received scenes does not match expected number") self.success = True return True return False
python
async def handle_frame(self, frame): """Handle incoming API frame, return True if this was the expected frame.""" if isinstance(frame, FrameGetAllNodesInformationConfirmation): self.number_of_nodes = frame.number_of_nodes # We are still waiting for FrameGetAllNodesInformationNotification return False if isinstance(frame, FrameGetAllNodesInformationNotification): self.notification_frames.append(frame) if isinstance(frame, FrameGetAllNodesInformationFinishedNotification): if self.number_of_nodes != len(self.notification_frames): PYVLXLOG.warning("Number of received scenes does not match expected number") self.success = True return True return False
[ "async", "def", "handle_frame", "(", "self", ",", "frame", ")", ":", "if", "isinstance", "(", "frame", ",", "FrameGetAllNodesInformationConfirmation", ")", ":", "self", ".", "number_of_nodes", "=", "frame", ".", "number_of_nodes", "# We are still waiting for FrameGetAllNodesInformationNotification", "return", "False", "if", "isinstance", "(", "frame", ",", "FrameGetAllNodesInformationNotification", ")", ":", "self", ".", "notification_frames", ".", "append", "(", "frame", ")", "if", "isinstance", "(", "frame", ",", "FrameGetAllNodesInformationFinishedNotification", ")", ":", "if", "self", ".", "number_of_nodes", "!=", "len", "(", "self", ".", "notification_frames", ")", ":", "PYVLXLOG", ".", "warning", "(", "\"Number of received scenes does not match expected number\"", ")", "self", ".", "success", "=", "True", "return", "True", "return", "False" ]
Handle incoming API frame, return True if this was the expected frame.
[ "Handle", "incoming", "API", "frame", "return", "True", "if", "this", "was", "the", "expected", "frame", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/get_all_nodes_information.py#L21-L34
Julius2342/pyvlx
pyvlx/get_node_information.py
GetNodeInformation.handle_frame
async def handle_frame(self, frame): """Handle incoming API frame, return True if this was the expected frame.""" if isinstance(frame, FrameGetNodeInformationConfirmation) and frame.node_id == self.node_id: # We are still waiting for GetNodeInformationNotification return False if isinstance(frame, FrameGetNodeInformationNotification) and frame.node_id == self.node_id: self.notification_frame = frame self.success = True return True return False
python
async def handle_frame(self, frame): """Handle incoming API frame, return True if this was the expected frame.""" if isinstance(frame, FrameGetNodeInformationConfirmation) and frame.node_id == self.node_id: # We are still waiting for GetNodeInformationNotification return False if isinstance(frame, FrameGetNodeInformationNotification) and frame.node_id == self.node_id: self.notification_frame = frame self.success = True return True return False
[ "async", "def", "handle_frame", "(", "self", ",", "frame", ")", ":", "if", "isinstance", "(", "frame", ",", "FrameGetNodeInformationConfirmation", ")", "and", "frame", ".", "node_id", "==", "self", ".", "node_id", ":", "# We are still waiting for GetNodeInformationNotification", "return", "False", "if", "isinstance", "(", "frame", ",", "FrameGetNodeInformationNotification", ")", "and", "frame", ".", "node_id", "==", "self", ".", "node_id", ":", "self", ".", "notification_frame", "=", "frame", "self", ".", "success", "=", "True", "return", "True", "return", "False" ]
Handle incoming API frame, return True if this was the expected frame.
[ "Handle", "incoming", "API", "frame", "return", "True", "if", "this", "was", "the", "expected", "frame", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/get_node_information.py#L18-L27
Julius2342/pyvlx
pyvlx/heartbeat.py
Heartbeat.start
def start(self): """Create loop task.""" self.run_task = self.pyvlx.loop.create_task( self.loop())
python
def start(self): """Create loop task.""" self.run_task = self.pyvlx.loop.create_task( self.loop())
[ "def", "start", "(", "self", ")", ":", "self", ".", "run_task", "=", "self", ".", "pyvlx", ".", "loop", ".", "create_task", "(", "self", ".", "loop", "(", ")", ")" ]
Create loop task.
[ "Create", "loop", "task", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/heartbeat.py#L25-L28
Julius2342/pyvlx
pyvlx/heartbeat.py
Heartbeat.stop
async def stop(self): """Stop heartbeat.""" self.stopped = True self.loop_event.set() # Waiting for shutdown of loop() await self.stopped_event.wait()
python
async def stop(self): """Stop heartbeat.""" self.stopped = True self.loop_event.set() # Waiting for shutdown of loop() await self.stopped_event.wait()
[ "async", "def", "stop", "(", "self", ")", ":", "self", ".", "stopped", "=", "True", "self", ".", "loop_event", ".", "set", "(", ")", "# Waiting for shutdown of loop()", "await", "self", ".", "stopped_event", ".", "wait", "(", ")" ]
Stop heartbeat.
[ "Stop", "heartbeat", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/heartbeat.py#L30-L35
Julius2342/pyvlx
pyvlx/heartbeat.py
Heartbeat.loop
async def loop(self): """Pulse every timeout seconds until stopped.""" while not self.stopped: self.timeout_handle = self.pyvlx.connection.loop.call_later( self.timeout_in_seconds, self.loop_timeout) await self.loop_event.wait() if not self.stopped: self.loop_event.clear() await self.pulse() self.cancel_loop_timeout() self.stopped_event.set()
python
async def loop(self): """Pulse every timeout seconds until stopped.""" while not self.stopped: self.timeout_handle = self.pyvlx.connection.loop.call_later( self.timeout_in_seconds, self.loop_timeout) await self.loop_event.wait() if not self.stopped: self.loop_event.clear() await self.pulse() self.cancel_loop_timeout() self.stopped_event.set()
[ "async", "def", "loop", "(", "self", ")", ":", "while", "not", "self", ".", "stopped", ":", "self", ".", "timeout_handle", "=", "self", ".", "pyvlx", ".", "connection", ".", "loop", ".", "call_later", "(", "self", ".", "timeout_in_seconds", ",", "self", ".", "loop_timeout", ")", "await", "self", ".", "loop_event", ".", "wait", "(", ")", "if", "not", "self", ".", "stopped", ":", "self", ".", "loop_event", ".", "clear", "(", ")", "await", "self", ".", "pulse", "(", ")", "self", ".", "cancel_loop_timeout", "(", ")", "self", ".", "stopped_event", ".", "set", "(", ")" ]
Pulse every timeout seconds until stopped.
[ "Pulse", "every", "timeout", "seconds", "until", "stopped", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/heartbeat.py#L37-L47
Julius2342/pyvlx
pyvlx/heartbeat.py
Heartbeat.pulse
async def pulse(self): """Send get state request to API to keep the connection alive.""" get_state = GetState(pyvlx=self.pyvlx) await get_state.do_api_call() if not get_state.success: raise PyVLXException("Unable to send get state.")
python
async def pulse(self): """Send get state request to API to keep the connection alive.""" get_state = GetState(pyvlx=self.pyvlx) await get_state.do_api_call() if not get_state.success: raise PyVLXException("Unable to send get state.")
[ "async", "def", "pulse", "(", "self", ")", ":", "get_state", "=", "GetState", "(", "pyvlx", "=", "self", ".", "pyvlx", ")", "await", "get_state", ".", "do_api_call", "(", ")", "if", "not", "get_state", ".", "success", ":", "raise", "PyVLXException", "(", "\"Unable to send get state.\"", ")" ]
Send get state request to API to keep the connection alive.
[ "Send", "get", "state", "request", "to", "API", "to", "keep", "the", "connection", "alive", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/heartbeat.py#L59-L64
Julius2342/pyvlx
pyvlx/frames/frame_get_state.py
FrameGetStateConfirmation.get_payload
def get_payload(self): """Return Payload.""" payload = bytes([self.gateway_state.value, self.gateway_sub_state.value]) payload += bytes(4) # State date, reserved for future use return payload
python
def get_payload(self): """Return Payload.""" payload = bytes([self.gateway_state.value, self.gateway_sub_state.value]) payload += bytes(4) # State date, reserved for future use return payload
[ "def", "get_payload", "(", "self", ")", ":", "payload", "=", "bytes", "(", "[", "self", ".", "gateway_state", ".", "value", ",", "self", ".", "gateway_sub_state", ".", "value", "]", ")", "payload", "+=", "bytes", "(", "4", ")", "# State date, reserved for future use", "return", "payload" ]
Return Payload.
[ "Return", "Payload", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/frames/frame_get_state.py#L53-L57
Julius2342/pyvlx
pyvlx/frames/frame_get_state.py
FrameGetStateConfirmation.from_payload
def from_payload(self, payload): """Init frame from binary data.""" self.gateway_state = GatewayState(payload[0]) self.gateway_sub_state = GatewaySubState(payload[1])
python
def from_payload(self, payload): """Init frame from binary data.""" self.gateway_state = GatewayState(payload[0]) self.gateway_sub_state = GatewaySubState(payload[1])
[ "def", "from_payload", "(", "self", ",", "payload", ")", ":", "self", ".", "gateway_state", "=", "GatewayState", "(", "payload", "[", "0", "]", ")", "self", ".", "gateway_sub_state", "=", "GatewaySubState", "(", "payload", "[", "1", "]", ")" ]
Init frame from binary data.
[ "Init", "frame", "from", "binary", "data", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/frames/frame_get_state.py#L59-L62
Julius2342/pyvlx
pyvlx/string_helper.py
string_to_bytes
def string_to_bytes(string, size):
    """Convert string to bytes and add padding."""
    if len(string) > size:
        raise PyVLXException("string_to_bytes::string_to_large")
    encoded = bytes(string, encoding='utf-8')
    return encoded + bytes(size-len(encoded))
python
def string_to_bytes(string, size):
    """Convert string to bytes and add padding."""
    if len(string) > size:
        raise PyVLXException("string_to_bytes::string_to_large")
    encoded = bytes(string, encoding='utf-8')
    return encoded + bytes(size-len(encoded))
[ "def", "string_to_bytes", "(", "string", ",", "size", ")", ":", "if", "len", "(", "string", ")", ">", "size", ":", "raise", "PyVLXException", "(", "\"string_to_bytes::string_to_large\"", ")", "encoded", "=", "bytes", "(", "string", ",", "encoding", "=", "'utf-8'", ")", "return", "encoded", "+", "bytes", "(", "size", "-", "len", "(", "encoded", ")", ")" ]
Convert string to bytes and add padding.
[ "Convert", "string", "to", "bytes", "add", "padding", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/string_helper.py#L5-L10
Julius2342/pyvlx
pyvlx/string_helper.py
bytes_to_string
def bytes_to_string(raw): """Convert bytes to string.""" ret = bytes() for byte in raw: if byte == 0x00: return ret.decode("utf-8") ret += bytes([byte]) return ret.decode("utf-8")
python
def bytes_to_string(raw): """Convert bytes to string.""" ret = bytes() for byte in raw: if byte == 0x00: return ret.decode("utf-8") ret += bytes([byte]) return ret.decode("utf-8")
[ "def", "bytes_to_string", "(", "raw", ")", ":", "ret", "=", "bytes", "(", ")", "for", "byte", "in", "raw", ":", "if", "byte", "==", "0x00", ":", "return", "ret", ".", "decode", "(", "\"utf-8\"", ")", "ret", "+=", "bytes", "(", "[", "byte", "]", ")", "return", "ret", ".", "decode", "(", "\"utf-8\"", ")" ]
Convert bytes to string.
[ "Convert", "bytes", "to", "string", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/string_helper.py#L13-L20
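string_to_bytes and bytes_to_string above are inverses: the first pads with NUL bytes up to the requested size, the second stops decoding at the first NUL. A small round-trip sketch, assuming pyvlx is installed (the import path follows the file paths in the two records):

# Round trip through the pyvlx string helpers shown above.
from pyvlx.string_helper import string_to_bytes, bytes_to_string

raw = string_to_bytes('Bath', 64)       # b'Bath' followed by 60 zero bytes of padding
assert len(raw) == 64
assert bytes_to_string(raw) == 'Bath'   # decoding stops at the first 0x00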
Julius2342/pyvlx
pyvlx/frames/frame_node_state_position_changed_notification.py
FrameNodeStatePositionChangedNotification.get_payload
def get_payload(self): """Return Payload.""" payload = bytes([self.node_id]) payload += bytes([self.state]) payload += bytes(self.current_position.raw) payload += bytes(self.target.raw) payload += bytes(self.current_position_fp1.raw) payload += bytes(self.current_position_fp2.raw) payload += bytes(self.current_position_fp3.raw) payload += bytes(self.current_position_fp4.raw) payload += bytes([self.remaining_time >> 8 & 255, self.remaining_time & 255]) payload += struct.pack(">I", self.timestamp) return payload
python
def get_payload(self): """Return Payload.""" payload = bytes([self.node_id]) payload += bytes([self.state]) payload += bytes(self.current_position.raw) payload += bytes(self.target.raw) payload += bytes(self.current_position_fp1.raw) payload += bytes(self.current_position_fp2.raw) payload += bytes(self.current_position_fp3.raw) payload += bytes(self.current_position_fp4.raw) payload += bytes([self.remaining_time >> 8 & 255, self.remaining_time & 255]) payload += struct.pack(">I", self.timestamp) return payload
[ "def", "get_payload", "(", "self", ")", ":", "payload", "=", "bytes", "(", "[", "self", ".", "node_id", "]", ")", "payload", "+=", "bytes", "(", "[", "self", ".", "state", "]", ")", "payload", "+=", "bytes", "(", "self", ".", "current_position", ".", "raw", ")", "payload", "+=", "bytes", "(", "self", ".", "target", ".", "raw", ")", "payload", "+=", "bytes", "(", "self", ".", "current_position_fp1", ".", "raw", ")", "payload", "+=", "bytes", "(", "self", ".", "current_position_fp2", ".", "raw", ")", "payload", "+=", "bytes", "(", "self", ".", "current_position_fp3", ".", "raw", ")", "payload", "+=", "bytes", "(", "self", ".", "current_position_fp4", ".", "raw", ")", "payload", "+=", "bytes", "(", "[", "self", ".", "remaining_time", ">>", "8", "&", "255", ",", "self", ".", "remaining_time", "&", "255", "]", ")", "payload", "+=", "struct", ".", "pack", "(", "\">I\"", ",", "self", ".", "timestamp", ")", "return", "payload" ]
Return Payload.
[ "Return", "Payload", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/frames/frame_node_state_position_changed_notification.py#L30-L42
Julius2342/pyvlx
pyvlx/frames/frame_node_state_position_changed_notification.py
FrameNodeStatePositionChangedNotification.from_payload
def from_payload(self, payload): """Init frame from binary data.""" self.node_id = payload[0] self.state = payload[1] self.current_position = Parameter(payload[2:4]) self.target = Parameter(payload[4:6]) self.current_position_fp1 = Parameter(payload[6:8]) self.current_position_fp2 = Parameter(payload[8:10]) self.current_position_fp3 = Parameter(payload[10:12]) self.current_position_fp4 = Parameter(payload[12:14]) self.remaining_time = payload[14] * 256 + payload[15] # @VELUX: looks like your timestamp is wrong. Looks like # you are only transmitting the two lower bytes. self.timestamp = struct.unpack(">I", payload[16:20])[0]
python
def from_payload(self, payload): """Init frame from binary data.""" self.node_id = payload[0] self.state = payload[1] self.current_position = Parameter(payload[2:4]) self.target = Parameter(payload[4:6]) self.current_position_fp1 = Parameter(payload[6:8]) self.current_position_fp2 = Parameter(payload[8:10]) self.current_position_fp3 = Parameter(payload[10:12]) self.current_position_fp4 = Parameter(payload[12:14]) self.remaining_time = payload[14] * 256 + payload[15] # @VELUX: looks like your timestamp is wrong. Looks like # you are only transmitting the two lower bytes. self.timestamp = struct.unpack(">I", payload[16:20])[0]
[ "def", "from_payload", "(", "self", ",", "payload", ")", ":", "self", ".", "node_id", "=", "payload", "[", "0", "]", "self", ".", "state", "=", "payload", "[", "1", "]", "self", ".", "current_position", "=", "Parameter", "(", "payload", "[", "2", ":", "4", "]", ")", "self", ".", "target", "=", "Parameter", "(", "payload", "[", "4", ":", "6", "]", ")", "self", ".", "current_position_fp1", "=", "Parameter", "(", "payload", "[", "6", ":", "8", "]", ")", "self", ".", "current_position_fp2", "=", "Parameter", "(", "payload", "[", "8", ":", "10", "]", ")", "self", ".", "current_position_fp3", "=", "Parameter", "(", "payload", "[", "10", ":", "12", "]", ")", "self", ".", "current_position_fp4", "=", "Parameter", "(", "payload", "[", "12", ":", "14", "]", ")", "self", ".", "remaining_time", "=", "payload", "[", "14", "]", "*", "256", "+", "payload", "[", "15", "]", "# @VELUX: looks like your timestamp is wrong. Looks like", "# you are only transmitting the two lower bytes.", "self", ".", "timestamp", "=", "struct", ".", "unpack", "(", "\">I\"", ",", "payload", "[", "16", ":", "20", "]", ")", "[", "0", "]" ]
Init frame from binary data.
[ "Init", "frame", "from", "binary", "data", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/frames/frame_node_state_position_changed_notification.py#L44-L57
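The remaining_time field handled by the two methods above is a plain big-endian 16-bit integer; the byte arithmetic can be checked on its own, without any pyvlx objects:

# Standalone sketch of the 16-bit encode/decode used by get_payload()/from_payload() above.
remaining_time = 1337
encoded = bytes([remaining_time >> 8 & 255, remaining_time & 255])   # as in get_payload()
decoded = encoded[0] * 256 + encoded[1]                              # as in from_payload()
assert decoded == remaining_time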
Julius2342/pyvlx
pyvlx/house_status_monitor.py
house_status_monitor_enable
async def house_status_monitor_enable(pyvlx): """Enable house status monitor.""" status_monitor_enable = HouseStatusMonitorEnable(pyvlx=pyvlx) await status_monitor_enable.do_api_call() if not status_monitor_enable.success: raise PyVLXException("Unable enable house status monitor.")
python
async def house_status_monitor_enable(pyvlx): """Enable house status monitor.""" status_monitor_enable = HouseStatusMonitorEnable(pyvlx=pyvlx) await status_monitor_enable.do_api_call() if not status_monitor_enable.success: raise PyVLXException("Unable enable house status monitor.")
[ "async", "def", "house_status_monitor_enable", "(", "pyvlx", ")", ":", "status_monitor_enable", "=", "HouseStatusMonitorEnable", "(", "pyvlx", "=", "pyvlx", ")", "await", "status_monitor_enable", ".", "do_api_call", "(", ")", "if", "not", "status_monitor_enable", ".", "success", ":", "raise", "PyVLXException", "(", "\"Unable enable house status monitor.\"", ")" ]
Enable house status monitor.
[ "Enable", "house", "status", "monitor", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/house_status_monitor.py#L51-L56
Julius2342/pyvlx
pyvlx/house_status_monitor.py
house_status_monitor_disable
async def house_status_monitor_disable(pyvlx): """Disable house status monitor.""" status_monitor_disable = HouseStatusMonitorDisable(pyvlx=pyvlx) await status_monitor_disable.do_api_call() if not status_monitor_disable.success: raise PyVLXException("Unable disable house status monitor.")
python
async def house_status_monitor_disable(pyvlx): """Disable house status monitor.""" status_monitor_disable = HouseStatusMonitorDisable(pyvlx=pyvlx) await status_monitor_disable.do_api_call() if not status_monitor_disable.success: raise PyVLXException("Unable disable house status monitor.")
[ "async", "def", "house_status_monitor_disable", "(", "pyvlx", ")", ":", "status_monitor_disable", "=", "HouseStatusMonitorDisable", "(", "pyvlx", "=", "pyvlx", ")", "await", "status_monitor_disable", ".", "do_api_call", "(", ")", "if", "not", "status_monitor_disable", ".", "success", ":", "raise", "PyVLXException", "(", "\"Unable disable house status monitor.\"", ")" ]
Disable house status monitor.
[ "Disable", "house", "status", "monitor", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/house_status_monitor.py#L59-L64
Julius2342/pyvlx
pyvlx/house_status_monitor.py
HouseStatusMonitorEnable.handle_frame
async def handle_frame(self, frame): """Handle incoming API frame, return True if this was the expected frame.""" if not isinstance(frame, FrameHouseStatusMonitorEnableConfirmation): return False self.success = True return True
python
async def handle_frame(self, frame): """Handle incoming API frame, return True if this was the expected frame.""" if not isinstance(frame, FrameHouseStatusMonitorEnableConfirmation): return False self.success = True return True
[ "async", "def", "handle_frame", "(", "self", ",", "frame", ")", ":", "if", "not", "isinstance", "(", "frame", ",", "FrameHouseStatusMonitorEnableConfirmation", ")", ":", "return", "False", "self", ".", "success", "=", "True", "return", "True" ]
Handle incoming API frame, return True if this was the expected frame.
[ "Handle", "incoming", "API", "frame", "return", "True", "if", "this", "was", "the", "expected", "frame", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/house_status_monitor.py#L19-L24
Julius2342/pyvlx
pyvlx/house_status_monitor.py
HouseStatusMonitorDisable.handle_frame
async def handle_frame(self, frame): """Handle incoming API frame, return True if this was the expected frame.""" if not isinstance(frame, FrameHouseStatusMonitorDisableConfirmation): return False self.success = True return True
python
async def handle_frame(self, frame): """Handle incoming API frame, return True if this was the expected frame.""" if not isinstance(frame, FrameHouseStatusMonitorDisableConfirmation): return False self.success = True return True
[ "async", "def", "handle_frame", "(", "self", ",", "frame", ")", ":", "if", "not", "isinstance", "(", "frame", ",", "FrameHouseStatusMonitorDisableConfirmation", ")", ":", "return", "False", "self", ".", "success", "=", "True", "return", "True" ]
Handle incoming API frame, return True if this was the expected frame.
[ "Handle", "incoming", "API", "frame", "return", "True", "if", "this", "was", "the", "expected", "frame", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/house_status_monitor.py#L39-L44
tbielawa/bitmath
bitmath/integrations.py
BitmathType
def BitmathType(bmstring): """An 'argument type' for integrations with the argparse module. For more information, see https://docs.python.org/2/library/argparse.html#type Of particular interest to us is this bit: ``type=`` can take any callable that takes a single string argument and returns the converted value I.e., ``type`` can be a function (such as this function) or a class which implements the ``__call__`` method. Example usage of the bitmath.BitmathType argparser type: >>> import bitmath >>> import argparse >>> parser = argparse.ArgumentParser() >>> parser.add_argument("--file-size", type=bitmath.BitmathType) >>> parser.parse_args("--file-size 1337MiB".split()) Namespace(file_size=MiB(1337.0)) Invalid usage includes any input that the bitmath.parse_string function already rejects. Additionally, **UNQUOTED** arguments with spaces in them are rejected (shlex.split used in the following examples to conserve single quotes in the parse_args call): >>> parser = argparse.ArgumentParser() >>> parser.add_argument("--file-size", type=bitmath.BitmathType) >>> import shlex >>> # The following is ACCEPTABLE USAGE: ... >>> parser.parse_args(shlex.split("--file-size '1337 MiB'")) Namespace(file_size=MiB(1337.0)) >>> # The following is INCORRECT USAGE because the string "1337 MiB" is not quoted! ... >>> parser.parse_args(shlex.split("--file-size 1337 MiB")) error: argument --file-size: 1337 can not be parsed into a valid bitmath object """ try: argvalue = bitmath.parse_string(bmstring) except ValueError: raise argparse.ArgumentTypeError("'%s' can not be parsed into a valid bitmath object" % bmstring) else: return argvalue
python
def BitmathType(bmstring): """An 'argument type' for integrations with the argparse module. For more information, see https://docs.python.org/2/library/argparse.html#type Of particular interest to us is this bit: ``type=`` can take any callable that takes a single string argument and returns the converted value I.e., ``type`` can be a function (such as this function) or a class which implements the ``__call__`` method. Example usage of the bitmath.BitmathType argparser type: >>> import bitmath >>> import argparse >>> parser = argparse.ArgumentParser() >>> parser.add_argument("--file-size", type=bitmath.BitmathType) >>> parser.parse_args("--file-size 1337MiB".split()) Namespace(file_size=MiB(1337.0)) Invalid usage includes any input that the bitmath.parse_string function already rejects. Additionally, **UNQUOTED** arguments with spaces in them are rejected (shlex.split used in the following examples to conserve single quotes in the parse_args call): >>> parser = argparse.ArgumentParser() >>> parser.add_argument("--file-size", type=bitmath.BitmathType) >>> import shlex >>> # The following is ACCEPTABLE USAGE: ... >>> parser.parse_args(shlex.split("--file-size '1337 MiB'")) Namespace(file_size=MiB(1337.0)) >>> # The following is INCORRECT USAGE because the string "1337 MiB" is not quoted! ... >>> parser.parse_args(shlex.split("--file-size 1337 MiB")) error: argument --file-size: 1337 can not be parsed into a valid bitmath object """ try: argvalue = bitmath.parse_string(bmstring) except ValueError: raise argparse.ArgumentTypeError("'%s' can not be parsed into a valid bitmath object" % bmstring) else: return argvalue
[ "def", "BitmathType", "(", "bmstring", ")", ":", "try", ":", "argvalue", "=", "bitmath", ".", "parse_string", "(", "bmstring", ")", "except", "ValueError", ":", "raise", "argparse", ".", "ArgumentTypeError", "(", "\"'%s' can not be parsed into a valid bitmath object\"", "%", "bmstring", ")", "else", ":", "return", "argvalue" ]
An 'argument type' for integrations with the argparse module. For more information, see https://docs.python.org/2/library/argparse.html#type Of particular interest to us is this bit: ``type=`` can take any callable that takes a single string argument and returns the converted value I.e., ``type`` can be a function (such as this function) or a class which implements the ``__call__`` method. Example usage of the bitmath.BitmathType argparser type: >>> import bitmath >>> import argparse >>> parser = argparse.ArgumentParser() >>> parser.add_argument("--file-size", type=bitmath.BitmathType) >>> parser.parse_args("--file-size 1337MiB".split()) Namespace(file_size=MiB(1337.0)) Invalid usage includes any input that the bitmath.parse_string function already rejects. Additionally, **UNQUOTED** arguments with spaces in them are rejected (shlex.split used in the following examples to conserve single quotes in the parse_args call): >>> parser = argparse.ArgumentParser() >>> parser.add_argument("--file-size", type=bitmath.BitmathType) >>> import shlex >>> # The following is ACCEPTABLE USAGE: ... >>> parser.parse_args(shlex.split("--file-size '1337 MiB'")) Namespace(file_size=MiB(1337.0)) >>> # The following is INCORRECT USAGE because the string "1337 MiB" is not quoted! ... >>> parser.parse_args(shlex.split("--file-size 1337 MiB")) error: argument --file-size: 1337 can not be parsed into a valid bitmath object
[ "An", "argument", "type", "for", "integrations", "with", "the", "argparse", "module", "." ]
train
https://github.com/tbielawa/bitmath/blob/58ad3ac5f076cc6e53f36a91af055c6028c850a5/bitmath/integrations.py#L33-L80
tbielawa/bitmath
bitmath/integrations.py
BitmathFileTransferSpeed.update
def update(self, pbar): """Updates the widget with the current NIST/SI speed. Basically, this calculates the average rate of update and figures out how to make a "pretty" prefix unit""" if pbar.seconds_elapsed < 2e-6 or pbar.currval < 2e-6: scaled = bitmath.Byte() else: speed = pbar.currval / pbar.seconds_elapsed scaled = bitmath.Byte(speed).best_prefix(system=self.system) return scaled.format(self.format)
python
def update(self, pbar): """Updates the widget with the current NIST/SI speed. Basically, this calculates the average rate of update and figures out how to make a "pretty" prefix unit""" if pbar.seconds_elapsed < 2e-6 or pbar.currval < 2e-6: scaled = bitmath.Byte() else: speed = pbar.currval / pbar.seconds_elapsed scaled = bitmath.Byte(speed).best_prefix(system=self.system) return scaled.format(self.format)
[ "def", "update", "(", "self", ",", "pbar", ")", ":", "if", "pbar", ".", "seconds_elapsed", "<", "2e-6", "or", "pbar", ".", "currval", "<", "2e-6", ":", "scaled", "=", "bitmath", ".", "Byte", "(", ")", "else", ":", "speed", "=", "pbar", ".", "currval", "/", "pbar", ".", "seconds_elapsed", "scaled", "=", "bitmath", ".", "Byte", "(", "speed", ")", ".", "best_prefix", "(", "system", "=", "self", ".", "system", ")", "return", "scaled", ".", "format", "(", "self", ".", "format", ")" ]
Updates the widget with the current NIST/SI speed. Basically, this calculates the average rate of update and figures out how to make a "pretty" prefix unit
[ "Updates", "the", "widget", "with", "the", "current", "NIST", "/", "SI", "speed", "." ]
train
https://github.com/tbielawa/bitmath/blob/58ad3ac5f076cc6e53f36a91af055c6028c850a5/bitmath/integrations.py#L92-L104
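The best_prefix()/format() combination used in update() can be tried in isolation; a sketch assuming the bitmath package is installed (the format string here is an illustration, not necessarily the widget's default):

# Hedged standalone sketch of the scaling done inside the widget above.
import bitmath

scaled = bitmath.Byte(1337000).best_prefix(system=bitmath.SI)   # picks a 'pretty' SI prefix, e.g. MB
print(scaled.format("{value:.2f} {unit}/s"))                    # e.g. '1.34 MB/s'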
Julius2342/pyvlx
pyvlx/command_send.py
CommandSend.handle_frame
async def handle_frame(self, frame):
    """Handle incoming API frame, return True if this was the expected frame."""
    if isinstance(frame, FrameCommandSendConfirmation) and frame.session_id == self.session_id:
        if frame.status == CommandSendConfirmationStatus.ACCEPTED:
            self.success = True
        return not self.wait_for_completion
    if isinstance(frame, FrameCommandRemainingTimeNotification) and frame.session_id == self.session_id:
        # Ignoring FrameCommandRemainingTimeNotification
        return False
    if isinstance(frame, FrameCommandRunStatusNotification) and frame.session_id == self.session_id:
        # At the moment I don't really understand what the FrameCommandRunStatusNotification is good for.
        # Ignoring these packets for now
        return False
    if isinstance(frame, FrameSessionFinishedNotification) and frame.session_id == self.session_id:
        return True
    return False
python
async def handle_frame(self, frame):
    """Handle incoming API frame, return True if this was the expected frame."""
    if isinstance(frame, FrameCommandSendConfirmation) and frame.session_id == self.session_id:
        if frame.status == CommandSendConfirmationStatus.ACCEPTED:
            self.success = True
        return not self.wait_for_completion
    if isinstance(frame, FrameCommandRemainingTimeNotification) and frame.session_id == self.session_id:
        # Ignoring FrameCommandRemainingTimeNotification
        return False
    if isinstance(frame, FrameCommandRunStatusNotification) and frame.session_id == self.session_id:
        # At the moment I don't really understand what the FrameCommandRunStatusNotification is good for.
        # Ignoring these packets for now
        return False
    if isinstance(frame, FrameSessionFinishedNotification) and frame.session_id == self.session_id:
        return True
    return False
[ "async", "def", "handle_frame", "(", "self", ",", "frame", ")", ":", "if", "isinstance", "(", "frame", ",", "FrameCommandSendConfirmation", ")", "and", "frame", ".", "session_id", "==", "self", ".", "session_id", ":", "if", "frame", ".", "status", "==", "CommandSendConfirmationStatus", ".", "ACCEPTED", ":", "self", ".", "success", "=", "True", "return", "not", "self", ".", "wait_for_completion", "if", "isinstance", "(", "frame", ",", "FrameCommandRemainingTimeNotification", ")", "and", "frame", ".", "session_id", "==", "self", ".", "session_id", ":", "# Ignoring FrameCommandRemainingTimeNotification", "return", "False", "if", "isinstance", "(", "frame", ",", "FrameCommandRunStatusNotification", ")", "and", "frame", ".", "session_id", "==", "self", ".", "session_id", ":", "# At the moment I don't reall understand what the FrameCommandRunStatusNotification is good for.", "# Ignoring these packets for now", "return", "False", "if", "isinstance", "(", "frame", ",", "FrameSessionFinishedNotification", ")", "and", "frame", ".", "session_id", "==", "self", ".", "session_id", ":", "return", "True", "return", "False" ]
Handle incoming API frame, return True if this was the expected frame.
[ "Handle", "incoming", "API", "frame", "return", "True", "if", "this", "was", "the", "expected", "frame", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/command_send.py#L22-L37
Julius2342/pyvlx
pyvlx/command_send.py
CommandSend.request_frame
def request_frame(self): """Construct initiating frame.""" self.session_id = get_new_session_id() return FrameCommandSendRequest(node_ids=[self.node_id], parameter=self.parameter, session_id=self.session_id)
python
def request_frame(self): """Construct initiating frame.""" self.session_id = get_new_session_id() return FrameCommandSendRequest(node_ids=[self.node_id], parameter=self.parameter, session_id=self.session_id)
[ "def", "request_frame", "(", "self", ")", ":", "self", ".", "session_id", "=", "get_new_session_id", "(", ")", "return", "FrameCommandSendRequest", "(", "node_ids", "=", "[", "self", ".", "node_id", "]", ",", "parameter", "=", "self", ".", "parameter", ",", "session_id", "=", "self", ".", "session_id", ")" ]
Construct initiating frame.
[ "Construct", "initiating", "frame", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/command_send.py#L39-L42
Julius2342/pyvlx
pyvlx/config.py
Config.read_config
def read_config(self, path): """Read configuration file.""" PYVLXLOG.info('Reading config file: %s', path) try: with open(path, 'r') as filehandle: doc = yaml.safe_load(filehandle) self.test_configuration(doc, path) self.host = doc['config']['host'] self.password = doc['config']['password'] if 'port' in doc['config']: self.port = doc['config']['port'] except FileNotFoundError as ex: raise PyVLXException('file does not exist: {0}'.format(ex))
python
def read_config(self, path): """Read configuration file.""" PYVLXLOG.info('Reading config file: %s', path) try: with open(path, 'r') as filehandle: doc = yaml.safe_load(filehandle) self.test_configuration(doc, path) self.host = doc['config']['host'] self.password = doc['config']['password'] if 'port' in doc['config']: self.port = doc['config']['port'] except FileNotFoundError as ex: raise PyVLXException('file does not exist: {0}'.format(ex))
[ "def", "read_config", "(", "self", ",", "path", ")", ":", "PYVLXLOG", ".", "info", "(", "'Reading config file: %s'", ",", "path", ")", "try", ":", "with", "open", "(", "path", ",", "'r'", ")", "as", "filehandle", ":", "doc", "=", "yaml", ".", "safe_load", "(", "filehandle", ")", "self", ".", "test_configuration", "(", "doc", ",", "path", ")", "self", ".", "host", "=", "doc", "[", "'config'", "]", "[", "'host'", "]", "self", ".", "password", "=", "doc", "[", "'config'", "]", "[", "'password'", "]", "if", "'port'", "in", "doc", "[", "'config'", "]", ":", "self", ".", "port", "=", "doc", "[", "'config'", "]", "[", "'port'", "]", "except", "FileNotFoundError", "as", "ex", ":", "raise", "PyVLXException", "(", "'file does not exist: {0}'", ".", "format", "(", "ex", ")", ")" ]
Read configuration file.
[ "Read", "configuration", "file", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/config.py#L22-L34
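read_config() above expects a YAML document with a top-level 'config' mapping providing host, password and an optional port. A sketch of the expected shape, parsed the same way the method does; the concrete values are illustrative (host and password mirror the commented example in the demo record earlier, the port is a made-up placeholder):

# Hedged sketch of a pyvlx.yaml-style document matching the keys read_config() looks up.
import yaml

doc = yaml.safe_load("""
config:
    host: 192.168.2.127
    password: velux123
    port: 51200          # optional; illustrative value only
""")
assert doc['config']['host'] == '192.168.2.127'
assert 'port' in doc['config']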
pyQode/pyqode.qt
pyqode/qt/__init__.py
setup_apiv2
def setup_apiv2(): """ Setup apiv2 when using PyQt4 and Python2. """ # setup PyQt api to version 2 if sys.version_info[0] == 2: logging.getLogger(__name__).debug( 'setting up SIP API to version 2') import sip try: sip.setapi("QString", 2) sip.setapi("QVariant", 2) except ValueError: logging.getLogger(__name__).critical( "failed to set up sip api to version 2 for PyQt4") raise ImportError('PyQt4')
python
def setup_apiv2(): """ Setup apiv2 when using PyQt4 and Python2. """ # setup PyQt api to version 2 if sys.version_info[0] == 2: logging.getLogger(__name__).debug( 'setting up SIP API to version 2') import sip try: sip.setapi("QString", 2) sip.setapi("QVariant", 2) except ValueError: logging.getLogger(__name__).critical( "failed to set up sip api to version 2 for PyQt4") raise ImportError('PyQt4')
[ "def", "setup_apiv2", "(", ")", ":", "# setup PyQt api to version 2", "if", "sys", ".", "version_info", "[", "0", "]", "==", "2", ":", "logging", ".", "getLogger", "(", "__name__", ")", ".", "debug", "(", "'setting up SIP API to version 2'", ")", "import", "sip", "try", ":", "sip", ".", "setapi", "(", "\"QString\"", ",", "2", ")", "sip", ".", "setapi", "(", "\"QVariant\"", ",", "2", ")", "except", "ValueError", ":", "logging", ".", "getLogger", "(", "__name__", ")", ".", "critical", "(", "\"failed to set up sip api to version 2 for PyQt4\"", ")", "raise", "ImportError", "(", "'PyQt4'", ")" ]
Setup apiv2 when using PyQt4 and Python2.
[ "Setup", "apiv2", "when", "using", "PyQt4", "and", "Python2", "." ]
train
https://github.com/pyQode/pyqode.qt/blob/56ee08fdcd4d9c4441dcf85f89b51d4ae3a727bd/pyqode/qt/__init__.py#L79-L94
pyQode/pyqode.qt
pyqode/qt/__init__.py
autodetect
def autodetect(): """ Auto-detects and use the first available QT_API by importing them in the following order: 1) PyQt5 2) PyQt4 3) PySide """ logging.getLogger(__name__).debug('auto-detecting QT_API') try: logging.getLogger(__name__).debug('trying PyQt5') import PyQt5 os.environ[QT_API] = PYQT5_API[0] logging.getLogger(__name__).debug('imported PyQt5') except ImportError: try: logging.getLogger(__name__).debug('trying PyQt4') setup_apiv2() import PyQt4 os.environ[QT_API] = PYQT4_API[0] logging.getLogger(__name__).debug('imported PyQt4') except ImportError: try: logging.getLogger(__name__).debug('trying PySide') import PySide os.environ[QT_API] = PYSIDE_API[0] logging.getLogger(__name__).debug('imported PySide') except ImportError: raise PythonQtError('No Qt bindings could be found')
python
def autodetect(): """ Auto-detects and use the first available QT_API by importing them in the following order: 1) PyQt5 2) PyQt4 3) PySide """ logging.getLogger(__name__).debug('auto-detecting QT_API') try: logging.getLogger(__name__).debug('trying PyQt5') import PyQt5 os.environ[QT_API] = PYQT5_API[0] logging.getLogger(__name__).debug('imported PyQt5') except ImportError: try: logging.getLogger(__name__).debug('trying PyQt4') setup_apiv2() import PyQt4 os.environ[QT_API] = PYQT4_API[0] logging.getLogger(__name__).debug('imported PyQt4') except ImportError: try: logging.getLogger(__name__).debug('trying PySide') import PySide os.environ[QT_API] = PYSIDE_API[0] logging.getLogger(__name__).debug('imported PySide') except ImportError: raise PythonQtError('No Qt bindings could be found')
[ "def", "autodetect", "(", ")", ":", "logging", ".", "getLogger", "(", "__name__", ")", ".", "debug", "(", "'auto-detecting QT_API'", ")", "try", ":", "logging", ".", "getLogger", "(", "__name__", ")", ".", "debug", "(", "'trying PyQt5'", ")", "import", "PyQt5", "os", ".", "environ", "[", "QT_API", "]", "=", "PYQT5_API", "[", "0", "]", "logging", ".", "getLogger", "(", "__name__", ")", ".", "debug", "(", "'imported PyQt5'", ")", "except", "ImportError", ":", "try", ":", "logging", ".", "getLogger", "(", "__name__", ")", ".", "debug", "(", "'trying PyQt4'", ")", "setup_apiv2", "(", ")", "import", "PyQt4", "os", ".", "environ", "[", "QT_API", "]", "=", "PYQT4_API", "[", "0", "]", "logging", ".", "getLogger", "(", "__name__", ")", ".", "debug", "(", "'imported PyQt4'", ")", "except", "ImportError", ":", "try", ":", "logging", ".", "getLogger", "(", "__name__", ")", ".", "debug", "(", "'trying PySide'", ")", "import", "PySide", "os", ".", "environ", "[", "QT_API", "]", "=", "PYSIDE_API", "[", "0", "]", "logging", ".", "getLogger", "(", "__name__", ")", ".", "debug", "(", "'imported PySide'", ")", "except", "ImportError", ":", "raise", "PythonQtError", "(", "'No Qt bindings could be found'", ")" ]
Auto-detects and use the first available QT_API by importing them in the following order: 1) PyQt5 2) PyQt4 3) PySide
[ "Auto", "-", "detects", "and", "use", "the", "first", "available", "QT_API", "by", "importing", "them", "in", "the", "following", "order", ":" ]
train
https://github.com/pyQode/pyqode.qt/blob/56ee08fdcd4d9c4441dcf85f89b51d4ae3a727bd/pyqode/qt/__init__.py#L97-L126
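autodetect() above records the chosen binding in os.environ under the QT_API key. A sketch of how that might be inspected; note that importing pyqode.qt may already perform detection as a side effect, and the importability of QT_API and autodetect from pyqode.qt is an assumption based on the record's file path:

# Hedged sketch; QT_API and autodetect are assumed importable from pyqode.qt as in the record above.
import os
from pyqode.qt import QT_API, autodetect

autodetect()                  # tries PyQt5, then PyQt4, then PySide
print(os.environ[QT_API])     # identifier of the binding that was found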
Julius2342/pyvlx
old_api/pyvlx/rollershutter.py
RollerShutter.from_config
def from_config(cls, pyvlx, item): """Read roller shutter from config.""" name = item['name'] ident = item['id'] subtype = item['subtype'] typeid = item['typeId'] return cls(pyvlx, ident, name, subtype, typeid)
python
def from_config(cls, pyvlx, item): """Read roller shutter from config.""" name = item['name'] ident = item['id'] subtype = item['subtype'] typeid = item['typeId'] return cls(pyvlx, ident, name, subtype, typeid)
[ "def", "from_config", "(", "cls", ",", "pyvlx", ",", "item", ")", ":", "name", "=", "item", "[", "'name'", "]", "ident", "=", "item", "[", "'id'", "]", "subtype", "=", "item", "[", "'subtype'", "]", "typeid", "=", "item", "[", "'typeId'", "]", "return", "cls", "(", "pyvlx", ",", "ident", ",", "name", ",", "subtype", ",", "typeid", ")" ]
Read roller shutter from config.
[ "Read", "roller", "shutter", "from", "config", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/old_api/pyvlx/rollershutter.py#L19-L25
Julius2342/pyvlx
pyvlx/get_scene_list.py
GetSceneList.handle_frame
async def handle_frame(self, frame): """Handle incoming API frame, return True if this was the expected frame.""" if isinstance(frame, FrameGetSceneListConfirmation): self.count_scenes = frame.count_scenes if self.count_scenes == 0: self.success = True return True # We are still waiting for FrameGetSceneListNotification(s) return False if isinstance(frame, FrameGetSceneListNotification): self.scenes.extend(frame.scenes) if frame.remaining_scenes != 0: # We are still waiting for FrameGetSceneListConfirmation(s) return False if self.count_scenes != len(self.scenes): PYVLXLOG.warning("Warning: number of received scenes does not match expected number") self.success = True return True return False
python
async def handle_frame(self, frame): """Handle incoming API frame, return True if this was the expected frame.""" if isinstance(frame, FrameGetSceneListConfirmation): self.count_scenes = frame.count_scenes if self.count_scenes == 0: self.success = True return True # We are still waiting for FrameGetSceneListNotification(s) return False if isinstance(frame, FrameGetSceneListNotification): self.scenes.extend(frame.scenes) if frame.remaining_scenes != 0: # We are still waiting for FrameGetSceneListConfirmation(s) return False if self.count_scenes != len(self.scenes): PYVLXLOG.warning("Warning: number of received scenes does not match expected number") self.success = True return True return False
[ "async", "def", "handle_frame", "(", "self", ",", "frame", ")", ":", "if", "isinstance", "(", "frame", ",", "FrameGetSceneListConfirmation", ")", ":", "self", ".", "count_scenes", "=", "frame", ".", "count_scenes", "if", "self", ".", "count_scenes", "==", "0", ":", "self", ".", "success", "=", "True", "return", "True", "# We are still waiting for FrameGetSceneListNotification(s)", "return", "False", "if", "isinstance", "(", "frame", ",", "FrameGetSceneListNotification", ")", ":", "self", ".", "scenes", ".", "extend", "(", "frame", ".", "scenes", ")", "if", "frame", ".", "remaining_scenes", "!=", "0", ":", "# We are still waiting for FrameGetSceneListConfirmation(s)", "return", "False", "if", "self", ".", "count_scenes", "!=", "len", "(", "self", ".", "scenes", ")", ":", "PYVLXLOG", ".", "warning", "(", "\"Warning: number of received scenes does not match expected number\"", ")", "self", ".", "success", "=", "True", "return", "True", "return", "False" ]
Handle incoming API frame, return True if this was the expected frame.
[ "Handle", "incoming", "API", "frame", "return", "True", "if", "this", "was", "the", "expected", "frame", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/get_scene_list.py#L19-L37
Julius2342/pyvlx
pyvlx/frames/frame_get_scene_list.py
FrameGetSceneListNotification.get_payload
def get_payload(self):
    """Return Payload."""
    ret = bytes([len(self.scenes)])
    for number, name in self.scenes:
        ret += bytes([number])
        ret += string_to_bytes(name, 64)
    ret += bytes([self.remaining_scenes])
    return ret
python
def get_payload(self):
    """Return Payload."""
    ret = bytes([len(self.scenes)])
    for number, name in self.scenes:
        ret += bytes([number])
        ret += string_to_bytes(name, 64)
    ret += bytes([self.remaining_scenes])
    return ret
[ "def", "get_payload", "(", "self", ")", ":", "ret", "=", "bytes", "(", "[", "len", "(", "self", ".", "scenes", ")", "]", ")", "for", "number", ",", "name", "in", "self", ".", "scenes", ":", "ret", "+=", "bytes", "(", "[", "number", "]", ")", "ret", "+=", "string_to_bytes", "(", "name", ",", "64", ")", "ret", "+=", "bytes", "(", "[", "self", ".", "remaining_scenes", "]", ")", "return", "ret" ]
Return Payload.
[ "Return", "Payload", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/frames/frame_get_scene_list.py#L51-L58
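The payload layout produced above is: one count byte, 65 bytes per scene (scene number followed by a 64-byte padded name), and a trailing remaining_scenes byte. A stand-alone sketch of that layout follows; string_to_bytes is approximated here with plain zero padding, which may differ in detail from the real pyvlx helper.

def string_to_bytes(text, size):
    # Approximation of the pyvlx helper: UTF-8 encode and zero-pad to `size` bytes.
    raw = text.encode('utf-8')
    return raw + bytes(size - len(raw))

def build_scene_list_payload(scenes, remaining_scenes=0):
    ret = bytes([len(scenes)])
    for number, name in scenes:
        ret += bytes([number]) + string_to_bytes(name, 64)
    return ret + bytes([remaining_scenes])

payload = build_scene_list_payload([(0, 'Open all'), (1, 'Close all')])
print(len(payload))  # 1 + 2 * 65 + 1 == 132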
Julius2342/pyvlx
pyvlx/frames/frame_get_scene_list.py
FrameGetSceneListNotification.from_payload
def from_payload(self, payload):
    """Init frame from binary data."""
    number_of_objects = payload[0]
    self.remaining_scenes = payload[-1]
    predicted_len = number_of_objects * 65 + 2
    if len(payload) != predicted_len:
        raise PyVLXException('scene_list_notification_wrong_length')
    self.scenes = []
    for i in range(number_of_objects):
        scene = payload[(i*65+1):(i*65+66)]
        number = scene[0]
        name = bytes_to_string(scene[1:])
        self.scenes.append((number, name))
python
def from_payload(self, payload):
    """Init frame from binary data."""
    number_of_objects = payload[0]
    self.remaining_scenes = payload[-1]
    predicted_len = number_of_objects * 65 + 2
    if len(payload) != predicted_len:
        raise PyVLXException('scene_list_notification_wrong_length')
    self.scenes = []
    for i in range(number_of_objects):
        scene = payload[(i*65+1):(i*65+66)]
        number = scene[0]
        name = bytes_to_string(scene[1:])
        self.scenes.append((number, name))
[ "def", "from_payload", "(", "self", ",", "payload", ")", ":", "number_of_objects", "=", "payload", "[", "0", "]", "self", ".", "remaining_scenes", "=", "payload", "[", "-", "1", "]", "predicted_len", "=", "number_of_objects", "*", "65", "+", "2", "if", "len", "(", "payload", ")", "!=", "predicted_len", ":", "raise", "PyVLXException", "(", "'scene_list_notification_wrong_length'", ")", "self", ".", "scenes", "=", "[", "]", "for", "i", "in", "range", "(", "number_of_objects", ")", ":", "scene", "=", "payload", "[", "(", "i", "*", "65", "+", "1", ")", ":", "(", "i", "*", "65", "+", "66", ")", "]", "number", "=", "scene", "[", "0", "]", "name", "=", "bytes_to_string", "(", "scene", "[", "1", ":", "]", ")", "self", ".", "scenes", ".", "append", "(", "(", "number", ",", "name", ")", ")" ]
Init frame from binary data.
[ "Init", "frame", "from", "binary", "data", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/frames/frame_get_scene_list.py#L60-L72
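Conversely, from_payload above slices the same 65-byte records back out. A self-contained parsing sketch, with bytes_to_string approximated by stripping the zero padding:

def bytes_to_string(raw):
    # Approximation of the pyvlx helper: drop zero padding and decode UTF-8.
    return raw.rstrip(b'\x00').decode('utf-8')

def parse_scene_list_payload(payload):
    number_of_objects = payload[0]
    remaining_scenes = payload[-1]
    if len(payload) != number_of_objects * 65 + 2:
        raise ValueError('scene_list_notification_wrong_length')
    scenes = []
    for i in range(number_of_objects):
        scene = payload[i * 65 + 1:i * 65 + 66]
        scenes.append((scene[0], bytes_to_string(scene[1:])))
    return scenes, remaining_scenes

# One scene named 'Sunset' with number 7, zero scenes remaining.
demo = bytes([1, 7]) + b'Sunset' + bytes(58) + bytes([0])
print(parse_scene_list_payload(demo))  # ([(7, 'Sunset')], 0)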
spacetelescope/synphot_refactor
synphot/specio.py
read_remote_spec
def read_remote_spec(filename, encoding='binary', cache=True, show_progress=True, **kwargs): """Read FITS or ASCII spectrum from a remote location. Parameters ---------- filename : str Spectrum filename. encoding, cache, show_progress See :func:`~astropy.utils.data.get_readable_fileobj`. kwargs : dict Keywords acceptable by :func:`read_fits_spec` (if FITS) or :func:`read_ascii_spec` (if ASCII). Returns ------- header : dict Metadata. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. """ with get_readable_fileobj(filename, encoding=encoding, cache=cache, show_progress=show_progress) as fd: header, wavelengths, fluxes = read_spec(fd, fname=filename, **kwargs) return header, wavelengths, fluxes
python
def read_remote_spec(filename, encoding='binary', cache=True, show_progress=True, **kwargs): """Read FITS or ASCII spectrum from a remote location. Parameters ---------- filename : str Spectrum filename. encoding, cache, show_progress See :func:`~astropy.utils.data.get_readable_fileobj`. kwargs : dict Keywords acceptable by :func:`read_fits_spec` (if FITS) or :func:`read_ascii_spec` (if ASCII). Returns ------- header : dict Metadata. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. """ with get_readable_fileobj(filename, encoding=encoding, cache=cache, show_progress=show_progress) as fd: header, wavelengths, fluxes = read_spec(fd, fname=filename, **kwargs) return header, wavelengths, fluxes
[ "def", "read_remote_spec", "(", "filename", ",", "encoding", "=", "'binary'", ",", "cache", "=", "True", ",", "show_progress", "=", "True", ",", "*", "*", "kwargs", ")", ":", "with", "get_readable_fileobj", "(", "filename", ",", "encoding", "=", "encoding", ",", "cache", "=", "cache", ",", "show_progress", "=", "show_progress", ")", "as", "fd", ":", "header", ",", "wavelengths", ",", "fluxes", "=", "read_spec", "(", "fd", ",", "fname", "=", "filename", ",", "*", "*", "kwargs", ")", "return", "header", ",", "wavelengths", ",", "fluxes" ]
Read FITS or ASCII spectrum from a remote location. Parameters ---------- filename : str Spectrum filename. encoding, cache, show_progress See :func:`~astropy.utils.data.get_readable_fileobj`. kwargs : dict Keywords acceptable by :func:`read_fits_spec` (if FITS) or :func:`read_ascii_spec` (if ASCII). Returns ------- header : dict Metadata. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum.
[ "Read", "FITS", "or", "ASCII", "spectrum", "from", "a", "remote", "location", "." ]
train
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/specio.py#L26-L55
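A hedged usage sketch for read_remote_spec: the URL below is a placeholder, and the call needs network access (or a warm astropy download cache) plus an installed synphot package.

from synphot import specio

url = 'https://example.com/data/some_spectrum.fits'  # placeholder URL
header, wavelengths, fluxes = specio.read_remote_spec(url, cache=True)
print(wavelengths.unit, fluxes.unit)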
spacetelescope/synphot_refactor
synphot/specio.py
read_spec
def read_spec(filename, fname='', **kwargs): """Read FITS or ASCII spectrum. Parameters ---------- filename : str or file pointer Spectrum file name or pointer. fname : str Filename. This is *only* used if ``filename`` is a pointer. kwargs : dict Keywords acceptable by :func:`read_fits_spec` (if FITS) or :func:`read_ascii_spec` (if ASCII). Returns ------- header : dict Metadata. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. Raises ------ synphot.exceptions.SynphotError Read failed. """ if isinstance(filename, str): fname = filename elif not fname: # pragma: no cover raise exceptions.SynphotError('Cannot determine filename.') if fname.endswith('fits') or fname.endswith('fit'): read_func = read_fits_spec else: read_func = read_ascii_spec return read_func(filename, **kwargs)
python
def read_spec(filename, fname='', **kwargs): """Read FITS or ASCII spectrum. Parameters ---------- filename : str or file pointer Spectrum file name or pointer. fname : str Filename. This is *only* used if ``filename`` is a pointer. kwargs : dict Keywords acceptable by :func:`read_fits_spec` (if FITS) or :func:`read_ascii_spec` (if ASCII). Returns ------- header : dict Metadata. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. Raises ------ synphot.exceptions.SynphotError Read failed. """ if isinstance(filename, str): fname = filename elif not fname: # pragma: no cover raise exceptions.SynphotError('Cannot determine filename.') if fname.endswith('fits') or fname.endswith('fit'): read_func = read_fits_spec else: read_func = read_ascii_spec return read_func(filename, **kwargs)
[ "def", "read_spec", "(", "filename", ",", "fname", "=", "''", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "filename", ",", "str", ")", ":", "fname", "=", "filename", "elif", "not", "fname", ":", "# pragma: no cover", "raise", "exceptions", ".", "SynphotError", "(", "'Cannot determine filename.'", ")", "if", "fname", ".", "endswith", "(", "'fits'", ")", "or", "fname", ".", "endswith", "(", "'fit'", ")", ":", "read_func", "=", "read_fits_spec", "else", ":", "read_func", "=", "read_ascii_spec", "return", "read_func", "(", "filename", ",", "*", "*", "kwargs", ")" ]
Read FITS or ASCII spectrum. Parameters ---------- filename : str or file pointer Spectrum file name or pointer. fname : str Filename. This is *only* used if ``filename`` is a pointer. kwargs : dict Keywords acceptable by :func:`read_fits_spec` (if FITS) or :func:`read_ascii_spec` (if ASCII). Returns ------- header : dict Metadata. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. Raises ------ synphot.exceptions.SynphotError Read failed.
[ "Read", "FITS", "or", "ASCII", "spectrum", "." ]
train
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/specio.py#L58-L97
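read_spec only inspects the file extension ('.fits'/'.fit' versus anything else) before handing off, so usage is simply the following; the paths are placeholders for files that must already exist.

from synphot import specio

hdr, wl, flux = specio.read_spec('my_spectrum.fits')  # dispatched to read_fits_spec
hdr, wl, flux = specio.read_spec('my_spectrum.txt')   # dispatched to read_ascii_spec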
spacetelescope/synphot_refactor
synphot/specio.py
read_ascii_spec
def read_ascii_spec(filename, wave_unit=u.AA, flux_unit=units.FLAM, **kwargs): """Read ASCII spectrum. ASCII table must have following columns: #. Wavelength data #. Flux data It can have more than 2 columns but the rest is ignored. Comments are discarded. Parameters ---------- filename : str or file pointer Spectrum file name or pointer. wave_unit, flux_unit : str or `~astropy.units.core.Unit` Wavelength and flux units, which default to Angstrom and FLAM, respectively. kwargs : dict Keywords accepted by :func:`astropy.io.ascii.ui.read`. Returns ------- header : dict This is just an empty dictionary, so returned values are the same as :func:`read_fits_spec`. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. They are set to 'float64' percision. """ header = {} dat = ascii.read(filename, **kwargs) wave_unit = units.validate_unit(wave_unit) flux_unit = units.validate_unit(flux_unit) wavelengths = dat.columns[0].data.astype(np.float64) * wave_unit fluxes = dat.columns[1].data.astype(np.float64) * flux_unit return header, wavelengths, fluxes
python
def read_ascii_spec(filename, wave_unit=u.AA, flux_unit=units.FLAM, **kwargs): """Read ASCII spectrum. ASCII table must have following columns: #. Wavelength data #. Flux data It can have more than 2 columns but the rest is ignored. Comments are discarded. Parameters ---------- filename : str or file pointer Spectrum file name or pointer. wave_unit, flux_unit : str or `~astropy.units.core.Unit` Wavelength and flux units, which default to Angstrom and FLAM, respectively. kwargs : dict Keywords accepted by :func:`astropy.io.ascii.ui.read`. Returns ------- header : dict This is just an empty dictionary, so returned values are the same as :func:`read_fits_spec`. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. They are set to 'float64' percision. """ header = {} dat = ascii.read(filename, **kwargs) wave_unit = units.validate_unit(wave_unit) flux_unit = units.validate_unit(flux_unit) wavelengths = dat.columns[0].data.astype(np.float64) * wave_unit fluxes = dat.columns[1].data.astype(np.float64) * flux_unit return header, wavelengths, fluxes
[ "def", "read_ascii_spec", "(", "filename", ",", "wave_unit", "=", "u", ".", "AA", ",", "flux_unit", "=", "units", ".", "FLAM", ",", "*", "*", "kwargs", ")", ":", "header", "=", "{", "}", "dat", "=", "ascii", ".", "read", "(", "filename", ",", "*", "*", "kwargs", ")", "wave_unit", "=", "units", ".", "validate_unit", "(", "wave_unit", ")", "flux_unit", "=", "units", ".", "validate_unit", "(", "flux_unit", ")", "wavelengths", "=", "dat", ".", "columns", "[", "0", "]", ".", "data", ".", "astype", "(", "np", ".", "float64", ")", "*", "wave_unit", "fluxes", "=", "dat", ".", "columns", "[", "1", "]", ".", "data", ".", "astype", "(", "np", ".", "float64", ")", "*", "flux_unit", "return", "header", ",", "wavelengths", ",", "fluxes" ]
Read ASCII spectrum. ASCII table must have following columns: #. Wavelength data #. Flux data It can have more than 2 columns but the rest is ignored. Comments are discarded. Parameters ---------- filename : str or file pointer Spectrum file name or pointer. wave_unit, flux_unit : str or `~astropy.units.core.Unit` Wavelength and flux units, which default to Angstrom and FLAM, respectively. kwargs : dict Keywords accepted by :func:`astropy.io.ascii.ui.read`. Returns ------- header : dict This is just an empty dictionary, so returned values are the same as :func:`read_fits_spec`. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. They are set to 'float64' percision.
[ "Read", "ASCII", "spectrum", "." ]
train
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/specio.py#L100-L144
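Assuming synphot and astropy are installed, this self-contained sketch exercises the two-column ASCII format described above; format='no_header' is passed through to astropy's ascii.read so the first row is kept as data rather than guessed to be a header.

import os
import tempfile
from synphot import specio

text = "1000 1.0e-14\n2000 2.0e-14\n3000 1.5e-14\n"
with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as fh:
    fh.write(text)
    path = fh.name

header, wavelengths, fluxes = specio.read_ascii_spec(path, format='no_header')
print(wavelengths)   # [1000. 2000. 3000.] Angstrom (default wave_unit)
print(fluxes.unit)   # FLAM, i.e. erg / (s cm2 Angstrom), the default flux_unit
os.remove(path)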
spacetelescope/synphot_refactor
synphot/specio.py
read_fits_spec
def read_fits_spec(filename, ext=1, wave_col='WAVELENGTH', flux_col='FLUX', wave_unit=u.AA, flux_unit=units.FLAM): """Read FITS spectrum. Wavelength and flux units are extracted from ``TUNIT1`` and ``TUNIT2`` keywords, respectively, from data table (not primary) header. If these keywords are not present, units are taken from ``wave_unit`` and ``flux_unit`` instead. Parameters ---------- filename : str or file pointer Spectrum file name or pointer. ext: int FITS extension with table data. Default is 1. wave_col, flux_col : str Wavelength and flux column names (case-insensitive). wave_unit, flux_unit : str or `~astropy.units.core.Unit` Wavelength and flux units, which default to Angstrom and FLAM, respectively. These are *only* used if ``TUNIT1`` and ``TUNIT2`` keywords are not present in table (not primary) header. Returns ------- header : dict Primary header only. Extension header is discarded. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. """ fs = fits.open(filename) header = dict(fs[str('PRIMARY')].header) wave_dat = fs[ext].data.field(wave_col).copy() flux_dat = fs[ext].data.field(flux_col).copy() fits_wave_unit = fs[ext].header.get('TUNIT1') fits_flux_unit = fs[ext].header.get('TUNIT2') if fits_wave_unit is not None: try: wave_unit = units.validate_unit(fits_wave_unit) except (exceptions.SynphotError, ValueError) as e: # pragma: no cover warnings.warn( '{0} from FITS header is not valid wavelength unit, using ' '{1}: {2}'.format(fits_wave_unit, wave_unit, e), AstropyUserWarning) if fits_flux_unit is not None: try: flux_unit = units.validate_unit(fits_flux_unit) except (exceptions.SynphotError, ValueError) as e: # pragma: no cover warnings.warn( '{0} from FITS header is not valid flux unit, using ' '{1}: {2}'.format(fits_flux_unit, flux_unit, e), AstropyUserWarning) wave_unit = units.validate_unit(wave_unit) flux_unit = units.validate_unit(flux_unit) wavelengths = wave_dat * wave_unit fluxes = flux_dat * flux_unit if isinstance(filename, str): fs.close() return header, wavelengths, fluxes
python
def read_fits_spec(filename, ext=1, wave_col='WAVELENGTH', flux_col='FLUX', wave_unit=u.AA, flux_unit=units.FLAM): """Read FITS spectrum. Wavelength and flux units are extracted from ``TUNIT1`` and ``TUNIT2`` keywords, respectively, from data table (not primary) header. If these keywords are not present, units are taken from ``wave_unit`` and ``flux_unit`` instead. Parameters ---------- filename : str or file pointer Spectrum file name or pointer. ext: int FITS extension with table data. Default is 1. wave_col, flux_col : str Wavelength and flux column names (case-insensitive). wave_unit, flux_unit : str or `~astropy.units.core.Unit` Wavelength and flux units, which default to Angstrom and FLAM, respectively. These are *only* used if ``TUNIT1`` and ``TUNIT2`` keywords are not present in table (not primary) header. Returns ------- header : dict Primary header only. Extension header is discarded. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. """ fs = fits.open(filename) header = dict(fs[str('PRIMARY')].header) wave_dat = fs[ext].data.field(wave_col).copy() flux_dat = fs[ext].data.field(flux_col).copy() fits_wave_unit = fs[ext].header.get('TUNIT1') fits_flux_unit = fs[ext].header.get('TUNIT2') if fits_wave_unit is not None: try: wave_unit = units.validate_unit(fits_wave_unit) except (exceptions.SynphotError, ValueError) as e: # pragma: no cover warnings.warn( '{0} from FITS header is not valid wavelength unit, using ' '{1}: {2}'.format(fits_wave_unit, wave_unit, e), AstropyUserWarning) if fits_flux_unit is not None: try: flux_unit = units.validate_unit(fits_flux_unit) except (exceptions.SynphotError, ValueError) as e: # pragma: no cover warnings.warn( '{0} from FITS header is not valid flux unit, using ' '{1}: {2}'.format(fits_flux_unit, flux_unit, e), AstropyUserWarning) wave_unit = units.validate_unit(wave_unit) flux_unit = units.validate_unit(flux_unit) wavelengths = wave_dat * wave_unit fluxes = flux_dat * flux_unit if isinstance(filename, str): fs.close() return header, wavelengths, fluxes
[ "def", "read_fits_spec", "(", "filename", ",", "ext", "=", "1", ",", "wave_col", "=", "'WAVELENGTH'", ",", "flux_col", "=", "'FLUX'", ",", "wave_unit", "=", "u", ".", "AA", ",", "flux_unit", "=", "units", ".", "FLAM", ")", ":", "fs", "=", "fits", ".", "open", "(", "filename", ")", "header", "=", "dict", "(", "fs", "[", "str", "(", "'PRIMARY'", ")", "]", ".", "header", ")", "wave_dat", "=", "fs", "[", "ext", "]", ".", "data", ".", "field", "(", "wave_col", ")", ".", "copy", "(", ")", "flux_dat", "=", "fs", "[", "ext", "]", ".", "data", ".", "field", "(", "flux_col", ")", ".", "copy", "(", ")", "fits_wave_unit", "=", "fs", "[", "ext", "]", ".", "header", ".", "get", "(", "'TUNIT1'", ")", "fits_flux_unit", "=", "fs", "[", "ext", "]", ".", "header", ".", "get", "(", "'TUNIT2'", ")", "if", "fits_wave_unit", "is", "not", "None", ":", "try", ":", "wave_unit", "=", "units", ".", "validate_unit", "(", "fits_wave_unit", ")", "except", "(", "exceptions", ".", "SynphotError", ",", "ValueError", ")", "as", "e", ":", "# pragma: no cover", "warnings", ".", "warn", "(", "'{0} from FITS header is not valid wavelength unit, using '", "'{1}: {2}'", ".", "format", "(", "fits_wave_unit", ",", "wave_unit", ",", "e", ")", ",", "AstropyUserWarning", ")", "if", "fits_flux_unit", "is", "not", "None", ":", "try", ":", "flux_unit", "=", "units", ".", "validate_unit", "(", "fits_flux_unit", ")", "except", "(", "exceptions", ".", "SynphotError", ",", "ValueError", ")", "as", "e", ":", "# pragma: no cover", "warnings", ".", "warn", "(", "'{0} from FITS header is not valid flux unit, using '", "'{1}: {2}'", ".", "format", "(", "fits_flux_unit", ",", "flux_unit", ",", "e", ")", ",", "AstropyUserWarning", ")", "wave_unit", "=", "units", ".", "validate_unit", "(", "wave_unit", ")", "flux_unit", "=", "units", ".", "validate_unit", "(", "flux_unit", ")", "wavelengths", "=", "wave_dat", "*", "wave_unit", "fluxes", "=", "flux_dat", "*", "flux_unit", "if", "isinstance", "(", "filename", ",", "str", ")", ":", "fs", ".", "close", "(", ")", "return", "header", ",", "wavelengths", ",", "fluxes" ]
Read FITS spectrum. Wavelength and flux units are extracted from ``TUNIT1`` and ``TUNIT2`` keywords, respectively, from data table (not primary) header. If these keywords are not present, units are taken from ``wave_unit`` and ``flux_unit`` instead. Parameters ---------- filename : str or file pointer Spectrum file name or pointer. ext: int FITS extension with table data. Default is 1. wave_col, flux_col : str Wavelength and flux column names (case-insensitive). wave_unit, flux_unit : str or `~astropy.units.core.Unit` Wavelength and flux units, which default to Angstrom and FLAM, respectively. These are *only* used if ``TUNIT1`` and ``TUNIT2`` keywords are not present in table (not primary) header. Returns ------- header : dict Primary header only. Extension header is discarded. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum.
[ "Read", "FITS", "spectrum", "." ]
train
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/specio.py#L147-L215
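A hedged usage sketch for read_fits_spec; 'throughput.fits' is a placeholder for a FITS table whose extension 1 carries WAVELENGTH and FLUX columns. The wave_unit/flux_unit arguments only take effect when TUNIT1/TUNIT2 are missing from that extension's header.

from astropy import units as u
from synphot import specio, units

header, wavelengths, fluxes = specio.read_fits_spec(
    'throughput.fits', ext=1, wave_unit=u.AA, flux_unit=units.FLAM)
print(wavelengths.unit, fluxes.unit)  # units taken from TUNIT1/TUNIT2 if present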
spacetelescope/synphot_refactor
synphot/specio.py
write_fits_spec
def write_fits_spec(filename, wavelengths, fluxes, pri_header={}, ext_header={}, overwrite=False, trim_zero=True, pad_zero_ends=True, precision=None, epsilon=0.00032, wave_col='WAVELENGTH', flux_col='FLUX', wave_unit=u.AA, flux_unit=units.FLAM): """Write FITS spectrum. .. warning:: If data is being written out as single-precision but wavelengths are in double-precision, some rows may be omitted. Parameters ---------- filename : str Output spectrum filename. wavelengths, fluxes : array-like or `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. pri_header, ext_header : dict Metadata to be added to primary and given extension FITS header, respectively. Do *not* use this to define column names and units. overwrite : bool Overwrite existing file. Defaults to `False`. trim_zero : bool Remove rows with zero-flux. Default is `True`. pad_zero_ends : bool Pad each end of the spectrum with a row of zero flux like :func:`synphot.spectrum.BaseSpectrum.taper`. This is unnecessary if input is already tapered. precision : {`None`, 'single', 'double'} Precision of values in output file. Use native flux precision by default. epsilon : float Single-precision :math:`\\epsilon` value, taken from IRAF SYNPHOT FAQ. This is the minimum separation in wavelengths necessary for SYNPHOT to read the entries as distinct single-precision numbers. This is *only* used if ``precision='single'`` but data are in double-precision. Default from the FAQ is 0.00032. wave_col, flux_col : str Wavelength and flux column names (case-insensitive). wave_unit, flux_unit : str or `~astropy.units.core.Unit` Wavelength and flux units, which default to Angstrom and FLAM, respectively. These are *only* used if wavelengths and fluxes are not in astropy quantities. Raises ------ synphot.exceptions.SynphotError Wavelengths and fluxes have difference shapes or value precision is not supported. """ if isinstance(wavelengths, u.Quantity): wave_unit = wavelengths.unit wave_value = wavelengths.value else: wave_value = wavelengths if isinstance(fluxes, u.Quantity): flux_unit = fluxes.unit flux_value = fluxes.value else: flux_value = fluxes wave_unit = units.validate_unit(wave_unit).to_string().upper() flux_unit = units.validate_unit(flux_unit).to_string().upper() if wave_value.shape != flux_value.shape: raise exceptions.SynphotError( 'Wavelengths have shape {0} but fluxes have shape {1}'.format( wave_value.shape, flux_value.shape)) # Remove rows with zero flux. Putting this before precision logic to avoid # keeping duplicate wavelengths with zero flux. 
if trim_zero: idx = np.where(flux_value != 0) wave_value = wave_value[idx] flux_value = flux_value[idx] n_thrown = wave_value.size - len(idx[0]) if n_thrown != 0: log.info('{0} zero-flux rows are thrown out'.format(n_thrown)) # Only these Numpy types are supported # 'f' np.float32 # 'd' np.float64 pcodes = {'d': 'D', 'f': 'E'} # Numpy to FITS conversion # Use native flux precision if precision is None: precision = flux_value.dtype.char if precision not in pcodes: raise exceptions.SynphotError('flux is not float32 or float64') # Use user specified precision else: precision = precision.lower() if precision == 'single': precision = 'f' elif precision == 'double': precision = 'd' else: raise exceptions.SynphotError( 'precision must be single or double') # Now check wavelength precision wave_precision = wave_value.dtype.char if wave_precision not in pcodes: raise exceptions.SynphotError( 'wavelength is not float32 or float64') # If wavelength is double-precision but data is written out as # single-precision, wavelength values have to be recalculated # so that they will still be sorted with no duplicates. if wave_precision == 'd' and precision == 'f': orig_size = wave_value.size idx = np.where(np.abs(wave_value[1:] - wave_value[:-1]) > epsilon) wave_value = np.append(wave_value[idx], wave_value[-1]) flux_value = np.append(flux_value[idx], flux_value[-1]) n_thrown = orig_size - wave_value.size if n_thrown != 0: warnings.warn( '{0} rows are thrown out in converting wavelengths from ' 'double- to single-precision'.format(n_thrown), AstropyUserWarning) # Keep one zero at each end if pad_zero_ends: w1 = wave_value[0] ** 2 / wave_value[1] w2 = wave_value[-1] ** 2 / wave_value[-2] wave_value = np.insert(wave_value, [0, wave_value.size], [w1, w2]) flux_value = np.insert(flux_value, [0, flux_value.size], [0.0, 0.0]) # Construct the columns cw = fits.Column(name=wave_col, array=wave_value, unit=wave_unit, format=pcodes[precision]) cf = fits.Column(name=flux_col, array=flux_value, unit=flux_unit, format=pcodes[precision]) # These are written to the primary header: # 1. Filename # 2. Origin # 3. User dictionary (can overwrite defaults) hdr_hdu = fits.PrimaryHDU() hdr_hdu.header['filename'] = (os.path.basename(filename), 'name of file') hdr_hdu.header['origin'] = ('synphot', 'Version {0}'.format(__version__)) for key, val in pri_header.items(): hdr_hdu.header[key] = val # Make the extension HDU and include user dictionary in extension header. tab_hdu = fits.BinTableHDU.from_columns(fits.ColDefs([cw, cf])) for key, val in ext_header.items(): tab_hdu.header[key] = val # Write to file hdulist = fits.HDUList([hdr_hdu]) hdulist.append(tab_hdu) hdulist.writeto(filename, overwrite=overwrite)
python
def write_fits_spec(filename, wavelengths, fluxes, pri_header={}, ext_header={}, overwrite=False, trim_zero=True, pad_zero_ends=True, precision=None, epsilon=0.00032, wave_col='WAVELENGTH', flux_col='FLUX', wave_unit=u.AA, flux_unit=units.FLAM): """Write FITS spectrum. .. warning:: If data is being written out as single-precision but wavelengths are in double-precision, some rows may be omitted. Parameters ---------- filename : str Output spectrum filename. wavelengths, fluxes : array-like or `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. pri_header, ext_header : dict Metadata to be added to primary and given extension FITS header, respectively. Do *not* use this to define column names and units. overwrite : bool Overwrite existing file. Defaults to `False`. trim_zero : bool Remove rows with zero-flux. Default is `True`. pad_zero_ends : bool Pad each end of the spectrum with a row of zero flux like :func:`synphot.spectrum.BaseSpectrum.taper`. This is unnecessary if input is already tapered. precision : {`None`, 'single', 'double'} Precision of values in output file. Use native flux precision by default. epsilon : float Single-precision :math:`\\epsilon` value, taken from IRAF SYNPHOT FAQ. This is the minimum separation in wavelengths necessary for SYNPHOT to read the entries as distinct single-precision numbers. This is *only* used if ``precision='single'`` but data are in double-precision. Default from the FAQ is 0.00032. wave_col, flux_col : str Wavelength and flux column names (case-insensitive). wave_unit, flux_unit : str or `~astropy.units.core.Unit` Wavelength and flux units, which default to Angstrom and FLAM, respectively. These are *only* used if wavelengths and fluxes are not in astropy quantities. Raises ------ synphot.exceptions.SynphotError Wavelengths and fluxes have difference shapes or value precision is not supported. """ if isinstance(wavelengths, u.Quantity): wave_unit = wavelengths.unit wave_value = wavelengths.value else: wave_value = wavelengths if isinstance(fluxes, u.Quantity): flux_unit = fluxes.unit flux_value = fluxes.value else: flux_value = fluxes wave_unit = units.validate_unit(wave_unit).to_string().upper() flux_unit = units.validate_unit(flux_unit).to_string().upper() if wave_value.shape != flux_value.shape: raise exceptions.SynphotError( 'Wavelengths have shape {0} but fluxes have shape {1}'.format( wave_value.shape, flux_value.shape)) # Remove rows with zero flux. Putting this before precision logic to avoid # keeping duplicate wavelengths with zero flux. 
if trim_zero: idx = np.where(flux_value != 0) wave_value = wave_value[idx] flux_value = flux_value[idx] n_thrown = wave_value.size - len(idx[0]) if n_thrown != 0: log.info('{0} zero-flux rows are thrown out'.format(n_thrown)) # Only these Numpy types are supported # 'f' np.float32 # 'd' np.float64 pcodes = {'d': 'D', 'f': 'E'} # Numpy to FITS conversion # Use native flux precision if precision is None: precision = flux_value.dtype.char if precision not in pcodes: raise exceptions.SynphotError('flux is not float32 or float64') # Use user specified precision else: precision = precision.lower() if precision == 'single': precision = 'f' elif precision == 'double': precision = 'd' else: raise exceptions.SynphotError( 'precision must be single or double') # Now check wavelength precision wave_precision = wave_value.dtype.char if wave_precision not in pcodes: raise exceptions.SynphotError( 'wavelength is not float32 or float64') # If wavelength is double-precision but data is written out as # single-precision, wavelength values have to be recalculated # so that they will still be sorted with no duplicates. if wave_precision == 'd' and precision == 'f': orig_size = wave_value.size idx = np.where(np.abs(wave_value[1:] - wave_value[:-1]) > epsilon) wave_value = np.append(wave_value[idx], wave_value[-1]) flux_value = np.append(flux_value[idx], flux_value[-1]) n_thrown = orig_size - wave_value.size if n_thrown != 0: warnings.warn( '{0} rows are thrown out in converting wavelengths from ' 'double- to single-precision'.format(n_thrown), AstropyUserWarning) # Keep one zero at each end if pad_zero_ends: w1 = wave_value[0] ** 2 / wave_value[1] w2 = wave_value[-1] ** 2 / wave_value[-2] wave_value = np.insert(wave_value, [0, wave_value.size], [w1, w2]) flux_value = np.insert(flux_value, [0, flux_value.size], [0.0, 0.0]) # Construct the columns cw = fits.Column(name=wave_col, array=wave_value, unit=wave_unit, format=pcodes[precision]) cf = fits.Column(name=flux_col, array=flux_value, unit=flux_unit, format=pcodes[precision]) # These are written to the primary header: # 1. Filename # 2. Origin # 3. User dictionary (can overwrite defaults) hdr_hdu = fits.PrimaryHDU() hdr_hdu.header['filename'] = (os.path.basename(filename), 'name of file') hdr_hdu.header['origin'] = ('synphot', 'Version {0}'.format(__version__)) for key, val in pri_header.items(): hdr_hdu.header[key] = val # Make the extension HDU and include user dictionary in extension header. tab_hdu = fits.BinTableHDU.from_columns(fits.ColDefs([cw, cf])) for key, val in ext_header.items(): tab_hdu.header[key] = val # Write to file hdulist = fits.HDUList([hdr_hdu]) hdulist.append(tab_hdu) hdulist.writeto(filename, overwrite=overwrite)
[ "def", "write_fits_spec", "(", "filename", ",", "wavelengths", ",", "fluxes", ",", "pri_header", "=", "{", "}", ",", "ext_header", "=", "{", "}", ",", "overwrite", "=", "False", ",", "trim_zero", "=", "True", ",", "pad_zero_ends", "=", "True", ",", "precision", "=", "None", ",", "epsilon", "=", "0.00032", ",", "wave_col", "=", "'WAVELENGTH'", ",", "flux_col", "=", "'FLUX'", ",", "wave_unit", "=", "u", ".", "AA", ",", "flux_unit", "=", "units", ".", "FLAM", ")", ":", "if", "isinstance", "(", "wavelengths", ",", "u", ".", "Quantity", ")", ":", "wave_unit", "=", "wavelengths", ".", "unit", "wave_value", "=", "wavelengths", ".", "value", "else", ":", "wave_value", "=", "wavelengths", "if", "isinstance", "(", "fluxes", ",", "u", ".", "Quantity", ")", ":", "flux_unit", "=", "fluxes", ".", "unit", "flux_value", "=", "fluxes", ".", "value", "else", ":", "flux_value", "=", "fluxes", "wave_unit", "=", "units", ".", "validate_unit", "(", "wave_unit", ")", ".", "to_string", "(", ")", ".", "upper", "(", ")", "flux_unit", "=", "units", ".", "validate_unit", "(", "flux_unit", ")", ".", "to_string", "(", ")", ".", "upper", "(", ")", "if", "wave_value", ".", "shape", "!=", "flux_value", ".", "shape", ":", "raise", "exceptions", ".", "SynphotError", "(", "'Wavelengths have shape {0} but fluxes have shape {1}'", ".", "format", "(", "wave_value", ".", "shape", ",", "flux_value", ".", "shape", ")", ")", "# Remove rows with zero flux. Putting this before precision logic to avoid", "# keeping duplicate wavelengths with zero flux.", "if", "trim_zero", ":", "idx", "=", "np", ".", "where", "(", "flux_value", "!=", "0", ")", "wave_value", "=", "wave_value", "[", "idx", "]", "flux_value", "=", "flux_value", "[", "idx", "]", "n_thrown", "=", "wave_value", ".", "size", "-", "len", "(", "idx", "[", "0", "]", ")", "if", "n_thrown", "!=", "0", ":", "log", ".", "info", "(", "'{0} zero-flux rows are thrown out'", ".", "format", "(", "n_thrown", ")", ")", "# Only these Numpy types are supported", "# 'f' np.float32", "# 'd' np.float64", "pcodes", "=", "{", "'d'", ":", "'D'", ",", "'f'", ":", "'E'", "}", "# Numpy to FITS conversion", "# Use native flux precision", "if", "precision", "is", "None", ":", "precision", "=", "flux_value", ".", "dtype", ".", "char", "if", "precision", "not", "in", "pcodes", ":", "raise", "exceptions", ".", "SynphotError", "(", "'flux is not float32 or float64'", ")", "# Use user specified precision", "else", ":", "precision", "=", "precision", ".", "lower", "(", ")", "if", "precision", "==", "'single'", ":", "precision", "=", "'f'", "elif", "precision", "==", "'double'", ":", "precision", "=", "'d'", "else", ":", "raise", "exceptions", ".", "SynphotError", "(", "'precision must be single or double'", ")", "# Now check wavelength precision", "wave_precision", "=", "wave_value", ".", "dtype", ".", "char", "if", "wave_precision", "not", "in", "pcodes", ":", "raise", "exceptions", ".", "SynphotError", "(", "'wavelength is not float32 or float64'", ")", "# If wavelength is double-precision but data is written out as", "# single-precision, wavelength values have to be recalculated", "# so that they will still be sorted with no duplicates.", "if", "wave_precision", "==", "'d'", "and", "precision", "==", "'f'", ":", "orig_size", "=", "wave_value", ".", "size", "idx", "=", "np", ".", "where", "(", "np", ".", "abs", "(", "wave_value", "[", "1", ":", "]", "-", "wave_value", "[", ":", "-", "1", "]", ")", ">", "epsilon", ")", "wave_value", "=", "np", ".", "append", "(", "wave_value", "[", "idx", "]", ",", 
"wave_value", "[", "-", "1", "]", ")", "flux_value", "=", "np", ".", "append", "(", "flux_value", "[", "idx", "]", ",", "flux_value", "[", "-", "1", "]", ")", "n_thrown", "=", "orig_size", "-", "wave_value", ".", "size", "if", "n_thrown", "!=", "0", ":", "warnings", ".", "warn", "(", "'{0} rows are thrown out in converting wavelengths from '", "'double- to single-precision'", ".", "format", "(", "n_thrown", ")", ",", "AstropyUserWarning", ")", "# Keep one zero at each end", "if", "pad_zero_ends", ":", "w1", "=", "wave_value", "[", "0", "]", "**", "2", "/", "wave_value", "[", "1", "]", "w2", "=", "wave_value", "[", "-", "1", "]", "**", "2", "/", "wave_value", "[", "-", "2", "]", "wave_value", "=", "np", ".", "insert", "(", "wave_value", ",", "[", "0", ",", "wave_value", ".", "size", "]", ",", "[", "w1", ",", "w2", "]", ")", "flux_value", "=", "np", ".", "insert", "(", "flux_value", ",", "[", "0", ",", "flux_value", ".", "size", "]", ",", "[", "0.0", ",", "0.0", "]", ")", "# Construct the columns", "cw", "=", "fits", ".", "Column", "(", "name", "=", "wave_col", ",", "array", "=", "wave_value", ",", "unit", "=", "wave_unit", ",", "format", "=", "pcodes", "[", "precision", "]", ")", "cf", "=", "fits", ".", "Column", "(", "name", "=", "flux_col", ",", "array", "=", "flux_value", ",", "unit", "=", "flux_unit", ",", "format", "=", "pcodes", "[", "precision", "]", ")", "# These are written to the primary header:", "# 1. Filename", "# 2. Origin", "# 3. User dictionary (can overwrite defaults)", "hdr_hdu", "=", "fits", ".", "PrimaryHDU", "(", ")", "hdr_hdu", ".", "header", "[", "'filename'", "]", "=", "(", "os", ".", "path", ".", "basename", "(", "filename", ")", ",", "'name of file'", ")", "hdr_hdu", ".", "header", "[", "'origin'", "]", "=", "(", "'synphot'", ",", "'Version {0}'", ".", "format", "(", "__version__", ")", ")", "for", "key", ",", "val", "in", "pri_header", ".", "items", "(", ")", ":", "hdr_hdu", ".", "header", "[", "key", "]", "=", "val", "# Make the extension HDU and include user dictionary in extension header.", "tab_hdu", "=", "fits", ".", "BinTableHDU", ".", "from_columns", "(", "fits", ".", "ColDefs", "(", "[", "cw", ",", "cf", "]", ")", ")", "for", "key", ",", "val", "in", "ext_header", ".", "items", "(", ")", ":", "tab_hdu", ".", "header", "[", "key", "]", "=", "val", "# Write to file", "hdulist", "=", "fits", ".", "HDUList", "(", "[", "hdr_hdu", "]", ")", "hdulist", ".", "append", "(", "tab_hdu", ")", "hdulist", ".", "writeto", "(", "filename", ",", "overwrite", "=", "overwrite", ")" ]
Write FITS spectrum. .. warning:: If data is being written out as single-precision but wavelengths are in double-precision, some rows may be omitted. Parameters ---------- filename : str Output spectrum filename. wavelengths, fluxes : array-like or `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. pri_header, ext_header : dict Metadata to be added to primary and given extension FITS header, respectively. Do *not* use this to define column names and units. overwrite : bool Overwrite existing file. Defaults to `False`. trim_zero : bool Remove rows with zero-flux. Default is `True`. pad_zero_ends : bool Pad each end of the spectrum with a row of zero flux like :func:`synphot.spectrum.BaseSpectrum.taper`. This is unnecessary if input is already tapered. precision : {`None`, 'single', 'double'} Precision of values in output file. Use native flux precision by default. epsilon : float Single-precision :math:`\\epsilon` value, taken from IRAF SYNPHOT FAQ. This is the minimum separation in wavelengths necessary for SYNPHOT to read the entries as distinct single-precision numbers. This is *only* used if ``precision='single'`` but data are in double-precision. Default from the FAQ is 0.00032. wave_col, flux_col : str Wavelength and flux column names (case-insensitive). wave_unit, flux_unit : str or `~astropy.units.core.Unit` Wavelength and flux units, which default to Angstrom and FLAM, respectively. These are *only* used if wavelengths and fluxes are not in astropy quantities. Raises ------ synphot.exceptions.SynphotError Wavelengths and fluxes have difference shapes or value precision is not supported.
[ "Write", "FITS", "spectrum", "." ]
train
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/specio.py#L218-L384
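A round-trip sketch under the same assumptions (synphot, astropy and numpy installed): write a tiny made-up spectrum with write_fits_spec, then read it back with read_fits_spec. With the default pad_zero_ends=True, one zero-flux row is added at each end.

import os
import tempfile
import numpy as np
from astropy import units as u
from synphot import specio, units

wavelengths = np.array([1000.0, 2000.0, 3000.0]) * u.AA
fluxes = np.array([1.0e-14, 2.0e-14, 1.5e-14]) * units.FLAM

path = os.path.join(tempfile.mkdtemp(), 'demo_spec.fits')
specio.write_fits_spec(path, wavelengths, fluxes, overwrite=True,
                       pri_header={'TELESCOP': 'DEMO'})

header, wl, fl = specio.read_fits_spec(path)
print(len(wl))  # 5: the 3 input rows plus one zero-flux pad row at each end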
spacetelescope/synphot_refactor
synphot/units.py
spectral_density_vega
def spectral_density_vega(wav, vegaflux): """Flux equivalencies between PHOTLAM and VEGAMAG. Parameters ---------- wav : `~astropy.units.quantity.Quantity` Quantity associated with values being converted (e.g., wavelength or frequency). vegaflux : `~astropy.units.quantity.Quantity` Flux of Vega at ``wav``. Returns ------- eqv : list List of equivalencies. """ vega_photlam = vegaflux.to( PHOTLAM, equivalencies=u.spectral_density(wav)).value def converter(x): """Set nan/inf to -99 mag.""" val = -2.5 * np.log10(x / vega_photlam) result = np.zeros(val.shape, dtype=np.float64) - 99 mask = np.isfinite(val) if result.ndim > 0: result[mask] = val[mask] elif mask: result = np.asarray(val) return result def iconverter(x): return vega_photlam * 10**(-0.4 * x) return [(PHOTLAM, VEGAMAG, converter, iconverter)]
python
def spectral_density_vega(wav, vegaflux): """Flux equivalencies between PHOTLAM and VEGAMAG. Parameters ---------- wav : `~astropy.units.quantity.Quantity` Quantity associated with values being converted (e.g., wavelength or frequency). vegaflux : `~astropy.units.quantity.Quantity` Flux of Vega at ``wav``. Returns ------- eqv : list List of equivalencies. """ vega_photlam = vegaflux.to( PHOTLAM, equivalencies=u.spectral_density(wav)).value def converter(x): """Set nan/inf to -99 mag.""" val = -2.5 * np.log10(x / vega_photlam) result = np.zeros(val.shape, dtype=np.float64) - 99 mask = np.isfinite(val) if result.ndim > 0: result[mask] = val[mask] elif mask: result = np.asarray(val) return result def iconverter(x): return vega_photlam * 10**(-0.4 * x) return [(PHOTLAM, VEGAMAG, converter, iconverter)]
[ "def", "spectral_density_vega", "(", "wav", ",", "vegaflux", ")", ":", "vega_photlam", "=", "vegaflux", ".", "to", "(", "PHOTLAM", ",", "equivalencies", "=", "u", ".", "spectral_density", "(", "wav", ")", ")", ".", "value", "def", "converter", "(", "x", ")", ":", "\"\"\"Set nan/inf to -99 mag.\"\"\"", "val", "=", "-", "2.5", "*", "np", ".", "log10", "(", "x", "/", "vega_photlam", ")", "result", "=", "np", ".", "zeros", "(", "val", ".", "shape", ",", "dtype", "=", "np", ".", "float64", ")", "-", "99", "mask", "=", "np", ".", "isfinite", "(", "val", ")", "if", "result", ".", "ndim", ">", "0", ":", "result", "[", "mask", "]", "=", "val", "[", "mask", "]", "elif", "mask", ":", "result", "=", "np", ".", "asarray", "(", "val", ")", "return", "result", "def", "iconverter", "(", "x", ")", ":", "return", "vega_photlam", "*", "10", "**", "(", "-", "0.4", "*", "x", ")", "return", "[", "(", "PHOTLAM", ",", "VEGAMAG", ",", "converter", ",", "iconverter", ")", "]" ]
Flux equivalencies between PHOTLAM and VEGAMAG. Parameters ---------- wav : `~astropy.units.quantity.Quantity` Quantity associated with values being converted (e.g., wavelength or frequency). vegaflux : `~astropy.units.quantity.Quantity` Flux of Vega at ``wav``. Returns ------- eqv : list List of equivalencies.
[ "Flux", "equivalencies", "between", "PHOTLAM", "and", "VEGAMAG", "." ]
train
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/units.py#L64-L99
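A minimal sketch of the PHOTLAM <-> VEGAMAG equivalency returned above; the "Vega" flux values are made up purely so the magnitudes are easy to check by hand, they are not the real Vega spectrum.

import numpy as np
from astropy import units as u
from synphot import units

wav = np.array([4000.0, 5000.0]) * u.AA
vegaflux = np.array([5.0e-2, 6.0e-2]) * units.PHOTLAM   # fictitious "Vega" flux
flux = np.array([5.0e-4, 6.0e-3]) * units.PHOTLAM

eqv = units.spectral_density_vega(wav, vegaflux)
# -2.5 * log10(flux / vegaflux): 5.0 mag and 2.5 mag fainter than the fake Vega.
print(flux.to(units.VEGAMAG, equivalencies=eqv))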
spacetelescope/synphot_refactor
synphot/units.py
spectral_density_count
def spectral_density_count(wav, area): """Flux equivalencies between PHOTLAM and count/OBMAG. Parameters ---------- wav : `~astropy.units.quantity.Quantity` Quantity associated with values being converted (e.g., wavelength or frequency). area : `~astropy.units.quantity.Quantity` Telescope collecting area. Returns ------- eqv : list List of equivalencies. """ from .binning import calculate_bin_widths, calculate_bin_edges wav = wav.to(u.AA, equivalencies=u.spectral()) area = area.to(AREA) bin_widths = calculate_bin_widths(calculate_bin_edges(wav)) factor = bin_widths.value * area.value def converter_count(x): return x * factor def iconverter_count(x): return x / factor def converter_obmag(x): return -2.5 * np.log10(x * factor) def iconverter_obmag(x): return 10**(-0.4 * x) / factor return [(PHOTLAM, u.count, converter_count, iconverter_count), (PHOTLAM, OBMAG, converter_obmag, iconverter_obmag)]
python
def spectral_density_count(wav, area): """Flux equivalencies between PHOTLAM and count/OBMAG. Parameters ---------- wav : `~astropy.units.quantity.Quantity` Quantity associated with values being converted (e.g., wavelength or frequency). area : `~astropy.units.quantity.Quantity` Telescope collecting area. Returns ------- eqv : list List of equivalencies. """ from .binning import calculate_bin_widths, calculate_bin_edges wav = wav.to(u.AA, equivalencies=u.spectral()) area = area.to(AREA) bin_widths = calculate_bin_widths(calculate_bin_edges(wav)) factor = bin_widths.value * area.value def converter_count(x): return x * factor def iconverter_count(x): return x / factor def converter_obmag(x): return -2.5 * np.log10(x * factor) def iconverter_obmag(x): return 10**(-0.4 * x) / factor return [(PHOTLAM, u.count, converter_count, iconverter_count), (PHOTLAM, OBMAG, converter_obmag, iconverter_obmag)]
[ "def", "spectral_density_count", "(", "wav", ",", "area", ")", ":", "from", ".", "binning", "import", "calculate_bin_widths", ",", "calculate_bin_edges", "wav", "=", "wav", ".", "to", "(", "u", ".", "AA", ",", "equivalencies", "=", "u", ".", "spectral", "(", ")", ")", "area", "=", "area", ".", "to", "(", "AREA", ")", "bin_widths", "=", "calculate_bin_widths", "(", "calculate_bin_edges", "(", "wav", ")", ")", "factor", "=", "bin_widths", ".", "value", "*", "area", ".", "value", "def", "converter_count", "(", "x", ")", ":", "return", "x", "*", "factor", "def", "iconverter_count", "(", "x", ")", ":", "return", "x", "/", "factor", "def", "converter_obmag", "(", "x", ")", ":", "return", "-", "2.5", "*", "np", ".", "log10", "(", "x", "*", "factor", ")", "def", "iconverter_obmag", "(", "x", ")", ":", "return", "10", "**", "(", "-", "0.4", "*", "x", ")", "/", "factor", "return", "[", "(", "PHOTLAM", ",", "u", ".", "count", ",", "converter_count", ",", "iconverter_count", ")", ",", "(", "PHOTLAM", ",", "OBMAG", ",", "converter_obmag", ",", "iconverter_obmag", ")", "]" ]
Flux equivalencies between PHOTLAM and count/OBMAG. Parameters ---------- wav : `~astropy.units.quantity.Quantity` Quantity associated with values being converted (e.g., wavelength or frequency). area : `~astropy.units.quantity.Quantity` Telescope collecting area. Returns ------- eqv : list List of equivalencies.
[ "Flux", "equivalencies", "between", "PHOTLAM", "and", "count", "/", "OBMAG", "." ]
train
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/units.py#L102-L140
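A sketch of the PHOTLAM <-> count equivalency; the collecting area is the only extra input it needs, and both the area and the flux values below are arbitrary.

import numpy as np
from astropy import units as u
from synphot import units

wav = np.array([1000.0, 2000.0, 3000.0]) * u.AA
area = 100.0 * units.AREA                     # arbitrary 100 cm^2 collecting area
flux = np.array([1.0, 2.0, 3.0]) * units.PHOTLAM

eqv = units.spectral_density_count(wav, area)
counts = flux.to(u.count, equivalencies=eqv)
print(counts)  # each bin is flux * bin_width * area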
spacetelescope/synphot_refactor
synphot/units.py
convert_flux
def convert_flux(wavelengths, fluxes, out_flux_unit, **kwargs): """Perform conversion for :ref:`supported flux units <synphot-flux-units>`. Parameters ---------- wavelengths : array-like or `~astropy.units.quantity.Quantity` Wavelength values. If not a Quantity, assumed to be in Angstrom. fluxes : array-like or `~astropy.units.quantity.Quantity` Flux values. If not a Quantity, assumed to be in PHOTLAM. out_flux_unit : str or `~astropy.units.core.Unit` Output flux unit. area : float or `~astropy.units.quantity.Quantity` Area that fluxes cover. If not a Quantity, assumed to be in :math:`cm^{2}`. This value *must* be provided for conversions involving OBMAG and count, otherwise it is not needed. vegaspec : `~synphot.spectrum.SourceSpectrum` Vega spectrum from :func:`~synphot.spectrum.SourceSpectrum.from_vega`. This is *only* used for conversions involving VEGAMAG. Returns ------- out_flux : `~astropy.units.quantity.Quantity` Converted flux values. Raises ------ astropy.units.core.UnitsError Conversion failed. synphot.exceptions.SynphotError Area or Vega spectrum is not given when needed. """ if not isinstance(fluxes, u.Quantity): fluxes = fluxes * PHOTLAM out_flux_unit = validate_unit(out_flux_unit) out_flux_unit_name = out_flux_unit.to_string() in_flux_unit_name = fluxes.unit.to_string() # No conversion necessary if in_flux_unit_name == out_flux_unit_name: return fluxes in_flux_type = fluxes.unit.physical_type out_flux_type = out_flux_unit.physical_type # Wavelengths must Quantity if not isinstance(wavelengths, u.Quantity): wavelengths = wavelengths * u.AA eqv = u.spectral_density(wavelengths) # Use built-in astropy equivalencies try: out_flux = fluxes.to(out_flux_unit, eqv) # Use PHOTLAM as in-between unit except u.UnitConversionError: # Convert input unit to PHOTLAM if fluxes.unit == PHOTLAM: flux_photlam = fluxes elif in_flux_type != 'unknown': flux_photlam = fluxes.to(PHOTLAM, eqv) else: flux_photlam = _convert_flux( wavelengths, fluxes, PHOTLAM, **kwargs) # Convert PHOTLAM to output unit if out_flux_unit == PHOTLAM: out_flux = flux_photlam elif out_flux_type != 'unknown': out_flux = flux_photlam.to(out_flux_unit, eqv) else: out_flux = _convert_flux( wavelengths, flux_photlam, out_flux_unit, **kwargs) return out_flux
python
def convert_flux(wavelengths, fluxes, out_flux_unit, **kwargs): """Perform conversion for :ref:`supported flux units <synphot-flux-units>`. Parameters ---------- wavelengths : array-like or `~astropy.units.quantity.Quantity` Wavelength values. If not a Quantity, assumed to be in Angstrom. fluxes : array-like or `~astropy.units.quantity.Quantity` Flux values. If not a Quantity, assumed to be in PHOTLAM. out_flux_unit : str or `~astropy.units.core.Unit` Output flux unit. area : float or `~astropy.units.quantity.Quantity` Area that fluxes cover. If not a Quantity, assumed to be in :math:`cm^{2}`. This value *must* be provided for conversions involving OBMAG and count, otherwise it is not needed. vegaspec : `~synphot.spectrum.SourceSpectrum` Vega spectrum from :func:`~synphot.spectrum.SourceSpectrum.from_vega`. This is *only* used for conversions involving VEGAMAG. Returns ------- out_flux : `~astropy.units.quantity.Quantity` Converted flux values. Raises ------ astropy.units.core.UnitsError Conversion failed. synphot.exceptions.SynphotError Area or Vega spectrum is not given when needed. """ if not isinstance(fluxes, u.Quantity): fluxes = fluxes * PHOTLAM out_flux_unit = validate_unit(out_flux_unit) out_flux_unit_name = out_flux_unit.to_string() in_flux_unit_name = fluxes.unit.to_string() # No conversion necessary if in_flux_unit_name == out_flux_unit_name: return fluxes in_flux_type = fluxes.unit.physical_type out_flux_type = out_flux_unit.physical_type # Wavelengths must Quantity if not isinstance(wavelengths, u.Quantity): wavelengths = wavelengths * u.AA eqv = u.spectral_density(wavelengths) # Use built-in astropy equivalencies try: out_flux = fluxes.to(out_flux_unit, eqv) # Use PHOTLAM as in-between unit except u.UnitConversionError: # Convert input unit to PHOTLAM if fluxes.unit == PHOTLAM: flux_photlam = fluxes elif in_flux_type != 'unknown': flux_photlam = fluxes.to(PHOTLAM, eqv) else: flux_photlam = _convert_flux( wavelengths, fluxes, PHOTLAM, **kwargs) # Convert PHOTLAM to output unit if out_flux_unit == PHOTLAM: out_flux = flux_photlam elif out_flux_type != 'unknown': out_flux = flux_photlam.to(out_flux_unit, eqv) else: out_flux = _convert_flux( wavelengths, flux_photlam, out_flux_unit, **kwargs) return out_flux
[ "def", "convert_flux", "(", "wavelengths", ",", "fluxes", ",", "out_flux_unit", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "fluxes", ",", "u", ".", "Quantity", ")", ":", "fluxes", "=", "fluxes", "*", "PHOTLAM", "out_flux_unit", "=", "validate_unit", "(", "out_flux_unit", ")", "out_flux_unit_name", "=", "out_flux_unit", ".", "to_string", "(", ")", "in_flux_unit_name", "=", "fluxes", ".", "unit", ".", "to_string", "(", ")", "# No conversion necessary", "if", "in_flux_unit_name", "==", "out_flux_unit_name", ":", "return", "fluxes", "in_flux_type", "=", "fluxes", ".", "unit", ".", "physical_type", "out_flux_type", "=", "out_flux_unit", ".", "physical_type", "# Wavelengths must Quantity", "if", "not", "isinstance", "(", "wavelengths", ",", "u", ".", "Quantity", ")", ":", "wavelengths", "=", "wavelengths", "*", "u", ".", "AA", "eqv", "=", "u", ".", "spectral_density", "(", "wavelengths", ")", "# Use built-in astropy equivalencies", "try", ":", "out_flux", "=", "fluxes", ".", "to", "(", "out_flux_unit", ",", "eqv", ")", "# Use PHOTLAM as in-between unit", "except", "u", ".", "UnitConversionError", ":", "# Convert input unit to PHOTLAM", "if", "fluxes", ".", "unit", "==", "PHOTLAM", ":", "flux_photlam", "=", "fluxes", "elif", "in_flux_type", "!=", "'unknown'", ":", "flux_photlam", "=", "fluxes", ".", "to", "(", "PHOTLAM", ",", "eqv", ")", "else", ":", "flux_photlam", "=", "_convert_flux", "(", "wavelengths", ",", "fluxes", ",", "PHOTLAM", ",", "*", "*", "kwargs", ")", "# Convert PHOTLAM to output unit", "if", "out_flux_unit", "==", "PHOTLAM", ":", "out_flux", "=", "flux_photlam", "elif", "out_flux_type", "!=", "'unknown'", ":", "out_flux", "=", "flux_photlam", ".", "to", "(", "out_flux_unit", ",", "eqv", ")", "else", ":", "out_flux", "=", "_convert_flux", "(", "wavelengths", ",", "flux_photlam", ",", "out_flux_unit", ",", "*", "*", "kwargs", ")", "return", "out_flux" ]
Perform conversion for :ref:`supported flux units <synphot-flux-units>`. Parameters ---------- wavelengths : array-like or `~astropy.units.quantity.Quantity` Wavelength values. If not a Quantity, assumed to be in Angstrom. fluxes : array-like or `~astropy.units.quantity.Quantity` Flux values. If not a Quantity, assumed to be in PHOTLAM. out_flux_unit : str or `~astropy.units.core.Unit` Output flux unit. area : float or `~astropy.units.quantity.Quantity` Area that fluxes cover. If not a Quantity, assumed to be in :math:`cm^{2}`. This value *must* be provided for conversions involving OBMAG and count, otherwise it is not needed. vegaspec : `~synphot.spectrum.SourceSpectrum` Vega spectrum from :func:`~synphot.spectrum.SourceSpectrum.from_vega`. This is *only* used for conversions involving VEGAMAG. Returns ------- out_flux : `~astropy.units.quantity.Quantity` Converted flux values. Raises ------ astropy.units.core.UnitsError Conversion failed. synphot.exceptions.SynphotError Area or Vega spectrum is not given when needed.
[ "Perform", "conversion", "for", ":", "ref", ":", "supported", "flux", "units", "<synphot", "-", "flux", "-", "units", ">", "." ]
train
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/units.py#L143-L225
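Typical convert_flux usage under the same assumptions: PHOTLAM in, any supported flux unit out; conversions involving count/OBMAG additionally need area=..., and VEGAMAG needs vegaspec=....

import numpy as np
from astropy import units as u
from synphot import units

wavelengths = np.array([1000.0, 2000.0, 3000.0]) * u.AA
fluxes = np.array([1.0, 2.0, 3.0]) * units.PHOTLAM

print(units.convert_flux(wavelengths, fluxes, units.FLAM))  # erg / (s cm2 Angstrom)
print(units.convert_flux(wavelengths, fluxes, u.Jy))        # via spectral_density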
spacetelescope/synphot_refactor
synphot/units.py
_convert_flux
def _convert_flux(wavelengths, fluxes, out_flux_unit, area=None, vegaspec=None): """Flux conversion for PHOTLAM <-> X.""" flux_unit_names = (fluxes.unit.to_string(), out_flux_unit.to_string()) if PHOTLAM.to_string() not in flux_unit_names: raise exceptions.SynphotError( 'PHOTLAM must be one of the conversion units but get ' '{0}.'.format(flux_unit_names)) # VEGAMAG if VEGAMAG.to_string() in flux_unit_names: from .spectrum import SourceSpectrum if not isinstance(vegaspec, SourceSpectrum): raise exceptions.SynphotError('Vega spectrum is missing.') flux_vega = vegaspec(wavelengths) out_flux = fluxes.to( out_flux_unit, equivalencies=spectral_density_vega(wavelengths, flux_vega)) # OBMAG or count elif (u.count in (fluxes.unit, out_flux_unit) or OBMAG.to_string() in flux_unit_names): if area is None: raise exceptions.SynphotError( 'Area is compulsory for conversion involving count or OBMAG.') elif not isinstance(area, u.Quantity): area = area * AREA out_flux = fluxes.to( out_flux_unit, equivalencies=spectral_density_count(wavelengths, area)) else: raise u.UnitsError('{0} and {1} are not convertible'.format( fluxes.unit, out_flux_unit)) return out_flux
python
def _convert_flux(wavelengths, fluxes, out_flux_unit, area=None, vegaspec=None): """Flux conversion for PHOTLAM <-> X.""" flux_unit_names = (fluxes.unit.to_string(), out_flux_unit.to_string()) if PHOTLAM.to_string() not in flux_unit_names: raise exceptions.SynphotError( 'PHOTLAM must be one of the conversion units but get ' '{0}.'.format(flux_unit_names)) # VEGAMAG if VEGAMAG.to_string() in flux_unit_names: from .spectrum import SourceSpectrum if not isinstance(vegaspec, SourceSpectrum): raise exceptions.SynphotError('Vega spectrum is missing.') flux_vega = vegaspec(wavelengths) out_flux = fluxes.to( out_flux_unit, equivalencies=spectral_density_vega(wavelengths, flux_vega)) # OBMAG or count elif (u.count in (fluxes.unit, out_flux_unit) or OBMAG.to_string() in flux_unit_names): if area is None: raise exceptions.SynphotError( 'Area is compulsory for conversion involving count or OBMAG.') elif not isinstance(area, u.Quantity): area = area * AREA out_flux = fluxes.to( out_flux_unit, equivalencies=spectral_density_count(wavelengths, area)) else: raise u.UnitsError('{0} and {1} are not convertible'.format( fluxes.unit, out_flux_unit)) return out_flux
[ "def", "_convert_flux", "(", "wavelengths", ",", "fluxes", ",", "out_flux_unit", ",", "area", "=", "None", ",", "vegaspec", "=", "None", ")", ":", "flux_unit_names", "=", "(", "fluxes", ".", "unit", ".", "to_string", "(", ")", ",", "out_flux_unit", ".", "to_string", "(", ")", ")", "if", "PHOTLAM", ".", "to_string", "(", ")", "not", "in", "flux_unit_names", ":", "raise", "exceptions", ".", "SynphotError", "(", "'PHOTLAM must be one of the conversion units but get '", "'{0}.'", ".", "format", "(", "flux_unit_names", ")", ")", "# VEGAMAG", "if", "VEGAMAG", ".", "to_string", "(", ")", "in", "flux_unit_names", ":", "from", ".", "spectrum", "import", "SourceSpectrum", "if", "not", "isinstance", "(", "vegaspec", ",", "SourceSpectrum", ")", ":", "raise", "exceptions", ".", "SynphotError", "(", "'Vega spectrum is missing.'", ")", "flux_vega", "=", "vegaspec", "(", "wavelengths", ")", "out_flux", "=", "fluxes", ".", "to", "(", "out_flux_unit", ",", "equivalencies", "=", "spectral_density_vega", "(", "wavelengths", ",", "flux_vega", ")", ")", "# OBMAG or count", "elif", "(", "u", ".", "count", "in", "(", "fluxes", ".", "unit", ",", "out_flux_unit", ")", "or", "OBMAG", ".", "to_string", "(", ")", "in", "flux_unit_names", ")", ":", "if", "area", "is", "None", ":", "raise", "exceptions", ".", "SynphotError", "(", "'Area is compulsory for conversion involving count or OBMAG.'", ")", "elif", "not", "isinstance", "(", "area", ",", "u", ".", "Quantity", ")", ":", "area", "=", "area", "*", "AREA", "out_flux", "=", "fluxes", ".", "to", "(", "out_flux_unit", ",", "equivalencies", "=", "spectral_density_count", "(", "wavelengths", ",", "area", ")", ")", "else", ":", "raise", "u", ".", "UnitsError", "(", "'{0} and {1} are not convertible'", ".", "format", "(", "fluxes", ".", "unit", ",", "out_flux_unit", ")", ")", "return", "out_flux" ]
Flux conversion for PHOTLAM <-> X.
[ "Flux", "conversion", "for", "PHOTLAM", "<", "-", ">", "X", "." ]
train
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/units.py#L228-L268
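A brief usage sketch for the _convert_flux record above (editorial addition, not a dataset field). It exercises the count branch, which requires a collecting area; the wavelength grid, flux values, and area number are illustrative assumptions, and in practice the public convert_flux wrapper (referenced by validate_quantity below) is the usual entry point rather than this private helper.

import astropy.units as u
from synphot import units

# Three sample points in PHOTLAM; count/OBMAG conversions need an area.
wave = [4000.0, 5000.0, 6000.0] * u.AA
flux = [1e-3, 2e-3, 3e-3] * units.PHOTLAM

# A bare number is interpreted in the module's AREA unit; 45238.93 is an
# assumed telescope area, not a value taken from the record.
counts = units._convert_flux(wave, flux, u.count, area=45238.93)

# Omitting the area triggers the SynphotError branch shown in the record:
# units._convert_flux(wave, flux, u.count)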
spacetelescope/synphot_refactor
synphot/units.py
validate_unit
def validate_unit(input_unit):
    """Validate unit.

    To be compatible with existing SYNPHOT data files:

    * 'angstroms' and 'inversemicrons' are accepted although
      unrecognized by astropy units
    * 'transmission', 'extinction', and 'emissivity' are converted to
      astropy dimensionless unit

    Parameters
    ----------
    input_unit : str or `~astropy.units.core.Unit`
        Unit to validate.

    Returns
    -------
    output_unit : `~astropy.units.core.Unit`
        Validated unit.

    Raises
    ------
    synphot.exceptions.SynphotError
        Invalid unit.

    """
    if isinstance(input_unit, str):
        input_unit_lowcase = input_unit.lower()

        # Backward-compatibility
        if input_unit_lowcase == 'angstroms':
            output_unit = u.AA
        elif input_unit_lowcase == 'inversemicrons':
            output_unit = u.micron ** -1
        elif input_unit_lowcase in ('transmission', 'extinction',
                                    'emissivity'):
            output_unit = THROUGHPUT
        elif input_unit_lowcase == 'jy':
            output_unit = u.Jy

        # Work around mag unit limitations
        elif input_unit_lowcase in ('stmag', 'mag(st)'):
            output_unit = u.STmag
        elif input_unit_lowcase in ('abmag', 'mag(ab)'):
            output_unit = u.ABmag

        else:
            try:  # astropy.units is case-sensitive
                output_unit = u.Unit(input_unit)
            except ValueError:  # synphot is case-insensitive
                output_unit = u.Unit(input_unit_lowcase)

    elif isinstance(input_unit, (u.UnitBase, u.LogUnit)):
        output_unit = input_unit

    else:
        raise exceptions.SynphotError(
            '{0} must be a recognized string or '
            'astropy.units.core.Unit'.format(input_unit))

    return output_unit
python
def validate_unit(input_unit):
    """Validate unit.

    To be compatible with existing SYNPHOT data files:

    * 'angstroms' and 'inversemicrons' are accepted although
      unrecognized by astropy units
    * 'transmission', 'extinction', and 'emissivity' are converted to
      astropy dimensionless unit

    Parameters
    ----------
    input_unit : str or `~astropy.units.core.Unit`
        Unit to validate.

    Returns
    -------
    output_unit : `~astropy.units.core.Unit`
        Validated unit.

    Raises
    ------
    synphot.exceptions.SynphotError
        Invalid unit.

    """
    if isinstance(input_unit, str):
        input_unit_lowcase = input_unit.lower()

        # Backward-compatibility
        if input_unit_lowcase == 'angstroms':
            output_unit = u.AA
        elif input_unit_lowcase == 'inversemicrons':
            output_unit = u.micron ** -1
        elif input_unit_lowcase in ('transmission', 'extinction',
                                    'emissivity'):
            output_unit = THROUGHPUT
        elif input_unit_lowcase == 'jy':
            output_unit = u.Jy

        # Work around mag unit limitations
        elif input_unit_lowcase in ('stmag', 'mag(st)'):
            output_unit = u.STmag
        elif input_unit_lowcase in ('abmag', 'mag(ab)'):
            output_unit = u.ABmag

        else:
            try:  # astropy.units is case-sensitive
                output_unit = u.Unit(input_unit)
            except ValueError:  # synphot is case-insensitive
                output_unit = u.Unit(input_unit_lowcase)

    elif isinstance(input_unit, (u.UnitBase, u.LogUnit)):
        output_unit = input_unit

    else:
        raise exceptions.SynphotError(
            '{0} must be a recognized string or '
            'astropy.units.core.Unit'.format(input_unit))

    return output_unit
[ "def", "validate_unit", "(", "input_unit", ")", ":", "if", "isinstance", "(", "input_unit", ",", "str", ")", ":", "input_unit_lowcase", "=", "input_unit", ".", "lower", "(", ")", "# Backward-compatibility", "if", "input_unit_lowcase", "==", "'angstroms'", ":", "output_unit", "=", "u", ".", "AA", "elif", "input_unit_lowcase", "==", "'inversemicrons'", ":", "output_unit", "=", "u", ".", "micron", "**", "-", "1", "elif", "input_unit_lowcase", "in", "(", "'transmission'", ",", "'extinction'", ",", "'emissivity'", ")", ":", "output_unit", "=", "THROUGHPUT", "elif", "input_unit_lowcase", "==", "'jy'", ":", "output_unit", "=", "u", ".", "Jy", "# Work around mag unit limitations", "elif", "input_unit_lowcase", "in", "(", "'stmag'", ",", "'mag(st)'", ")", ":", "output_unit", "=", "u", ".", "STmag", "elif", "input_unit_lowcase", "in", "(", "'abmag'", ",", "'mag(ab)'", ")", ":", "output_unit", "=", "u", ".", "ABmag", "else", ":", "try", ":", "# astropy.units is case-sensitive", "output_unit", "=", "u", ".", "Unit", "(", "input_unit", ")", "except", "ValueError", ":", "# synphot is case-insensitive", "output_unit", "=", "u", ".", "Unit", "(", "input_unit_lowcase", ")", "elif", "isinstance", "(", "input_unit", ",", "(", "u", ".", "UnitBase", ",", "u", ".", "LogUnit", ")", ")", ":", "output_unit", "=", "input_unit", "else", ":", "raise", "exceptions", ".", "SynphotError", "(", "'{0} must be a recognized string or '", "'astropy.units.core.Unit'", ".", "format", "(", "input_unit", ")", ")", "return", "output_unit" ]
Validate unit.

To be compatible with existing SYNPHOT data files:

* 'angstroms' and 'inversemicrons' are accepted although
  unrecognized by astropy units
* 'transmission', 'extinction', and 'emissivity' are converted to
  astropy dimensionless unit

Parameters
----------
input_unit : str or `~astropy.units.core.Unit`
    Unit to validate.

Returns
-------
output_unit : `~astropy.units.core.Unit`
    Validated unit.

Raises
------
synphot.exceptions.SynphotError
    Invalid unit.
[ "Validate", "unit", "." ]
train
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/units.py#L275-L335
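A short sketch of how the validate_unit record above behaves for the legacy spellings it handles (editorial addition; the expected results in the comments follow from the branches shown, not from running the dataset).

import astropy.units as u
from synphot import units

units.validate_unit('angstroms')      # legacy spelling -> u.AA
units.validate_unit('transmission')   # -> dimensionless THROUGHPUT unit
units.validate_unit('abmag')          # -> u.ABmag via the mag workaround
units.validate_unit(u.Jy)             # Unit instances pass through unchanged
# units.validate_unit(42)             # raises SynphotError: not str or Unit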
spacetelescope/synphot_refactor
synphot/units.py
validate_wave_unit
def validate_wave_unit(wave_unit):
    """Like :func:`validate_unit` but specific to wavelength."""
    output_unit = validate_unit(wave_unit)
    unit_type = output_unit.physical_type

    if unit_type not in ('length', 'wavenumber', 'frequency'):
        raise exceptions.SynphotError(
            'wavelength physical type is not length, wave number, or '
            'frequency: {0}'.format(unit_type))

    return output_unit
python
def validate_wave_unit(wave_unit):
    """Like :func:`validate_unit` but specific to wavelength."""
    output_unit = validate_unit(wave_unit)
    unit_type = output_unit.physical_type

    if unit_type not in ('length', 'wavenumber', 'frequency'):
        raise exceptions.SynphotError(
            'wavelength physical type is not length, wave number, or '
            'frequency: {0}'.format(unit_type))

    return output_unit
[ "def", "validate_wave_unit", "(", "wave_unit", ")", ":", "output_unit", "=", "validate_unit", "(", "wave_unit", ")", "unit_type", "=", "output_unit", ".", "physical_type", "if", "unit_type", "not", "in", "(", "'length'", ",", "'wavenumber'", ",", "'frequency'", ")", ":", "raise", "exceptions", ".", "SynphotError", "(", "'wavelength physical type is not length, wave number, or '", "'frequency: {0}'", ".", "format", "(", "unit_type", ")", ")", "return", "output_unit" ]
Like :func:`validate_unit` but specific to wavelength.
[ "Like", ":", "func", ":", "validate_unit", "but", "specific", "to", "wavelength", "." ]
train
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/units.py#L338-L348
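For the validate_wave_unit record above, a minimal sketch of the accepted physical types (editorial addition; the unit choices are illustrative).

from synphot import units

units.validate_wave_unit('micron')          # length -> accepted
units.validate_wave_unit('Hz')              # frequency -> accepted
units.validate_wave_unit('inversemicrons')  # wave number (legacy spelling) -> accepted
# units.validate_wave_unit('s')             # time -> raises SynphotError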
spacetelescope/synphot_refactor
synphot/units.py
validate_quantity
def validate_quantity(input_value, output_unit, equivalencies=[]):
    """Validate quantity (value and unit).

    .. note:: For flux conversion, use :func:`convert_flux` instead.

    Parameters
    ----------
    input_value : number, array-like, or `~astropy.units.quantity.Quantity`
        Quantity to validate. If not a Quantity, assumed to be
        already in output unit.

    output_unit : str or `~astropy.units.core.Unit`
        Output quantity unit.

    equivalencies : list of equivalence pairs, optional
        See `astropy.units`.

    Returns
    -------
    output_value : `~astropy.units.quantity.Quantity`
        Validated quantity in given unit.

    """
    output_unit = validate_unit(output_unit)

    if isinstance(input_value, u.Quantity):
        output_value = input_value.to(output_unit,
                                      equivalencies=equivalencies)
    else:
        output_value = input_value * output_unit

    return output_value
python
def validate_quantity(input_value, output_unit, equivalencies=[]):
    """Validate quantity (value and unit).

    .. note:: For flux conversion, use :func:`convert_flux` instead.

    Parameters
    ----------
    input_value : number, array-like, or `~astropy.units.quantity.Quantity`
        Quantity to validate. If not a Quantity, assumed to be
        already in output unit.

    output_unit : str or `~astropy.units.core.Unit`
        Output quantity unit.

    equivalencies : list of equivalence pairs, optional
        See `astropy.units`.

    Returns
    -------
    output_value : `~astropy.units.quantity.Quantity`
        Validated quantity in given unit.

    """
    output_unit = validate_unit(output_unit)

    if isinstance(input_value, u.Quantity):
        output_value = input_value.to(output_unit,
                                      equivalencies=equivalencies)
    else:
        output_value = input_value * output_unit

    return output_value
[ "def", "validate_quantity", "(", "input_value", ",", "output_unit", ",", "equivalencies", "=", "[", "]", ")", ":", "output_unit", "=", "validate_unit", "(", "output_unit", ")", "if", "isinstance", "(", "input_value", ",", "u", ".", "Quantity", ")", ":", "output_value", "=", "input_value", ".", "to", "(", "output_unit", ",", "equivalencies", "=", "equivalencies", ")", "else", ":", "output_value", "=", "input_value", "*", "output_unit", "return", "output_value" ]
Validate quantity (value and unit).

.. note:: For flux conversion, use :func:`convert_flux` instead.

Parameters
----------
input_value : number, array-like, or `~astropy.units.quantity.Quantity`
    Quantity to validate. If not a Quantity, assumed to be
    already in output unit.

output_unit : str or `~astropy.units.core.Unit`
    Output quantity unit.

equivalencies : list of equivalence pairs, optional
    See `astropy.units`.

Returns
-------
output_value : `~astropy.units.quantity.Quantity`
    Validated quantity in given unit.
[ "Validate", "quantity", "(", "value", "and", "unit", ")", "." ]
train
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/units.py#L351-L383
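A minimal usage sketch for the validate_quantity record above (editorial addition; the numeric values are assumptions). Bare numbers are taken to already be in the output unit, Quantities are converted, and equivalencies are passed straight through to astropy.

import astropy.units as u
from synphot import units

units.validate_quantity(5000, u.AA)                   # -> 5000 Angstrom
units.validate_quantity(0.5 * u.micron, 'angstroms')  # -> 5000 Angstrom
units.validate_quantity(1 * u.GHz, u.AA,
                        equivalencies=u.spectral())   # frequency -> wavelength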
Julius2342/pyvlx
old_api/pyvlx/devices.py
Devices.add
def add(self, device):
    """Add device."""
    if not isinstance(device, Device):
        raise TypeError()
    self.__devices.append(device)
python
def add(self, device):
    """Add device."""
    if not isinstance(device, Device):
        raise TypeError()
    self.__devices.append(device)
[ "def", "add", "(", "self", ",", "device", ")", ":", "if", "not", "isinstance", "(", "device", ",", "Device", ")", ":", "raise", "TypeError", "(", ")", "self", ".", "__devices", ".", "append", "(", "device", ")" ]
Add device.
[ "Add", "device", "." ]
train
https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/old_api/pyvlx/devices.py#L36-L40