code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
return [dict(r) for r in self.find_aliases(seq_id=seq_id, current_only=current_only, translate_ncbi_namespace=translate_ncbi_namespace)]
def fetch_aliases(self, seq_id, current_only=True, translate_ncbi_namespace=None)
return list of alias annotation records (dicts) for a given seq_id
3.085487
2.624927
1.175456
clauses = [] params = [] def eq_or_like(s): return "like" if "%" in s else "=" if translate_ncbi_namespace is None: translate_ncbi_namespace = self.translate_ncbi_namespace if alias is not None: clauses += ["alias {} ?".format(eq_or_like(alias))] params += [alias] if namespace is not None: # Switch to using RefSeq for RefSeq accessions # issue #38: translate "RefSeq" to "NCBI" to enable RefSeq lookups # issue #31: later breaking change, translate database if namespace == "RefSeq": namespace = "NCBI" clauses += ["namespace {} ?".format(eq_or_like(namespace))] params += [namespace] if seq_id is not None: clauses += ["seq_id {} ?".format(eq_or_like(seq_id))] params += [seq_id] if current_only: clauses += ["is_current = 1"] cols = ["seqalias_id", "seq_id", "alias", "added", "is_current"] if translate_ncbi_namespace: cols += ["case namespace when 'NCBI' then 'RefSeq' else namespace end as namespace"] else: cols += ["namespace"] sql = "select {cols} from seqalias".format(cols=", ".join(cols)) if clauses: sql += " where " + " and ".join("(" + c + ")" for c in clauses) sql += " order by seq_id, namespace, alias" _logger.debug("Executing: " + sql) return self._db.execute(sql, params)
def find_aliases(self, seq_id=None, namespace=None, alias=None, current_only=True, translate_ncbi_namespace=None)
Returns an iterator over alias annotation records that match the given criteria. The arguments, all optional, restrict the records that are returned. Without arguments, all aliases are returned. If an argument contains %, the `like` comparison operator is used; otherwise arguments must match exactly.
2.864395
2.861996
1.000838
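A minimal usage sketch for find_aliases; `aliases` is a hypothetical name for an already-opened alias database object, and the % wildcard switches to the `like` comparison described above:
```
# Hypothetical usage: `aliases` stands for an opened alias-database object.
for rec in aliases.find_aliases(namespace="RefSeq", alias="NM_000551%"):
    print(rec["seq_id"], rec["namespace"], rec["alias"], rec["is_current"])
```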
if not self._writeable: raise RuntimeError("Cannot write -- opened read-only") log_pfx = "store({q},{n},{a})".format(n=namespace, a=alias, q=seq_id) try: c = self._db.execute("insert into seqalias (seq_id, namespace, alias) values (?, ?, ?)", (seq_id, namespace, alias)) # success => new record return c.lastrowid except sqlite3.IntegrityError: pass # IntegrityError fall-through # existing record is guaranteed to exist uniquely; fetchone() should always succeed current_rec = self.find_aliases(namespace=namespace, alias=alias).fetchone() # if seq_id matches current record, it's a duplicate (seq_id, namespace, alias) tuple # and we return current record if current_rec["seq_id"] == seq_id: _logger.debug(log_pfx + ": duplicate record") return current_rec["seqalias_id"] # otherwise, we're reassigning; deprecate old record, then retry _logger.debug(log_pfx + ": collision; deprecating {s1}".format(s1=current_rec["seq_id"])) self._db.execute("update seqalias set is_current = 0 where seqalias_id = ?", [current_rec["seqalias_id"]]) return self.store_alias(seq_id, namespace, alias)
def store_alias(self, seq_id, namespace, alias)
Associate a namespaced alias with a sequence. Alias association with sequences is idempotent: duplicate associations are discarded silently.
4.416242
4.455681
0.991149
migration_path = "_data/migrations" sqlite3.connect(self._db_path).close() # ensure that it exists db_url = "sqlite:///" + self._db_path backend = yoyo.get_backend(db_url) migration_dir = pkg_resources.resource_filename(__package__, migration_path) migrations = yoyo.read_migrations(migration_dir) assert len(migrations) > 0, "no migration scripts found -- wrong migration path for " + __package__ migrations_to_apply = backend.to_apply(migrations) backend.apply_migrations(migrations_to_apply)
def _upgrade_db(self)
upgrade db using scripts for specified (current) schema version
4.52484
4.367169
1.036104
assert os.sep == "/", "tested only on slash-delimited paths" split_re = re.compile(os.sep + "+") if len(paths) == 0: raise ValueError("commonpath() arg is an empty sequence") spaths = [p.rstrip(os.sep) for p in paths] splitpaths = [split_re.split(p) for p in spaths] if all(p.startswith(os.sep) for p in paths): abs_paths = True splitpaths = [p[1:] for p in splitpaths] elif all(not p.startswith(os.sep) for p in paths): abs_paths = False else: raise ValueError("Can't mix absolute and relative paths") splitpaths0 = splitpaths[0] splitpaths1n = splitpaths[1:] min_length = min(len(p) for p in splitpaths) equal = [i for i in range(min_length) if all(splitpaths0[i] == sp[i] for sp in splitpaths1n)] max_equal = max(equal or [-1]) commonelems = splitpaths0[:max_equal + 1] commonpath = os.sep.join(commonelems) return (os.sep if abs_paths else '') + commonpath
def commonpath(paths)
py2 compatible version of py3's os.path.commonpath >>> commonpath([""]) '' >>> commonpath(["/"]) '/' >>> commonpath(["/a"]) '/a' >>> commonpath(["/a//"]) '/a' >>> commonpath(["/a", "/a"]) '/a' >>> commonpath(["/a/b", "/a"]) '/a' >>> commonpath(["/a/b", "/a/b"]) '/a/b' >>> commonpath(["/a/b/c", "/a/b/d"]) '/a/b' >>> commonpath(["/a/b/c", "/a/b/d", "//a//b//e//"]) '/a/b' >>> commonpath([]) Traceback (most recent call last): ... ValueError: commonpath() arg is an empty sequence >>> commonpath(["/absolute/path", "relative/path"]) Traceback (most recent call last): ... ValueError: Can't mix absolute and relative paths
2.969339
2.84158
1.044961
seqrepo_dir = os.path.join(opts.root_directory, opts.instance_name) sr = SeqRepo(seqrepo_dir, writeable=True) assemblies = bioutils.assemblies.get_assemblies() if opts.reload_all: assemblies_to_load = sorted(assemblies) else: namespaces = [r["namespace"] for r in sr.aliases._db.execute("select distinct namespace from seqalias")] assemblies_to_load = sorted(k for k in assemblies if k not in namespaces) _logger.info("{} assemblies to load".format(len(assemblies_to_load))) ncbi_alias_map = {r["alias"]: r["seq_id"] for r in sr.aliases.find_aliases(namespace="NCBI", current_only=False)} for assy_name in tqdm.tqdm(assemblies_to_load, unit="assembly"): _logger.debug("loading " + assy_name) sequences = assemblies[assy_name]["sequences"] eq_sequences = [s for s in sequences if s["relationship"] in ("=", "<>")] if not eq_sequences: _logger.info("No '=' sequences to load for {an}; skipping".format(an=assy_name)) continue # all assembled-molecules (1..22, X, Y, MT) have ncbi aliases in seqrepo not_in_seqrepo = [s["refseq_ac"] for s in eq_sequences if s["refseq_ac"] not in ncbi_alias_map] if not_in_seqrepo: _logger.warning("Assembly {an} references {n} accessions not in SeqRepo instance {opts.instance_name} (e.g., {acs})".format( an=assy_name, n=len(not_in_seqrepo), opts=opts, acs=", ".join(not_in_seqrepo[:5]+["..."]), seqrepo_dir=seqrepo_dir)) if not opts.partial_load: _logger.warning("Skipping {an} (-p to enable partial loading)".format(an=assy_name)) continue eq_sequences = [es for es in eq_sequences if es["refseq_ac"] in ncbi_alias_map] _logger.info("Loading {n} new accessions for assembly {an}".format(an=assy_name, n=len(eq_sequences))) for s in eq_sequences: seq_id = ncbi_alias_map[s["refseq_ac"]] aliases = [{"namespace": assy_name, "alias": a} for a in [s["name"]] + s["aliases"]] if "genbank_ac" in s and s["genbank_ac"]: aliases += [{"namespace": "genbank", "alias": s["genbank_ac"]}] for alias in aliases: sr.aliases.store_alias(seq_id=seq_id, **alias) _logger.debug("Added assembly alias {a[namespace]}:{a[alias]} for {seq_id}".format(a=alias, seq_id=seq_id)) sr.commit()
def add_assembly_names(opts)
Add assembly names as aliases to existing sequences. Specifically, associate aliases like GRCh37.p9:1 with existing refseq accessions. ``` [{'aliases': ['chr19'], 'assembly_unit': 'Primary Assembly', 'genbank_ac': 'CM000681.2', 'length': 58617616, 'name': '19', 'refseq_ac': 'NC_000019.10', 'relationship': '=', 'sequence_role': 'assembled-molecule'}] ``` For the above sample record, this function adds the following aliases: * genbank:CM000681.2 * GRCh38:19 * GRCh38:chr19 to the sequence referred to by refseq:NC_000019.10.
3.261622
3.113564
1.047553
seqrepo_dir = os.path.join(opts.root_directory, opts.instance_name) dst_dir = opts.destination_name if not dst_dir.startswith("/"): # interpret dst_dir as relative to parent dir of seqrepo_dir dst_dir = os.path.join(opts.root_directory, dst_dir) src_dir = os.path.realpath(seqrepo_dir) dst_dir = os.path.realpath(dst_dir) if commonpath([src_dir, dst_dir]).startswith(src_dir): raise RuntimeError("Cannot nest seqrepo directories " "({} is within {})".format(dst_dir, src_dir)) if os.path.exists(dst_dir): raise IOError(dst_dir + ": File exists") tmp_dir = tempfile.mkdtemp(prefix=dst_dir + ".") _logger.debug("src_dir = " + src_dir) _logger.debug("dst_dir = " + dst_dir) _logger.debug("tmp_dir = " + tmp_dir) # TODO: cleanup of tmpdir on failure makedirs(tmp_dir, exist_ok=True) wd = os.getcwd() os.chdir(src_dir) # make destination directories (walk is top-down) for rp in (os.path.join(dirpath, dirname) for dirpath, dirnames, _ in os.walk(".") for dirname in dirnames): dp = os.path.join(tmp_dir, rp) os.mkdir(dp) # hard link sequence files for rp in (os.path.join(dirpath, filename) for dirpath, _, filenames in os.walk(".") for filename in filenames if ".bgz" in filename): dp = os.path.join(tmp_dir, rp) os.link(rp, dp) # copy sqlite databases for rp in ["aliases.sqlite3", "sequences/db.sqlite3"]: dp = os.path.join(tmp_dir, rp) shutil.copyfile(rp, dp) # recursively drop write perms on snapshot mode_aw = stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH def _drop_write(p): mode = os.lstat(p).st_mode new_mode = mode & ~mode_aw os.chmod(p, new_mode) for dp in (os.path.join(dirpath, dirent) for dirpath, dirnames, filenames in os.walk(tmp_dir) for dirent in dirnames + filenames): _drop_write(dp) _drop_write(tmp_dir) os.rename(tmp_dir, dst_dir) _logger.info("snapshot created in " + dst_dir) os.chdir(wd)
def snapshot(opts)
Snapshot a seqrepo data directory by hardlinking sequence files, copying sqlite databases, and removing write permissions from directories.
2.766899
2.616544
1.057463
# internally, we register an Event object for each entry in this function. # when self.notify_of_job_update() is called, we call Event.set() on all events # registered for that job, thereby releasing any threads waiting for that specific job. event = JOB_EVENT_MAPPING[job_id] event.clear() result = event.wait(timeout=timeout) job = self.get_job(job_id) if result: return job else: raise TimeoutError("Job {} has not received any updates.".format(job_id))
def wait_for_job_update(self, job_id, timeout=None)
Blocks until the job given by job_id has updated its state (canceled, completed, progress updated, etc.). If timeout is not None and no update occurs within timeout seconds, this function raises iceqube.exceptions.TimeoutError. :param job_id: the job's job_id to monitor for changes. :param timeout: if None, wait forever for a job update. If given, wait up to timeout seconds and then raise iceqube.exceptions.TimeoutError. :return: the Job object corresponding to job_id.
6.401615
6.212467
1.030447
assert isinstance(funcstring, str) modulestring, funcname = funcstring.rsplit('.', 1) mod = importlib.import_module(modulestring) func = getattr(mod, funcname) return func
def import_stringified_func(funcstring)
Import a string that represents a module and function, e.g. {module}.{funcname}. Given a function f, import_stringified_func(stringify_func(f)) will return the same function. :param funcstring: String to try to import :return: callable
2.368767
3.141675
0.753982
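A quick example of the round trip described in the docstring, using a standard-library function so no project-specific assumptions are needed:
```
join = import_stringified_func("os.path.join")
print(join("a", "b"))  # -> "a/b"; same callable as os.path.join
```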
if self.trigger_event.wait(timeout): try: self.func() except Exception as e: self.logger.warning("Got an exception running {func}: {e}".format(func=self.func, e=str(e))) finally: self.trigger_event.clear()
def main_loop(self, timeout=None)
Check if self.trigger_event is set. If it is, then run our function. If not, return early. :param timeout: How long to wait for a trigger event. Defaults to None (block until the event is set). :return:
3.291155
2.685474
1.22554
def _pragmas_on_connect(dbapi_con, con_record): dbapi_con.execute("PRAGMA journal_mode = WAL;") event.listen(self.engine, "connect", _pragmas_on_connect)
def set_sqlite_pragmas(self)
Sets the connection PRAGMAs for the sqlalchemy engine stored in self.engine. It currently sets: - journal_mode to WAL :return: None
3.322064
2.707963
1.226776
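The same connect-event pattern as a standalone sketch, assuming a plain SQLAlchemy engine; the database path here is made up:
```
from sqlalchemy import create_engine, event

engine = create_engine("sqlite:///jobs.sqlite3")  # hypothetical database path

def _pragmas_on_connect(dbapi_con, con_record):
    # enable write-ahead logging on every new DBAPI connection
    dbapi_con.execute("PRAGMA journal_mode = WAL;")

event.listen(engine, "connect", _pragmas_on_connect)
```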
job_id = uuid.uuid4().hex j.job_id = job_id session = self.sessionmaker() orm_job = ORMJob( id=job_id, state=j.state, app=self.app, namespace=self.namespace, obj=j) session.add(orm_job) try: session.commit() except Exception as e: logging.error( "Got an error running session.commit(): {}".format(e)) return job_id
def schedule_job(self, j)
Add the job given by j to the job queue. Note: Does not actually run the job.
3.41352
3.561809
0.958367
job, _ = self._update_job_state(job_id, State.CANCELING) return job
def mark_job_as_canceling(self, job_id)
Mark the job as requested for canceling. Does not actually try to cancel a running job. :param job_id: the job to be marked as canceling. :return: the job object
6.253464
7.872286
0.794364
s = self.sessionmaker() q = self._ns_query(s) if job_id: q = q.filter_by(id=job_id) # filter only by the finished jobs, if we are not specified to force if not force: q = q.filter( or_(ORMJob.state == State.COMPLETED, ORMJob.state == State.FAILED)) q.delete(synchronize_session=False) s.commit() s.close()
def clear(self, job_id=None, force=False)
Clear the queue and the job data. If job_id is not given, clear out all jobs marked COMPLETED or FAILED. If job_id is given, clear out the given job's data. This function won't do anything if the job's state is not COMPLETED or FAILED. :type job_id: NoneType or str :param job_id: the job_id to clear. If None, clear all jobs. :type force: bool :param force: If True, clear the job (or jobs), even if it hasn't completed or failed.
3.941465
4.164499
0.946444
session = self.sessionmaker() job, orm_job = self._update_job_state( job_id, state=State.RUNNING, session=session) # Note (aron): looks like SQLAlchemy doesn't automatically # save any pickletype fields even if we re-set (orm_job.obj = job) that # field. My hunch is that it's tracking the id of the object, # and if that doesn't change, then SQLAlchemy doesn't repickle the object # and save to the DB. # Our hack here is to just copy the job object, and then set the specific # fields we want to edit, in this case the job's progress fields. That forces # SQLAlchemy to re-pickle the object, thus saving it in the correct state. job = copy(job) job.progress = progress job.total_progress = total_progress orm_job.obj = job session.add(orm_job) session.commit() session.close() return job_id
def update_job_progress(self, job_id, progress, total_progress)
Update the job given by job_id's progress info. :type total_progress: int :type progress: int :type job_id: str :param job_id: The id of the job to update :param progress: The current progress achieved by the job :param total_progress: The total progress achievable by the job. :return: the job_id
8.272176
8.507583
0.97233
session = self.sessionmaker() job, orm_job = self._update_job_state( job_id, State.FAILED, session=session) # Note (aron): looks like SQLAlchemy doesn't automatically # save any pickletype fields even if we re-set (orm_job.obj = job) that # field. My hunch is that it's tracking the id of the object, # and if that doesn't change, then SQLAlchemy doesn't repickle the object # and save to the DB. # Our hack here is to just copy the job object, and then set the specific # fields we want to edit, in this case the exception and traceback. That forces # SQLAlchemy to re-pickle the object, thus saving it in the correct state. job = copy(job) job.exception = exception job.traceback = traceback orm_job.obj = job session.add(orm_job) session.commit() session.close()
def mark_job_as_failed(self, job_id, exception, traceback)
Mark the job as failed, and record the traceback and exception. Args: job_id: The job_id of the job that failed. exception: The exception object thrown by the job. traceback: The traceback, if any. Note (aron): Not implemented yet. We need to find a way for the concurrent.futures workers to throw back the error to us. Returns: None
8.756929
8.702692
1.006232
return session.query(ORMJob).filter(ORMJob.app == self.app, ORMJob.namespace == self.namespace)
def _ns_query(self, session)
Return a SQLAlchemy query that is already namespaced by the app and namespace given to this backend during initialization. Returns: a SQLAlchemy query object
7.237597
6.693675
1.081259
t = InfiniteLoopThread(self.process_messages, thread_name="MESSAGEPROCESSOR", wait_between_runs=0.5) t.start() return t
def start_message_processing(self)
Start the message processor thread, which continuously reads messages sent to self.incoming_message_mailbox and starts or cancels jobs based on the message received. Returns: the Thread object.
7.842324
8.180207
0.958695
try: msg = self.msgbackend.pop(self.incoming_message_mailbox) self.handle_incoming_message(msg) except queue.Empty: logger.debug("Worker message queue currently empty.")
def process_messages(self)
Read from the incoming_message_mailbox and report to the storage backend based on the first message found there. Returns: None
9.100702
6.439177
1.413333
if msg.type == MessageType.START_JOB: job = msg.message['job'] self.schedule_job(job) elif msg.type == MessageType.CANCEL_JOB: job_id = msg.message['job_id'] self.cancel(job_id)
def handle_incoming_message(self, msg)
Start or cancel a job, based on the msg. If msg.type == MessageType.START_JOB, then start the job given by msg.job. If msg.type == MessageType.CANCEL_JOB, then try to cancel the job given by msg.job.job_id. Args: msg (barbequeue.messaging.classes.Message): Returns: None
2.874353
2.367314
1.214183
def wrap(*args, **kwargs): try: return f(*args, **kwargs) except Exception as e: traceback_str = traceback.format_exc() e.traceback = traceback_str raise e return wrap
def _reraise_with_traceback(f)
Call the function normally, but if the function raises an error, attach the formatted traceback string to the exception's traceback attribute, then reraise the error. Args: f: The function to run. Returns: A function that wraps f, attaching the traceback if an error occurred.
2.588145
2.210565
1.170807
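A small sketch of how the wrapper behaves, assuming `_reraise_with_traceback` as defined above:
```
def boom():
    raise ValueError("boom")

try:
    _reraise_with_traceback(boom)()
except ValueError as e:
    # the formatted traceback string attached by the wrapper before re-raising
    print(e.traceback)
```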
l = _reraise_with_traceback(job.get_lambda_to_execute()) future = self.workers.submit(l, update_progress_func=self.update_progress, cancel_job_func=self._check_for_cancel) # assign the futures to a dict, mapping them to a job self.job_future_mapping[future] = job self.future_job_mapping[job.job_id] = future # callback for when the future is now! future.add_done_callback(self.handle_finished_future) # add the job to our cancel notifications data structure, with False at first self.cancel_notifications[job.job_id] = False return future
def schedule_job(self, job)
Schedule a job to run on the type of workers spawned by self.start_workers. :param job: the job to schedule for running. :return:
6.712333
7.046341
0.952598
future = self.future_job_mapping[job_id] is_future_cancelled = future.cancel() if is_future_cancelled: # success! return True else: if future.running(): # Already running, but let's mark the future as cancelled # anyway, to make sure that calling future.result() will raise an error. # Our cancelling callback will then check this variable to see its state, # and exit if it's cancelled. from concurrent.futures._base import CANCELLED future._state = CANCELLED return False else: # probably finished already, too late to cancel! return False
def cancel(self, job_id)
Request a cancellation from the futures executor pool. If that didn't work (because it's already running), then mark a special variable inside the future that we can check inside a special check_for_cancel function passed to the job. :param job_id: :return:
6.485798
6.115805
1.060498
future = self.future_job_mapping[job_id] is_cancelled = future._state in [CANCELLED, CANCELLED_AND_NOTIFIED] if is_cancelled: raise UserCancelledError(last_stage=current_stage)
def _check_for_cancel(self, job_id, current_stage="")
Check if a job has been requested to be cancelled. When called, the calling function can optionally give the stage it is currently in, so the user has information on where the job was before it was cancelled. :param job_id: The job_id to check :param current_stage: Where the job currently is :return: raises a UserCancelledError if we find out that we were cancelled.
4.983521
4.709839
1.058109
t = InfiniteLoopThread( func=self.schedule_next_job, thread_name="SCHEDULER", wait_between_runs=0.5) t.start() return t
def start_scheduler(self)
Start the scheduler thread. This thread reads the queue of jobs to be scheduled and sends them to the workers. Returns: the Thread object.
7.019349
7.421705
0.945786
t = InfiniteLoopThread( func=lambda: self.handle_worker_messages(timeout=2), thread_name="WORKERMESSAGEHANDLER", wait_between_runs=0.5) t.start() return t
def start_worker_message_handler(self)
Start the worker message handler thread, which loops over messages from workers (job progress updates, failures and successes etc.) and then updates the job's status. Returns: the Thread object.
6.965869
7.63046
0.912903
self.scheduler_thread.stop() self.worker_message_handler_thread.stop() if wait: self.scheduler_thread.join() self.worker_message_handler_thread.join()
def shutdown(self, wait=True)
Shut down the worker message handler and scheduler threads. Args: wait: If true, block until both threads have successfully shut down. If False, return immediately. Returns: None
3.557446
2.590165
1.373444
msg = CancelMessage(job_id) self.messaging_backend.send(self.worker_mailbox, msg) self.storage_backend.mark_job_as_canceling(job_id)
def request_job_cancel(self, job_id)
Send a message to the workers to cancel the job with job_id. We then mark the job in the storage as being canceled. :param job_id: the job to cancel :return: None
6.962219
5.229671
1.331292
next_job = self.storage_backend.get_next_scheduled_job() # TODO: don't loop over if workers are already all running if not next_job: logging.debug("No job to schedule right now.") return try: self.messaging_backend.send(self.worker_mailbox, Message( type=MessageType.START_JOB, message={'job': next_job})) self.storage_backend.mark_job_as_queued(next_job.job_id) except Full: logging.debug( "Worker queue full; skipping scheduling of job {} for now.".format(next_job.job_id) ) return
def schedule_next_job(self)
Get the next job in the queue to be scheduled, and send a message to the workers to start the job. Returns: None
4.806532
4.638061
1.036324
msgs = self.messaging_backend.popn(self.incoming_mailbox, n=20) for msg in msgs: self.handle_single_message(msg)
def handle_worker_messages(self, timeout)
Read messages that are placed in self.incoming_mailbox, and then update the job states corresponding to each message. Args: timeout: How long to wait for an incoming message, if the mailbox is empty right now. Returns: None
10.358263
8.08965
1.280434
job_id = msg.message['job_id'] actual_msg = msg.message if msg.type == MessageType.JOB_UPDATED: progress = actual_msg['progress'] total_progress = actual_msg['total_progress'] self.storage_backend.update_job_progress(job_id, progress, total_progress) elif msg.type == MessageType.JOB_COMPLETED: self.storage_backend.complete_job(job_id) elif msg.type == MessageType.JOB_FAILED: exc = actual_msg['exception'] trace = actual_msg['traceback'] self.storage_backend.mark_job_as_failed(job_id, exc, trace) elif msg.type == MessageType.JOB_CANCELED: self.storage_backend.mark_job_as_canceled(job_id) else: self.logger.error("Unknown message type: {}".format(msg.type))
def handle_single_message(self, msg)
Handle one message and modify the job storage appropriately. :param msg: the message to handle :return: None
2.078203
2.063243
1.007251
def y(update_progress_func, cancel_job_func): func = import_stringified_func(self.func) extrafunckwargs = {} args, kwargs = copy.copy(self.args), copy.copy(self.kwargs) if self.track_progress: extrafunckwargs["update_progress"] = partial(update_progress_func, self.job_id) if self.cancellable: extrafunckwargs["check_for_cancel"] = partial(cancel_job_func, self.job_id) kwargs.update(extrafunckwargs) return func(*args, **kwargs) return y
def get_lambda_to_execute(self)
return a function that executes the function assigned to this job. If job.track_progress is None (the default), the returned function accepts no argument and simply needs to be called. If job.track_progress is True, an update_progress function is passed in that can be used by the function to provide feedback progress back to the job scheduling system. :return: a function that executes the original function assigned to this job.
4.567539
4.106936
1.112152
if self.total_progress != 0: return float(self.progress) / self.total_progress else: return self.progress
def percentage_progress(self)
Returns a float between 0 and 1, representing the current job's progress in its task. If total_progress is not given or 0, just return self.progress. :return: float corresponding to the total percentage progress of the job.
4.117908
2.550303
1.614674
# if the func is already a job object, just schedule that directly. if isinstance(func, Job): job = func # else, turn it into a job first. else: job = Job(func, *args, **kwargs) job.track_progress = kwargs.pop('track_progress', False) job.cancellable = kwargs.pop('cancellable', False) job.extra_metadata = kwargs.pop('extra_metadata', {}) job_id = self.storage.schedule_job(job) return job_id
def schedule(self, func, *args, **kwargs)
Schedules a function func for execution. One special parameter is track_progress. If passed in and not None, the func will be passed in a keyword parameter called update_progress: def update_progress(progress, total_progress, stage=""): The running function can call the update_progress function to notify interested parties of the function's current progress. Another special parameter is the "cancellable" keyword parameter. When passed in and not None, a special "check_for_cancel" parameter is passed in. When called, it raises an error when the user has requested a job to be cancelled. The caller can also pass in any pickleable object into the "extra_metadata" parameter. This data is stored within the job and can be retrieved when the job status is queried. All other parameters are directly passed to the function when it starts running. :type func: callable or str :param func: A callable object that will be scheduled for running. :return: a string representing the job_id.
3.364807
2.581996
1.303181
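A hypothetical call sketch, assuming `queue` is an object exposing the schedule() method above and `import_channel` is some importable task function (both names are made up for illustration):
```
# Hypothetical: `import_channel` accepts the injected update_progress /
# check_for_cancel keyword arguments described in the docstring above.
def import_channel(channel_id, update_progress=None, check_for_cancel=None):
    ...

job_id = queue.schedule(import_channel, "abc123",
                        track_progress=True, cancellable=True,
                        extra_metadata={"requested_by": "admin"})
```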
return self.storage.wait_for_job_update(job_id, timeout=timeout)
def wait(self, job_id, timeout=None)
Wait until the job given by job_id has a new update. :param job_id: the id of the job to wait for. :param timeout: how long to wait for a job state change before timing out. :return: Job object corresponding to job_id
5.928777
8.288565
0.715296
while 1: job = self.wait(job_id, timeout=timeout) if job.state in [State.COMPLETED, State.FAILED, State.CANCELED]: return job else: continue
def wait_for_completion(self, job_id, timeout=None)
Wait for the job given by job_id to change to COMPLETED, FAILED or CANCELED. Raises an iceqube.exceptions.TimeoutError if timeout is exceeded before each job state change. :param job_id: the id of the job to wait for. :param timeout: how long to wait for a job state change before timing out.
3.200828
3.91872
0.816805
self._storage.clear() self._scheduler.shutdown(wait=False) self._workers.shutdown(wait=False)
def shutdown(self)
Shutdown the client and all of its managed resources: - the workers - the scheduler threads :return: None
5.259016
5.126978
1.025754
client = Telnet(host, int(port)) client.write(b'version\n') res = client.read_until(b'\r\n').strip() version_list = res.split(b' ') if len(version_list) not in [2, 3] or version_list[0] != b'VERSION': raise WrongProtocolData('version', res) version = version_list[1] if StrictVersion(smart_text(version)) >= StrictVersion('1.4.14'): cmd = b'config get cluster\n' else: cmd = b'get AmazonElastiCache:cluster\n' client.write(cmd) regex_index, match_object, res = client.expect([ re.compile(b'\n\r\nEND\r\n'), re.compile(b'ERROR\r\n') ]) client.close() if res == b'ERROR\r\n' and ignore_cluster_errors: return { 'version': version, 'nodes': [ '{0}:{1}'.format(smart_text(host), smart_text(port)) ] } ls = list(filter(None, re.compile(br'\r?\n').split(res))) if len(ls) != 4: raise WrongProtocolData(cmd, res) try: version = int(ls[1]) except ValueError: raise WrongProtocolData(cmd, res) nodes = [] try: for node in ls[2].split(b' '): host, ip, port = node.split(b'|') nodes.append('{0}:{1}'.format(smart_text(ip or host), smart_text(port))) except ValueError: raise WrongProtocolData(cmd, res) return { 'version': version, 'nodes': nodes }
def get_cluster_info(host, port, ignore_cluster_errors=False)
return dict with info about nodes in cluster and current version { 'nodes': [ 'IP:port', 'IP:port', ], 'version': '1.4.4' }
2.99463
2.891914
1.035519
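A hypothetical call (the endpoint name is made up) showing the shape of the returned dict:
```
# Hypothetical ElastiCache configuration endpoint and port.
info = get_cluster_info("my-cluster.cfg.use1.cache.amazonaws.com", 11211)
print(info["version"])
for node in info["nodes"]:   # e.g. "10.0.0.12:11211"
    print(node)
```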
@wraps(f) def wrapper(self, *args, **kwds): try: return f(self, *args, **kwds) except Exception: self.clear_cluster_nodes_cache() raise return wrapper
def invalidate_cache_after_error(f)
catch any exception and invalidate internal cache with list of nodes
2.604221
2.395468
1.087145
if not params.get('BINARY', True): raise Warning('To increase performance please use ElastiCache' ' in binary mode') else: params['BINARY'] = True # patch params, set binary mode if 'OPTIONS' not in params: # set special 'behaviors' pylibmc attributes params['OPTIONS'] = { 'tcp_nodelay': True, 'ketama': True }
def update_params(self, params)
update connection params to maximize performance
10.209401
9.285217
1.099533
if not hasattr(self, '_cluster_nodes_cache'): server, port = self._servers[0].split(':') try: self._cluster_nodes_cache = ( get_cluster_info(server, port, self._ignore_cluster_errors)['nodes']) except (socket.gaierror, socket.timeout) as err: raise Exception('Cannot connect to cluster {0} ({1})'.format( self._servers[0], err )) return self._cluster_nodes_cache
def get_cluster_nodes(self)
return list with all nodes in cluster
3.311768
3.229087
1.025605
return re.sub( r'%(?:\((\w+)\))?([sd])', lambda match: r'__{0}__'.format( match.group(1).lower() if match.group(1) else 'number' if match.group(2) == 'd' else 'item'), msgid)
def humanize_placeholders(msgid)
Convert placeholders to the (google translate) service friendly form. %(name)s -> __name__ %s -> __item__ %d -> __number__
3.669964
3.122884
1.175184
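For example:
```
humanize_placeholders("Hello %(user)s, you have %d new %s")
# -> "Hello __user__, you have __number__ new __item__"
```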
placeholders = re.findall(r'(\s*)(%(?:\(\w+\))?[sd])(\s*)', msgid) return re.sub( r'(\s*)(__[\w]+?__)(\s*)', lambda matches: '{0}{1}{2}'.format(placeholders[0][0], placeholders[0][1], placeholders.pop(0)[2]), translation)
def restore_placeholders(msgid, translation)
Restore placeholders in the translated message.
4.57872
4.379276
1.045543
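Continuing the example above, the placeholders (and their surrounding whitespace) from the original msgid are put back into the translated string:
```
msgid = "Hello %(user)s, you have %d new %s"
translated = "Bonjour __user__, vous avez __number__ nouveaux __item__"
restore_placeholders(msgid, translated)
# -> "Bonjour %(user)s, vous avez %d nouveaux %s"
```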
logger.info('filling up translations for locale `{}`'.format(target_language)) po = polib.pofile(os.path.join(root, file_name)) strings = self.get_strings_to_translate(po) # translate the strings, # all the translated strings are returned # in the same order on the same index # viz. [a, b] -> [trans_a, trans_b] tl = get_translator() translated_strings = tl.translate_strings(strings, target_language, 'en', False) self.update_translations(po, translated_strings) po.save()
def translate_file(self, root, file_name, target_language)
convenience method for translating a pot file :param root: the absolute path of folder where the file is present :param file_name: name of the file to be translated (it should be a pot file) :param target_language: language in which the file needs to be translated
5.302339
5.602962
0.946346
strings = [] for index, entry in enumerate(po): if not self.need_translate(entry): continue strings.append(humanize_placeholders(entry.msgid)) if entry.msgid_plural: strings.append(humanize_placeholders(entry.msgid_plural)) return strings
def get_strings_to_translate(self, po)
Return the list of strings to translate from the po file. :param po: POFile object to translate :type po: polib.POFile :return: list of strings to translate :rtype: collections.Iterable[six.text_type]
3.363099
3.754014
0.895867
translations = iter(translated_strings) for entry in entries: if not self.need_translate(entry): continue if entry.msgid_plural: # fill the first plural form with the entry.msgid translation translation = next(translations) translation = fix_translation(entry.msgid, translation) entry.msgstr_plural[0] = translation # fill the rest of plural forms with the entry.msgid_plural translation translation = next(translations) translation = fix_translation(entry.msgid_plural, translation) for k, v in entry.msgstr_plural.items(): if k != 0: entry.msgstr_plural[k] = translation else: translation = next(translations) translation = fix_translation(entry.msgid, translation) entry.msgstr = translation # Set the 'fuzzy' flag on translation if self.set_fuzzy and 'fuzzy' not in entry.flags: entry.flags.append('fuzzy')
def update_translations(self, entries, translated_strings)
Update translations in entries. The order and number of translations should match to get_strings_to_translate() result. :param entries: list of entries to translate :type entries: collections.Iterable[polib.POEntry] | polib.POFile :param translated_strings: list of translations :type translated_strings: collections.Iterable[six.text_type]
2.693383
2.688088
1.00197
def nextline(): while True: line = fileobj.readline() assert line != '' # eof if not line.startswith('comment'): return line.strip() assert nextline() == 'ply' assert nextline() == 'format ascii 1.0' line = nextline() assert line.startswith('element vertex') nverts = int(line.split()[2]) # print 'nverts : ', nverts assert nextline() == 'property float x' assert nextline() == 'property float y' assert nextline() == 'property float z' line = nextline() assert line.startswith('element face') nfaces = int(line.split()[2]) # print 'nfaces : ', nfaces assert nextline() == 'property list uchar int vertex_indices' line = nextline() has_texcoords = line == 'property list uchar float texcoord' if has_texcoords: assert nextline() == 'end_header' else: assert line == 'end_header' # Verts verts = np.zeros((nverts, 3)) for i in range(nverts): vals = nextline().split() verts[i, :] = [float(v) for v in vals[:3]] # Faces faces = [] faces_uv = [] for i in range(nfaces): vals = nextline().split() assert int(vals[0]) == 3 faces.append([int(v) for v in vals[1:4]]) if has_texcoords: assert len(vals) == 11 assert int(vals[4]) == 6 faces_uv.append([(float(vals[5]), float(vals[6])), (float(vals[7]), float(vals[8])), (float(vals[9]), float(vals[10]))]) # faces_uv.append([float(v) for v in vals[5:]]) else: assert len(vals) == 4 return verts, faces, faces_uv
def load_ply(fileobj)
Load an ASCII PLY mesh from a file-like object, returning (verts, faces, faces_uv).
1.839198
1.845165
0.996766
with open(path, "r") as fh_: lines = fh_.read().splitlines() return SshConfig(lines)
def read_ssh_config(path)
Read ssh config file and return parsed SshConfig
4.602622
3.948656
1.165617
if key in KNOWN_PARAMS: return key if key.lower() in known_params: return KNOWN_PARAMS[known_params.index(key.lower())] return key
def _remap_key(key)
Change key into correct casing if we know the parameter
3.553367
2.989102
1.188774
cur_entry = None for line in lines: kv_ = _key_value(line) if len(kv_) > 1: key, value = kv_ if key.lower() == "host": cur_entry = value self.hosts_.add(value) self.lines_.append(ConfigLine(line=line, host=cur_entry, key=key, value=value)) else: self.lines_.append(ConfigLine(line=line))
def parse(self, lines)
Parse lines from ssh config file
3.882112
3.600524
1.078207
if host in self.hosts_: vals = defaultdict(list) for k, value in [(x.key.lower(), x.value) for x in self.lines_ if x.host == host and x.key.lower() != "host"]: vals[k].append(value) flatten = lambda x: x[0] if len(x) == 1 else x return {k: flatten(v) for k, v in vals.items()} return {}
def host(self, host)
Return the configuration of a specific host as a dictionary. Dictionary always contains lowercase versions of the attribute names. Parameters ---------- host : the host to return values for. Returns ------- dict of key value pairs, excluding "Host", empty map if host is not found.
3.798837
3.788288
1.002785
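A hypothetical usage sketch combining read_ssh_config() and host(); the config path is just an example:
```
import os

cfg = read_ssh_config(os.path.expanduser("~/.ssh/config"))  # hypothetical path
print(cfg.hosts_)            # set of Host entries found while parsing
print(cfg.host("myserver"))  # e.g. {"hostname": "example.com", "user": "deploy"}
```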
self.__check_host_args(host, kwargs) def update_line(key, value): return " %s %s" % (key, value) for key, values in kwargs.items(): if type(values) not in [list, tuple]: # pylint: disable=unidiomatic-typecheck values = [values] lower_key = key.lower() update_idx = [idx for idx, x in enumerate(self.lines_) if x.host == host and x.key.lower() == lower_key] extra_remove = [] for idx in update_idx: if values: # values available, update the line value = values.pop() self.lines_[idx].line = update_line(self.lines_[idx].key, value) self.lines_[idx].value = value else: # no more values available, remove the line extra_remove.append(idx) for idx in reversed(sorted(extra_remove)): del self.lines_[idx] if values: mapped_key = _remap_key(key) max_idx = max([idx for idx, line in enumerate(self.lines_) if line.host == host]) for value in values: self.lines_.insert(max_idx + 1, ConfigLine(line=update_line(mapped_key, value), host=host, key=mapped_key, value=value))
def set(self, host, **kwargs)
Set configuration values for an existing host. Overwrites values for existing settings, or adds new settings. Parameters ---------- host : the Host to modify. **kwargs : The new configuration parameters
3.00135
3.02604
0.991841
self.__check_host_args(host, args) remove_idx = [idx for idx, x in enumerate(self.lines_) if x.host == host and x.key.lower() in args] for idx in reversed(sorted(remove_idx)): del self.lines_[idx]
def unset(self, host, *args)
Removes settings for a host. Parameters ---------- host : the host to remove settings from. *args : list of settings to remove.
4.209366
4.247213
0.991089
if host not in self.hosts_: raise ValueError("Host %s: not found" % host) if "host" in [x.lower() for x in keys]: raise ValueError("Cannot modify Host value")
def __check_host_args(self, host, keys)
Checks parameters
6.875309
6.446279
1.066555
if new_host in self.hosts_: raise ValueError("Host %s: already exists." % new_host) for line in self.lines_: # update lines if line.host == old_host: line.host = new_host if line.key.lower() == "host": line.value = new_host line.line = "Host %s" % new_host self.hosts_.remove(old_host) # update host cache self.hosts_.add(new_host)
def rename(self, old_host, new_host)
Renames a host configuration. Parameters ---------- old_host : the host to rename. new_host : the new host value
3.49564
3.739182
0.934868
if host in self.hosts_: raise ValueError("Host %s: exists (use update)." % host) self.hosts_.add(host) self.lines_.append(ConfigLine(line="", host=None)) self.lines_.append(ConfigLine(line="Host %s" % host, host=host, key="Host", value=host)) for k, v in kwargs.items(): if type(v) not in [list, tuple]: v = [v] mapped_k = _remap_key(k) for value in v: self.lines_.append(ConfigLine(line=" %s %s" % (mapped_k, str(value)), host=host, key=mapped_k, value=value)) self.lines_.append(ConfigLine(line="", host=None))
def add(self, host, **kwargs)
Add another host to the SSH configuration. Parameters ---------- host: The Host entry to add. **kwargs: The parameters for the host (without "Host" parameter itself)
2.945265
2.939811
1.001855
if host not in self.hosts_: raise ValueError("Host %s: not found." % host) self.hosts_.remove(host) # remove lines, including comments inside the host lines host_lines = [ idx for idx, x in enumerate(self.lines_) if x.host == host ] remove_range = reversed(range(min(host_lines), max(host_lines) + 1)) for idx in remove_range: del self.lines_[idx]
def remove(self, host)
Removes a host from the SSH configuration. Parameters ---------- host : The host to remove
3.887416
4.089421
0.950603
with open(path, "w") as fh_: fh_.write(self.config())
def write(self, path)
Writes ssh config file Parameters ---------- path : The file to write to
6.951672
6.20013
1.121214
if v[1] != 0 or v[2] != 0: c = (1, 0, 0) else: c = (0, 1, 0) return np.cross(v, c)
def orthogonal_vector(v)
Return an arbitrary vector that is orthogonal to v
2.308715
2.355901
0.979971
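A quick check that the returned vector is indeed orthogonal:
```
import numpy as np

v = np.array([0.0, 3.0, 4.0])
w = orthogonal_vector(v)   # cross(v, (1, 0, 0)) -> array([ 0.,  4., -3.])
print(np.dot(v, w))        # 0.0
```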
b1 = orthogonal_vector(n) b1 /= la.norm(b1) b2 = np.cross(b1, n) b2 /= la.norm(b2) verts = [orig + scale*(-b1 - b2), orig + scale*(b1 - b2), orig + scale*(b1 + b2), orig + scale*(-b1 + b2)] faces = [(0, 1, 2), (0, 2, 3)] trimesh3d(np.array(verts), faces, **kwargs)
def show_plane(orig, n, scale=1.0, **kwargs)
Show the plane with the given origin and normal. scale gives its size.
2.277121
2.295433
0.992022
dists = [point_to_plane_dist(p, plane_orig, plane_norm) for p in verts[tri]] if np.sign(dists[0]) == np.sign(dists[1]) \ and np.sign(dists[1]) == np.sign(dists[2]): # Triangle is on one side of the plane return [] # Iterate through the edges, cutting the ones that intersect intersect_points = [] for fi in range(3): v1 = verts[tri[fi]] d1 = dists[fi] v2 = verts[tri[(fi + 1) % 3]] d2 = dists[(fi + 1) % 3] if d1 * d2 < 0: # intersection factor (between 0 and 1) # here is a nice drawing : # https://ravehgonen.files.wordpress.com/2013/02/slide8.png s = d1 / (d1 - d2) vdir = v2 - v1 intersect_points.append(v1 + vdir * s) elif np.fabs(d1) < 1e-5: # point on plane intersect_points.append(v1) return intersect_points
def slice_triangle_plane(verts, tri, plane_orig, plane_norm)
Args: verts : the vertices of the mesh tri: the face to cut plane_orig: origin of the plane plane_norm: normal to the plane
3.338927
3.356709
0.994702
dists = [point_to_plane_dist(mesh.verts[vid], plane) for vid in mesh.tris[tid]] side = np.sign(dists) return not (side[0] == side[1] == side[2])
def triangle_intersects_plane(mesh, tid, plane)
Returns true if the given triangle is cut by the plane. This will return false if a single vertex of the triangle lies on the plane
4.133952
5.721641
0.722512
# TODO: Use a distance cache dists = {vid: point_to_plane_dist(mesh.verts[vid], plane) for vid in mesh.tris[tid]} # TODO: Use an edge intersection cache (we currently compute each edge # intersection twice : once for each tri) # This is to avoid registering the same vertex intersection twice # from two different edges vert_intersect = {vid: False for vid in dists.keys()} # Iterate through the edges, cutting the ones that intersect intersections = [] for e in mesh.edges_for_triangle(tid): v1 = mesh.verts[e[0]] d1 = dists[e[0]] v2 = mesh.verts[e[1]] d2 = dists[e[1]] if np.fabs(d1) < dist_tol: # Avoid creating the vertex intersection twice if not vert_intersect[e[0]]: # point on plane intersections.append((INTERSECT_VERTEX, v1, e[0])) vert_intersect[e[0]] = True if np.fabs(d2) < dist_tol: if not vert_intersect[e[1]]: # point on plane intersections.append((INTERSECT_VERTEX, v2, e[1])) vert_intersect[e[1]] = True # If vertices are on opposite sides of the plane, we have an edge # intersection if d1 * d2 < 0: # Due to numerical accuracy, we could have both a vertex intersect # and an edge intersect on the same vertex, which is impossible if not vert_intersect[e[0]] and not vert_intersect[e[1]]: # intersection factor (between 0 and 1) # here is a nice drawing : # https://ravehgonen.files.wordpress.com/2013/02/slide8.png # keep in mind d1, d2 are *signed* distances (=> d1 - d2) s = d1 / (d1 - d2) vdir = v2 - v1 ipos = v1 + vdir * s intersections.append((INTERSECT_EDGE, ipos, e)) return intersections
def compute_triangle_plane_intersections(mesh, tid, plane, dist_tol=1e-8)
Compute the intersection between a triangle and a plane. Returns a list of intersections in the form (INTERSECT_EDGE, <intersection point>, <edge>) for edge intersections (INTERSECT_VERTEX, <intersection point>, <vertex index>) for vertices This returns between 0 and 2 intersections : - 0 : the plane does not intersect the triangle - 1 : one of the triangle's vertices lies on the plane (so it just "touches" the plane without really intersecting) - 2 : the plane slices the triangle in two parts (either vertex-edge, vertex-vertex or edge-edge)
3.951957
3.731366
1.059118
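A small numeric sketch of the edge-interpolation step used above: signed distances of opposite sign give a crossing at s = d1 / (d1 - d2).
```
import numpy as np

v1, v2 = np.array([0.0, 0.0, 0.0]), np.array([2.0, 0.0, 0.0])
d1, d2 = 1.0, -3.0             # signed point-to-plane distances of the endpoints
s = d1 / (d1 - d2)             # 0.25
ipos = v1 + (v2 - v1) * s      # array([0.5, 0., 0.]) -- the intersection point
```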
if intersection[0] == INTERSECT_EDGE: tris = mesh.triangles_for_edge(intersection[2]) elif intersection[0] == INTERSECT_VERTEX: tris = mesh.triangles_for_vert(intersection[2]) else: assert False, 'Invalid intersection[0] value : %d' % intersection[0] # Knowing where we come from is not enough. If an edge of the triangle # lies exactly on the plane, i.e. : # # /t1\ # -v1---v2- # \t2/ # # With v1, v2 being the vertices and t1, t2 being the triangles, then # if you just try to go to the next connected triangle that intersect, # you can visit v1 -> t1 -> v2 -> t2 -> v1 . # Therefore, we need to limit the new candidates to the set of unvisited # triangles and once we've visited a triangle and decided on a next one, # remove all the neighbors of the visited triangle so we don't come # back to it T = set(T) for tid in tris: if tid in T: intersections = compute_triangle_plane_intersections( mesh, tid, plane, dist_tol) if len(intersections) == 2: T = T.difference(tris) return tid, intersections, T return None, [], T
def get_next_triangle(mesh, T, plane, intersection, dist_tol)
Returns the next triangle to visit given the intersection and the list of unvisited triangles (T) We look for a triangle that is cut by the plane (2 intersections) as opposed to one that only touch the plane (1 vertex intersection)
5.266724
5.232506
1.006539
T = set(T) p = [] # Loop until we have explored all the triangles for the current # polyline while True: p.append(intersect[1]) tid, intersections, T = get_next_triangle(mesh, T, plane, intersect, dist_tol) if tid is None: break # get_next_triangle returns triangles that our plane actually # intersects (as opposed to touching only a single vertex), # hence the assert assert len(intersections) == 2 # Of the two returned intersections, one should have the # intersection point equal to p[-1] if la.norm(intersections[0][1] - p[-1]) < dist_tol: intersect = intersections[1] else: assert la.norm(intersections[1][1] - p[-1]) < dist_tol, \ '%s not close to %s' % (str(p[-1]), str(intersections)) intersect = intersections[0] return p, T
def _walk_polyline(tid, intersect, T, mesh, plane, dist_tol)
Given an intersection, walk through the mesh triangles, computing intersection with the cut plane for each visited triangle and adding those intersection to a polyline.
4.500336
4.518735
0.995928
# Set of all triangles T = set(range(len(mesh.tris))) # List of all cross-section polylines P = [] while len(T) > 0: tid = T.pop() intersections = compute_triangle_plane_intersections( mesh, tid, plane, dist_tol) if len(intersections) == 2: for intersection in intersections: p, T = _walk_polyline(tid, intersection, T, mesh, plane, dist_tol) if len(p) > 1: P.append(np.array(p)) return P
def cross_section_mesh(mesh, plane, dist_tol=1e-8)
Args: mesh: A geom.TriangleMesh instance plane: The cut plane : geom.Plane instance dist_tol: If two points are closer than dist_tol, they are considered the same
4.272015
4.198713
1.017458
mesh = TriangleMesh(verts, tris) plane = Plane(plane_orig, plane_normal) return cross_section_mesh(mesh, plane, **kwargs)
def cross_section(verts, tris, plane_orig, plane_normal, **kwargs)
Compute the planar cross section of a mesh. This returns a set of polylines. Args: verts: Nx3 array of the vertices position tris: Nx3 array of the faces, containing vertex indices plane_orig: 3-vector indicating the plane origin plane_normal: 3-vector indicating the plane normal Returns: A list of Nx3 arrays, each representing a disconnected portion of the cross section as a polyline
2.611678
4.396834
0.593991
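A hypothetical end-to-end call with a tiny two-triangle square in the XY plane, cut by the plane x = 0.5:
```
import numpy as np

verts = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]], dtype=float)
tris = [(0, 1, 2), (0, 2, 3)]
polylines = cross_section(verts, tris,
                          plane_orig=(0.5, 0.0, 0.0),
                          plane_normal=(1.0, 0.0, 0.0))
# each element of `polylines` is an Nx3 array tracing the cut through the mesh
```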
a = np.array(a, dtype=np.float64) a_sumrows = np.einsum('ij,ij->i', a, a) dist = a_sumrows[:, None] + a_sumrows - 2 * np.dot(a, a.T) np.fill_diagonal(dist, 0) return dist
def pdist_squareformed_numpy(a)
Compute the pairwise squared-distance matrix using pure numpy (similar to scipy.spatial.distance.cdist()). Thanks to Divakar Roy (@droyed) at stackoverflow.com. Note this needs at least np.float64 precision! Returns: dist (matrix of squared pairwise distances)
2.366532
2.642285
0.895639
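A quick check of the squared-distance output; callers take np.sqrt of this result to obtain Euclidean distances, as merge_close_vertices below does:
```
import numpy as np

a = np.array([[0.0, 0.0, 0.0],
              [3.0, 4.0, 0.0]])
pdist_squareformed_numpy(a)
# array([[ 0., 25.],
#        [25.,  0.]])   -- squared distances; np.sqrt recovers the 3-4-5 distance of 5
```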
# Pairwise distance between verts if USE_SCIPY: D = spdist.cdist(verts, verts) else: D = np.sqrt(np.abs(pdist_squareformed_numpy(verts))) # Compute a mapping from old to new : for each input vert, store the index # of the new vert it will be merged into old2new = np.zeros(D.shape[0], dtype=np.int) # A mask indicating if a vertex has already been merged into another merged_verts = np.zeros(D.shape[0], dtype=np.bool) new_verts = [] for i in range(D.shape[0]): if merged_verts[i]: continue else: # The vertices that will be merged into this one merged = np.flatnonzero(D[i, :] < close_epsilon) old2new[merged] = len(new_verts) new_verts.append(verts[i]) merged_verts[merged] = True new_verts = np.array(new_verts) # Recompute face indices to index in new_verts new_faces = np.zeros((len(faces), 3), dtype=np.int) for i, f in enumerate(faces): new_faces[i] = (old2new[f[0]], old2new[f[1]], old2new[f[2]]) # again, plot with utils.trimesh3d(new_verts, new_faces) return new_verts, new_faces
def merge_close_vertices(verts, faces, close_epsilon=1e-5)
Will merge vertices that are closer than close_epsilon. Warning: this has O(n^2) memory usage because we compute the full vert-to-vert distance matrix. If you have a large mesh, you might want to use some kind of spatial search structure like an octree or some fancy hashing scheme. Returns: new_verts, new_faces
3.073492
3.022615
1.016832
if int(hex, 16) & 0x8000: return -(int(hex, 16) & 0x7FFF) / 10 else: return int(hex, 16) / 10
def signed_to_float(hex: str) -> float
Convert signed hexadecimal to floating value.
2.170473
1.874779
1.157722
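For example, with this 16-bit sign-and-magnitude encoding scaled by ten:
```
signed_to_float("00C8")   # 0x00C8 = 200      -> 20.0
signed_to_float("8001")   # sign bit set, magnitude 1 -> -0.1
```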
node_id, _, protocol, attrs = packet.split(DELIM, 3) data = cast(Dict[str, Any], { 'node': PacketHeader(node_id).name, }) # make exception for version response data['protocol'] = UNKNOWN if '=' in protocol: attrs = protocol + DELIM + attrs # no attributes but instead the welcome banner elif 'RFLink Gateway' in protocol: data.update(parse_banner(protocol)) elif protocol == 'PONG': data['ping'] = protocol.lower() # debug response elif protocol == 'DEBUG': data['protocol'] = protocol.lower() data['tm'] = packet[3:5] # failure response elif protocol == 'CMD UNKNOWN': data['response'] = 'command_unknown' data['ok'] = False # ok response elif protocol == 'OK': data['ok'] = True # its a regular packet else: data['protocol'] = protocol.lower() # convert key=value pairs where needed for attr in filter(None, attrs.strip(DELIM).split(DELIM)): key, value = attr.lower().split('=') if key in VALUE_TRANSLATION: value = VALUE_TRANSLATION.get(key)(value) name = PACKET_FIELDS.get(key, key) data[name] = value unit = UNITS.get(key, None) if unit: data[name + '_unit'] = unit # correct KaKu device address if data.get('protocol', '') == 'kaku' and len(data['id']) != 6: data['id'] = '0000' + data['id'] return data
def decode_packet(packet: str) -> dict
Break packet down into primitives, and do basic interpretation. >>> decode_packet('20;06;Kaku;ID=41;SWITCH=1;CMD=ON;') == { ... 'node': 'gateway', ... 'protocol': 'kaku', ... 'id': '000041', ... 'switch': '1', ... 'command': 'on', ... } True
5.550523
5.198458
1.067725
if packet['protocol'] == 'rfdebug': return '10;RFDEBUG=' + packet['command'] + ';' elif packet['protocol'] == 'rfudebug': return '10;RFDEBUG=' + packet['command'] + ';' else: return SWITCH_COMMAND_TEMPLATE.format( node=PacketHeader.master.value, **packet )
def encode_packet(packet: dict) -> str
Construct packet string from packet dictionary. >>> encode_packet({ ... 'protocol': 'newkaku', ... 'id': '000001', ... 'switch': '01', ... 'command': 'on', ... }) '10;newkaku;000001;01;on;'
7.85031
6.35911
1.234498
# translate protocol into something reversible protocol = protocol_translations[packet['protocol']] if protocol == UNKNOWN: protocol = 'rflink' return '_'.join(filter(None, [ protocol, packet.get('id', None), packet.get('switch', None), ]))
def serialize_packet_id(packet: dict) -> str
Serialize packet identifiers into one reversible string. >>> serialize_packet_id({ ... 'protocol': 'newkaku', ... 'id': '000001', ... 'switch': '01', ... 'command': 'on', ... }) 'newkaku_000001_01' >>> serialize_packet_id({ ... 'protocol': 'ikea koppla', ... 'id': '000080', ... 'switch': '0', ... 'command': 'on', ... }) 'ikeakoppla_000080_0' >>> # unserializable protocol name without explicit entry >>> # in translation table should be properly serialized >>> serialize_packet_id({ ... 'protocol': 'alecto v4', ... 'id': '000080', ... 'switch': '0', ... 'command': 'on', ... }) 'alectov4_000080_0'
11.18615
9.789896
1.142622
if packet_id == 'rflink': return {'protocol': UNKNOWN} protocol, *id_switch = packet_id.split(PACKET_ID_SEP) assert len(id_switch) < 3 packet_identifiers = { # lookup the reverse translation of the protocol in the translation # table, fallback to protocol. If this is an unserializable protocol # name, it has not been serialized before and is not in the # protocol_translations table; this will result in an invalid command. 'protocol': protocol_translations.get(protocol, protocol), } if id_switch: packet_identifiers['id'] = id_switch[0] if len(id_switch) > 1: packet_identifiers['switch'] = id_switch[1] return packet_identifiers
def deserialize_packet_id(packet_id: str) -> dict
r"""Turn a packet id into individual packet components. >>> deserialize_packet_id('newkaku_000001_01') == { ... 'protocol': 'newkaku', ... 'id': '000001', ... 'switch': '01', ... } True >>> deserialize_packet_id('ikeakoppla_000080_0') == { ... 'protocol': 'ikea koppla', ... 'id': '000080', ... 'switch': '0', ... } True
6.437586
6.398946
1.006039
field_abbrev = {v: k for k, v in PACKET_FIELDS.items()} packet_id = serialize_packet_id(packet) events = {f: v for f, v in packet.items() if f in field_abbrev} if 'command' in events or 'version' in events: # switch events only have one event in each packet yield dict(id=packet_id, **events) else: if packet_id == 'debug': yield { 'id': 'raw', 'value': packet.get('pulses(usec)'), 'tm': packet.get('tm'), 'pulses': packet.get('pulses'), } else: # sensors can have multiple for sensor, value in events.items(): unit = packet.get(sensor + '_unit', None) yield { 'id': packet_id + PACKET_ID_SEP + field_abbrev[sensor], 'sensor': sensor, 'value': value, 'unit': unit, } if packet_id != 'rflink': yield { 'id': packet_id + PACKET_ID_SEP + 'update_time', 'sensor': 'update_time', 'value': round(time.time()), 'unit': 's', }
def packet_events(packet: dict) -> Generator
Return list of all events in the packet. >>> x = list(packet_events({ ... 'protocol': 'alecto v1', ... 'id': 'ec02', ... 'temperature': 1.0, ... 'temperature_unit': '°C', ... 'humidity': 10, ... 'humidity_unit': '%', ... })) >>> assert { ... 'id': 'alectov1_ec02_temp', ... 'sensor': 'temperature', ... 'value': 1.0, ... 'unit': '°C', ... } in x >>> assert { ... 'id': 'alectov1_ec02_hum', ... 'sensor': 'humidity', ... 'value': 10, ... 'unit': '%', ... } in x >>> y = list(packet_events({ ... 'protocol': 'newkaku', ... 'id': '000001', ... 'switch': '01', ... 'command': 'on', ... })) >>> assert {'id': 'newkaku_000001_01', 'command': 'on'} in y
4.104818
3.981208
1.031048
node_id, protocol, attrs = packet.split(DELIM, 2) data = cast(Dict[str, Any], { 'node': PacketHeader(node_id).name, }) data['protocol'] = protocol.lower() for i, attr in enumerate(filter(None, attrs.strip(DELIM).split(DELIM))): if i == 0: data['id'] = attr if i == 1: data['switch'] = attr if i == 2: data['command'] = attr # correct KaKu device address if data.get('protocol', '') == 'kaku' and len(data['id']) != 6: data['id'] = '0000' + data['id'] return data
def decode_tx_packet(packet: str) -> dict
Break packet down into primitives, and do basic interpretation. >>> decode_packet('10;Kaku;ID=41;SWITCH=1;CMD=ON;') == { ... 'node': 'gateway', ... 'protocol': 'kaku', ... 'id': '000041', ... 'switch': '1', ... 'command': 'on', ... } True
4.270728
3.541547
1.205893
args = docopt(__doc__, argv=argv, version=pkg_resources.require('rflink')[0].version) level = logging.ERROR if args['-v']: level = logging.INFO if args['-v'] == 2: level = logging.DEBUG logging.basicConfig(level=level) if not loop: loop = asyncio.get_event_loop() host = args['--host'] port = args['--port'] baud = args['--baud'] listenport = args['--listenport'] proxy = RFLinkProxy(port=port, host=host, baud=baud, loop=loop) server_coro = asyncio.start_server( proxy.client_connected_callback, host="", port=listenport, loop=loop, ) server = loop.run_until_complete(server_coro) addr = server.sockets[0].getsockname() log.info('Serving on %s', addr) conn_coro = proxy.connect() loop.run_until_complete(conn_coro) proxy.closing = False try: loop.run_forever() except KeyboardInterrupt: proxy.closing = True # cleanup server server.close() loop.run_until_complete(server.wait_closed()) # cleanup server connections writers = [i[1] for i in list(clients)] for writer in writers: writer.close() if sys.version_info >= (3, 7): loop.run_until_complete(writer.wait_closed()) # cleanup RFLink connection proxy.transport.close() finally: loop.close()
def main(argv=sys.argv[1:], loop=None)
Parse arguments and set up the main program loop.
2.307237
2.297941
1.004045
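As a usage note, the entry point above could be driven programmatically along these lines; the option names are the ones the code reads from docopt, but the full grammar and defaults live in the module docstring, which is not shown here, so treat the values as hypothetical.

# Hypothetical invocation of the proxy main() defined above.
if __name__ == '__main__':
    main(['--port', '/dev/ttyACM0', '--baud', '57600',
          '--listenport', '1337', '-v'])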
log.debug('got packet: %s', raw_packet)
packet = None
try:
    packet = decode_packet(raw_packet)
except Exception:
    log.exception('failed to parse packet: %s', raw_packet)

log.debug('decoded packet: %s', packet)

if packet:
    if 'ok' in packet:
        # handle response packets internally
        log.debug('command response: %s', packet)
        self._last_ack = packet
        self._command_ack.set()
    elif self.raw_callback:
        self.raw_callback(raw_packet)
else:
    log.warning('no valid packet')
def handle_raw_packet(self, raw_packet)
Parse raw packet string into packet dict.
3.628721
3.506063
1.034985
peer = writer.get_extra_info('peername')
log.debug(' %s:%s: processing data: %s', peer[0], peer[1], raw_packet)
packet = None
try:
    packet = decode_tx_packet(raw_packet)
except Exception:
    log.exception(' %s:%s: failed to parse packet: %s',
                  peer[0], peer[1], raw_packet)
log.debug(' %s:%s: decoded packet: %s', peer[0], peer[1], packet)

if self.protocol and packet:
    if ';PING;' not in raw_packet:
        log.info(' %s:%s: forwarding packet %s to RFLink',
                 peer[0], peer[1], raw_packet)
    else:
        log.debug(' %s:%s: forwarding packet %s to RFLink',
                  peer[0], peer[1], raw_packet)
    yield from self.forward_packet(writer, packet, raw_packet)
else:
    log.warning(' %s:%s: no valid packet %s', peer[0], peer[1], packet)
def handle_raw_tx_packet(self, writer, raw_packet)
Parse raw packet string into packet dict.
2.742393
2.669533
1.027293
peer = writer.get_extra_info('peername')
log.debug(' %s:%s: forwarding data: %s', peer[0], peer[1], packet)

if 'command' in packet:
    packet_id = serialize_packet_id(packet)
    command = packet['command']
    ack = yield from self.protocol.send_command_ack(packet_id, command)
    if ack:
        writer.write("20;00;OK;".encode() + CRLF)
        for _ in range(DEFAULT_SIGNAL_REPETITIONS - 1):
            yield from self.protocol.send_command_ack(packet_id, command)
else:
    self.protocol.send_raw_packet(raw_packet)
def forward_packet(self, writer, packet, raw_packet)
Forward packet from client to RFLink.
4.473295
4.296708
1.041098
peer = writer.get_extra_info('peername')
clients.append((reader, writer, peer))
log.info("Incoming connection from: %s:%s", peer[0], peer[1])
try:
    while True:
        data = yield from reader.readline()
        if not data:
            break
        try:
            line = data.decode().strip()
        except UnicodeDecodeError:
            line = '\x00'

        # Workaround for domoticz issue #2816
        if line[-1] != DELIM:
            line = line + DELIM

        if valid_packet(line):
            yield from self.handle_raw_tx_packet(writer, line)
        else:
            log.warning(" %s:%s: dropping invalid data: '%s'",
                        peer[0], peer[1], line)
except ConnectionResetError:
    pass
except Exception as e:
    log.exception(e)

log.info("Disconnected from: %s:%s", peer[0], peer[1])
writer.close()
clients.remove((reader, writer, peer))
def client_connected_callback(self, reader, writer)
Handle connected client.
3.069433
3.051611
1.00584
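A minimal test client for the loop above might look like this; the host, port, and packet contents are hypothetical, but the framing follows the code: ';'-delimited fields, one packet per CRLF-terminated line.

import asyncio

async def send_one(host: str = 'localhost', port: int = 1337) -> None:
    reader, writer = await asyncio.open_connection(host, port)
    writer.write(b'10;newkaku;000001;01;on;\r\n')   # TX-style packet, CRLF-terminated
    await writer.drain()
    reply = await reader.readline()                 # e.g. an ack or a forwarded packet
    print(reply.decode().strip())
    writer.close()

asyncio.run(send_one())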
if ';PONG;' not in raw_packet:
    log.info('forwarding packet %s to clients', raw_packet)
else:
    log.debug('forwarding packet %s to clients', raw_packet)

writers = [i[1] for i in list(clients)]
for writer in writers:
    writer.write(str(raw_packet).encode() + CRLF)
def raw_callback(self, raw_packet)
Send data to all connected clients.
5.345117
4.891253
1.092791
# Reset protocol binding before starting reconnect
self.protocol = None

if not self.closing:
    log.warning('disconnected from Rflink, reconnecting')
    self.loop.create_task(self.connect())
def reconnect(self, exc=None)
Schedule reconnect after connection has been unexpectedly lost.
11.632308
10.896206
1.067556
import serial

log.info('Initiating Rflink connection')

# Rflink create_rflink_connection decides based on the value of host
# (string or None) if serial or tcp mode should be used

# Setup protocol
protocol = partial(
    ProxyProtocol,
    disconnect_callback=self.reconnect,
    raw_callback=self.raw_callback,
    loop=self.loop,
)

# Initiate serial/tcp connection to Rflink gateway
if self.host:
    connection = self.loop.create_connection(protocol, self.host, self.port)
else:
    connection = create_serial_connection(self.loop, protocol,
                                          self.port, self.baud)

try:
    with async_timeout.timeout(CONNECTION_TIMEOUT, loop=self.loop):
        self.transport, self.protocol = await connection
except (serial.serialutil.SerialException, ConnectionRefusedError,
        TimeoutError, OSError, asyncio.TimeoutError) as exc:
    reconnect_interval = DEFAULT_RECONNECT_INTERVAL
    log.error("Error connecting to Rflink, reconnecting in %s",
              reconnect_interval)
    self.loop.call_later(reconnect_interval, self.reconnect, exc)
    return

log.info('Connected to Rflink')
async def connect(self)
Set up connection and hook it into HA for reconnect/shutdown.
4.449492
4.296911
1.035509
# use default protocol if not specified
loop = loop if loop else asyncio.get_event_loop()
protocol = partial(
    protocol,
    loop=loop,
    packet_callback=packet_callback,
    event_callback=event_callback,
    disconnect_callback=disconnect_callback,
    ignore=ignore if ignore else [],
)

# setup serial connection if no transport specified
if host:
    conn = loop.create_connection(protocol, host, port)
else:
    conn = create_serial_connection(loop, protocol, port, baud)

return conn
def create_rflink_connection(port=None, host=None, baud=57600, protocol=RflinkProtocol, packet_callback=None, event_callback=None, disconnect_callback=None, ignore=None, loop=None)
Create Rflink manager class, returns transport coroutine.
3.142961
3.00891
1.044551
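A usage sketch for the factory above, following the same pattern main() uses at the end of this section; the rflink.protocol import path and the serial device name are assumptions, and which callback actually fires depends on the protocol class chosen.

import asyncio
from rflink.protocol import create_rflink_connection  # assumed module path

loop = asyncio.get_event_loop()
conn = create_rflink_connection(
    port='/dev/ttyACM0',     # or host='192.168.1.10', port=1234 for a TCP gateway
    event_callback=print,    # called per event dict (assuming an event-handling protocol)
    loop=loop,
)
transport, protocol = loop.run_until_complete(conn)
try:
    loop.run_forever()
finally:
    transport.close()
    loop.close()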
data = data.decode()
log.debug('received data: %s', data.strip())
self.buffer += data
self.handle_lines()
def data_received(self, data)
Add incoming data to buffer.
5.038205
4.363447
1.154639
while "\r\n" in self.buffer: line, self.buffer = self.buffer.split("\r\n", 1) if valid_packet(line): self.handle_raw_packet(line) else: log.warning('dropping invalid data: %s', line)
def handle_lines(self)
Assemble incoming data into per-line packets.
3.711021
3.302027
1.123861
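Because data can arrive split across reads, the loop above only hands on complete CRLF-terminated lines; this standalone toy shows the same splitting on hypothetical chunks.

buffer = ''
complete = []
for chunk in ('20;01;Alecto V1;ID=ec02;TEMP=', '00d2;\r\n20;02;pong;\r\n20;03;'):
    buffer += chunk
    while '\r\n' in buffer:
        line, buffer = buffer.split('\r\n', 1)
        complete.append(line)
print(complete)       # two complete packets
print(repr(buffer))   # '20;03;' stays buffered until the rest arrives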
data = packet + '\r\n'
log.debug('writing data: %s', repr(data))
self.transport.write(data.encode())
def send_raw_packet(self, packet: str)
Encode and put packet string onto write buffer.
5.161537
3.98706
1.294572
global rflink_log
if file is None:
    rflink_log = None
else:
    log.debug('logging to: %s', file)
    rflink_log = open(file, 'a')
def log_all(self, file)
Log all data received from RFLink to file.
5.139564
3.668824
1.400875
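Enabling and disabling the raw log above is a one-liner each way, given a protocol instance such as the one returned by create_rflink_connection; the file path is illustrative.

protocol.log_all('/tmp/rflink_raw.log')   # append every raw packet to this file
protocol.log_all(None)                    # stop logging raw packets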
if exc:
    log.exception('disconnected due to exception')
else:
    log.info('disconnected because of close/abort.')

if self.disconnect_callback:
    self.disconnect_callback(exc)
def connection_lost(self, exc)
Log when connection is closed, if needed call callback.
4.823954
4.053465
1.190081
log.debug('got packet: %s', raw_packet)
if rflink_log:
    print(raw_packet, file=rflink_log)
    rflink_log.flush()

packet = None
try:
    packet = decode_packet(raw_packet)
except Exception:
    log.exception('failed to parse packet: %s', raw_packet)

log.debug('decoded packet: %s', packet)

if packet:
    if 'ok' in packet:
        # handle response packets internally
        log.debug('command response: %s', packet)
        self._last_ack = packet
        self._command_ack.set()
    else:
        self.handle_packet(packet)
else:
    log.warning('no valid packet')
def handle_raw_packet(self, raw_packet)
Parse raw packet string into packet dict.
3.597178
3.469061
1.036931
if self.packet_callback:
    # forward to callback
    self.packet_callback(packet)
else:
    print('packet', packet)
def handle_packet(self, packet)
Process incoming packet dict and optionally call callback.
5.929236
4.874225
1.216447
command = deserialize_packet_id(device_id)
command['command'] = action
log.debug('sending command: %s', command)
self.send_packet(command)
def send_command(self, device_id, action)
Send device command to rflink gateway.
5.649503
5.410342
1.044205
# serialize commands
yield from self._ready_to_send.acquire()

acknowledgement = None
try:
    self._command_ack.clear()
    self.send_command(device_id, action)

    log.debug('waiting for acknowledgement')
    try:
        yield from asyncio.wait_for(self._command_ack.wait(),
                                    TIMEOUT.seconds, loop=self.loop)
        log.debug('packet acknowledged')
    except concurrent.futures._base.TimeoutError:
        acknowledgement = {'ok': False, 'message': 'timeout'}
        log.warning('acknowledge timeout')
    else:
        acknowledgement = self._last_ack.get('ok', False)
finally:
    # allow next command
    self._ready_to_send.release()

return acknowledgement
def send_command_ack(self, device_id, action)
Send command, wait for gateway to respond with acknowledgment.
4.042222
3.875953
1.042898
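Driving the acknowledged send above from a coroutine could look like this, following the pattern main() uses at the end of this section; the device id and command are illustrative, and note that on timeout the method returns a dict rather than a boolean.

async def switch_on(protocol) -> None:
    # protocol is the instance returned by create_rflink_connection(...)
    ack = await protocol.send_command_ack('newkaku_000001_01', 'on')
    print('acknowledgement:', ack)   # True/False from the gateway, or a timeout dict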
events = packet_events(packet)

for event in events:
    if self.ignore_event(event['id']):
        log.debug('ignoring event with id: %s', event)
        continue
    log.debug('got event: %s', event)
    if self.event_callback:
        self.event_callback(event)
    else:
        self.handle_event(event)
def _handle_packet(self, packet)
Event-specific packet handling logic. Break the packet into events and fire the configured event callback, or nicely print events to the console.
3.455758
2.894135
1.194055
string = '{id:<32} '
if 'command' in event:
    string += '{command}'
elif 'version' in event:
    if 'hardware' in event:
        string += '{hardware} {firmware} '
    string += 'V{version} R{revision}'
else:
    string += '{value}'
    if event.get('unit'):
        string += ' {unit}'

print(string.format(**event))
def handle_event(self, event)
Default handling of incoming event (print).
4.339615
4.167505
1.041298
for ignore in self.ignore:
    if (ignore == event_id or
            (ignore.endswith('*') and event_id.startswith(ignore[:-1]))):
        return True
return False
def ignore_event(self, event_id)
Verify event id against list of events to ignore.

>>> e = EventHandling(ignore=[
...     'test1_00',
...     'test2_*',
... ])
>>> e.ignore_event('test1_00')
True
>>> e.ignore_event('test2_00')
True
>>> e.ignore_event('test3_00')
False
3.392687
4.185946
0.810495
if event.get('command'):
    if event['command'] == 'on':
        cmd = 'off'
    else:
        cmd = 'on'

    task = self.send_command_ack(event['id'], cmd)
    self.loop.create_task(task)
def handle_event(self, event)
Handle incoming packet from rflink gateway.
4.507261
3.887338
1.159472
if packet.get('command'):
    task = self.send_command_ack(packet['id'], packet['command'])
    self.loop.create_task(task)
def handle_event(self, packet)
Handle incoming packet from rflink gateway.
6.289017
5.582389
1.126582
args = docopt(__doc__, argv=argv,
              version=pkg_resources.require('rflink')[0].version)

level = logging.ERROR
if args['-v']:
    level = logging.INFO
if args['-v'] == 2:
    level = logging.DEBUG
logging.basicConfig(level=level)

if not loop:
    loop = asyncio.get_event_loop()

if args['--ignore']:
    ignore = args['--ignore'].split(',')
else:
    ignore = []

command = next((c for c in ALL_COMMANDS if args[c] is True), None)

if command:
    protocol = PROTOCOLS['command']
else:
    protocol = PROTOCOLS[args['-m']]

conn = create_rflink_connection(
    protocol=protocol,
    host=args['--host'],
    port=args['--port'],
    baud=args['--baud'],
    loop=loop,
    ignore=ignore,
)

transport, protocol = loop.run_until_complete(conn)

try:
    if command:
        for _ in range(int(args['--repeat'])):
            loop.run_until_complete(
                protocol.send_command_ack(args['<id>'], command))
    else:
        loop.run_forever()
except KeyboardInterrupt:
    # cleanup connection
    transport.close()
    loop.run_forever()
finally:
    loop.close()
def main(argv=sys.argv[1:], loop=None)
Parse arguments and set up the main program loop.
2.525718
2.511265
1.005756
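Finally, a hedged example of calling this CLI entry point directly; the mode name 'print' and the flags are assumptions based only on the keys the code reads (-m, --port, --ignore), since the docopt grammar itself is not shown here.

# Hypothetical direct invocation: print events from a serial gateway,
# ignoring a noisy device id prefix.
main(['-m', 'print', '--port', '/dev/ttyACM0', '--ignore', 'alectov1_*'])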