Dataset columns (type and observed range):

id                 int32    values 0 – 252k
repo               string   lengths 7 – 55
path               string   lengths 4 – 127
func_name          string   lengths 1 – 88
original_string    string   lengths 75 – 19.8k
language           string   1 value
code               string   lengths 75 – 19.8k
code_tokens        list
docstring          string   lengths 3 – 17.3k
docstring_tokens   list
sha                string   lengths 40 – 40
url                string   lengths 87 – 242
236,900
closeio/tasktiger
tasktiger/worker.py
Worker._execute_task_group
def _execute_task_group(self, queue, tasks, all_task_ids, queue_lock): """ Executes the given tasks in the queue. Updates the heartbeat for task IDs passed in all_task_ids. This internal method is only meant to be called from within _process_from_queue. """ log = self.log.bind(queue=queue) locks = [] # Keep track of the acquired locks: If two tasks in the list require # the same lock we only acquire it once. lock_ids = set() ready_tasks = [] for task in tasks: if task.lock: if task.lock_key: kwargs = task.kwargs lock_id = gen_unique_id( task.serialized_func, None, {key: kwargs.get(key) for key in task.lock_key}, ) else: lock_id = gen_unique_id( task.serialized_func, task.args, task.kwargs, ) if lock_id not in lock_ids: lock = Lock(self.connection, self._key('lock', lock_id), timeout=self.config['ACTIVE_TASK_UPDATE_TIMEOUT']) acquired = lock.acquire(blocking=False) if acquired: lock_ids.add(lock_id) locks.append(lock) else: log.info('could not acquire lock', task_id=task.id) # Reschedule the task (but if the task is already # scheduled in case of a unique task, don't prolong # the schedule date). when = time.time() + self.config['LOCK_RETRY'] task._move(from_state=ACTIVE, to_state=SCHEDULED, when=when, mode='min') # Make sure to remove it from this list so we don't # re-add to the ACTIVE queue by updating the heartbeat. all_task_ids.remove(task.id) continue ready_tasks.append(task) if not ready_tasks: return True, [] if self.stats_thread: self.stats_thread.report_task_start() success = self._execute(queue, ready_tasks, log, locks, queue_lock, all_task_ids) if self.stats_thread: self.stats_thread.report_task_end() for lock in locks: lock.release() return success, ready_tasks
python
def _execute_task_group(self, queue, tasks, all_task_ids, queue_lock): """ Executes the given tasks in the queue. Updates the heartbeat for task IDs passed in all_task_ids. This internal method is only meant to be called from within _process_from_queue. """ log = self.log.bind(queue=queue) locks = [] # Keep track of the acquired locks: If two tasks in the list require # the same lock we only acquire it once. lock_ids = set() ready_tasks = [] for task in tasks: if task.lock: if task.lock_key: kwargs = task.kwargs lock_id = gen_unique_id( task.serialized_func, None, {key: kwargs.get(key) for key in task.lock_key}, ) else: lock_id = gen_unique_id( task.serialized_func, task.args, task.kwargs, ) if lock_id not in lock_ids: lock = Lock(self.connection, self._key('lock', lock_id), timeout=self.config['ACTIVE_TASK_UPDATE_TIMEOUT']) acquired = lock.acquire(blocking=False) if acquired: lock_ids.add(lock_id) locks.append(lock) else: log.info('could not acquire lock', task_id=task.id) # Reschedule the task (but if the task is already # scheduled in case of a unique task, don't prolong # the schedule date). when = time.time() + self.config['LOCK_RETRY'] task._move(from_state=ACTIVE, to_state=SCHEDULED, when=when, mode='min') # Make sure to remove it from this list so we don't # re-add to the ACTIVE queue by updating the heartbeat. all_task_ids.remove(task.id) continue ready_tasks.append(task) if not ready_tasks: return True, [] if self.stats_thread: self.stats_thread.report_task_start() success = self._execute(queue, ready_tasks, log, locks, queue_lock, all_task_ids) if self.stats_thread: self.stats_thread.report_task_end() for lock in locks: lock.release() return success, ready_tasks
[ "def", "_execute_task_group", "(", "self", ",", "queue", ",", "tasks", ",", "all_task_ids", ",", "queue_lock", ")", ":", "log", "=", "self", ".", "log", ".", "bind", "(", "queue", "=", "queue", ")", "locks", "=", "[", "]", "# Keep track of the acquired locks: If two tasks in the list require", "# the same lock we only acquire it once.", "lock_ids", "=", "set", "(", ")", "ready_tasks", "=", "[", "]", "for", "task", "in", "tasks", ":", "if", "task", ".", "lock", ":", "if", "task", ".", "lock_key", ":", "kwargs", "=", "task", ".", "kwargs", "lock_id", "=", "gen_unique_id", "(", "task", ".", "serialized_func", ",", "None", ",", "{", "key", ":", "kwargs", ".", "get", "(", "key", ")", "for", "key", "in", "task", ".", "lock_key", "}", ",", ")", "else", ":", "lock_id", "=", "gen_unique_id", "(", "task", ".", "serialized_func", ",", "task", ".", "args", ",", "task", ".", "kwargs", ",", ")", "if", "lock_id", "not", "in", "lock_ids", ":", "lock", "=", "Lock", "(", "self", ".", "connection", ",", "self", ".", "_key", "(", "'lock'", ",", "lock_id", ")", ",", "timeout", "=", "self", ".", "config", "[", "'ACTIVE_TASK_UPDATE_TIMEOUT'", "]", ")", "acquired", "=", "lock", ".", "acquire", "(", "blocking", "=", "False", ")", "if", "acquired", ":", "lock_ids", ".", "add", "(", "lock_id", ")", "locks", ".", "append", "(", "lock", ")", "else", ":", "log", ".", "info", "(", "'could not acquire lock'", ",", "task_id", "=", "task", ".", "id", ")", "# Reschedule the task (but if the task is already", "# scheduled in case of a unique task, don't prolong", "# the schedule date).", "when", "=", "time", ".", "time", "(", ")", "+", "self", ".", "config", "[", "'LOCK_RETRY'", "]", "task", ".", "_move", "(", "from_state", "=", "ACTIVE", ",", "to_state", "=", "SCHEDULED", ",", "when", "=", "when", ",", "mode", "=", "'min'", ")", "# Make sure to remove it from this list so we don't", "# re-add to the ACTIVE queue by updating the heartbeat.", "all_task_ids", ".", "remove", "(", "task", ".", "id", ")", "continue", "ready_tasks", ".", "append", "(", "task", ")", "if", "not", "ready_tasks", ":", "return", "True", ",", "[", "]", "if", "self", ".", "stats_thread", ":", "self", ".", "stats_thread", ".", "report_task_start", "(", ")", "success", "=", "self", ".", "_execute", "(", "queue", ",", "ready_tasks", ",", "log", ",", "locks", ",", "queue_lock", ",", "all_task_ids", ")", "if", "self", ".", "stats_thread", ":", "self", ".", "stats_thread", ".", "report_task_end", "(", ")", "for", "lock", "in", "locks", ":", "lock", ".", "release", "(", ")", "return", "success", ",", "ready_tasks" ]
Executes the given tasks in the queue. Updates the heartbeat for task IDs passed in all_task_ids. This internal method is only meant to be called from within _process_from_queue.
[ "Executes", "the", "given", "tasks", "in", "the", "queue", ".", "Updates", "the", "heartbeat", "for", "task", "IDs", "passed", "in", "all_task_ids", ".", "This", "internal", "method", "is", "only", "meant", "to", "be", "called", "from", "within", "_process_from_queue", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L692-L757
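The lock handling in the row above reduces to one non-blocking acquire per unique lock id. A minimal sketch of that pattern with a redis-py style lock; the key name, timeout, and retry delay are illustrative assumptions, and tasktiger may use its own Lock class rather than redis-py's:

```python
import time
import redis
from redis.lock import Lock  # redis-py's lock; tasktiger may use its own Lock class

conn = redis.Redis()  # assumed local Redis instance

# Non-blocking acquire: if another worker holds the lock, acquire() returns
# False immediately instead of waiting for it to be released.
lock = Lock(conn, 't:lock:some-lock-id', timeout=60)  # illustrative key and timeout
if lock.acquire(blocking=False):
    try:
        pass  # run the tasks that require this lock
    finally:
        lock.release()
else:
    # The worker's equivalent: push the task back to SCHEDULED and retry later.
    retry_at = time.time() + 3  # illustrative LOCK_RETRY-style delay
    print('lock busy, retry at', retry_at)
```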
236,901
closeio/tasktiger
tasktiger/worker.py
Worker._finish_task_processing
def _finish_task_processing(self, queue, task, success): """ After a task is executed, this method is called and ensures that the task gets properly removed from the ACTIVE queue and, in case of an error, retried or marked as failed. """ log = self.log.bind(queue=queue, task_id=task.id) def _mark_done(): # Remove the task from active queue task._move(from_state=ACTIVE) log.info('done') if success: _mark_done() else: should_retry = False should_log_error = True # Get execution info (for logging and retry purposes) execution = self.connection.lindex( self._key('task', task.id, 'executions'), -1) if execution: execution = json.loads(execution) if execution and execution.get('retry'): if 'retry_method' in execution: retry_func, retry_args = execution['retry_method'] else: # We expect the serialized method here. retry_func, retry_args = serialize_retry_method( \ self.config['DEFAULT_RETRY_METHOD']) should_log_error = execution['log_error'] should_retry = True if task.retry_method and not should_retry: retry_func, retry_args = task.retry_method if task.retry_on: if execution: exception_name = execution.get('exception_name') try: exception_class = import_attribute(exception_name) except TaskImportError: log.error('could not import exception', exception_name=exception_name) else: if task.should_retry_on(exception_class, logger=log): should_retry = True else: should_retry = True state = ERROR when = time.time() log_context = { 'func': task.serialized_func } if should_retry: retry_num = task.n_executions() log_context['retry_func'] = retry_func log_context['retry_num'] = retry_num try: func = import_attribute(retry_func) except TaskImportError: log.error('could not import retry function', func=retry_func) else: try: retry_delay = func(retry_num, *retry_args) log_context['retry_delay'] = retry_delay when += retry_delay except StopRetry: pass else: state = SCHEDULED if execution: if state == ERROR and should_log_error: log_func = log.error else: log_func = log.warning log_context.update({ 'time_failed': execution.get('time_failed'), 'traceback': execution.get('traceback'), 'exception_name': execution.get('exception_name'), }) log_func('task error', **log_context) else: log.error('execution not found', **log_context) # Move task to the scheduled queue for retry, or move to error # queue if we don't want to retry. if state == ERROR and not should_log_error: _mark_done() else: task._move(from_state=ACTIVE, to_state=state, when=when)
python
def _finish_task_processing(self, queue, task, success): """ After a task is executed, this method is called and ensures that the task gets properly removed from the ACTIVE queue and, in case of an error, retried or marked as failed. """ log = self.log.bind(queue=queue, task_id=task.id) def _mark_done(): # Remove the task from active queue task._move(from_state=ACTIVE) log.info('done') if success: _mark_done() else: should_retry = False should_log_error = True # Get execution info (for logging and retry purposes) execution = self.connection.lindex( self._key('task', task.id, 'executions'), -1) if execution: execution = json.loads(execution) if execution and execution.get('retry'): if 'retry_method' in execution: retry_func, retry_args = execution['retry_method'] else: # We expect the serialized method here. retry_func, retry_args = serialize_retry_method( \ self.config['DEFAULT_RETRY_METHOD']) should_log_error = execution['log_error'] should_retry = True if task.retry_method and not should_retry: retry_func, retry_args = task.retry_method if task.retry_on: if execution: exception_name = execution.get('exception_name') try: exception_class = import_attribute(exception_name) except TaskImportError: log.error('could not import exception', exception_name=exception_name) else: if task.should_retry_on(exception_class, logger=log): should_retry = True else: should_retry = True state = ERROR when = time.time() log_context = { 'func': task.serialized_func } if should_retry: retry_num = task.n_executions() log_context['retry_func'] = retry_func log_context['retry_num'] = retry_num try: func = import_attribute(retry_func) except TaskImportError: log.error('could not import retry function', func=retry_func) else: try: retry_delay = func(retry_num, *retry_args) log_context['retry_delay'] = retry_delay when += retry_delay except StopRetry: pass else: state = SCHEDULED if execution: if state == ERROR and should_log_error: log_func = log.error else: log_func = log.warning log_context.update({ 'time_failed': execution.get('time_failed'), 'traceback': execution.get('traceback'), 'exception_name': execution.get('exception_name'), }) log_func('task error', **log_context) else: log.error('execution not found', **log_context) # Move task to the scheduled queue for retry, or move to error # queue if we don't want to retry. if state == ERROR and not should_log_error: _mark_done() else: task._move(from_state=ACTIVE, to_state=state, when=when)
[ "def", "_finish_task_processing", "(", "self", ",", "queue", ",", "task", ",", "success", ")", ":", "log", "=", "self", ".", "log", ".", "bind", "(", "queue", "=", "queue", ",", "task_id", "=", "task", ".", "id", ")", "def", "_mark_done", "(", ")", ":", "# Remove the task from active queue", "task", ".", "_move", "(", "from_state", "=", "ACTIVE", ")", "log", ".", "info", "(", "'done'", ")", "if", "success", ":", "_mark_done", "(", ")", "else", ":", "should_retry", "=", "False", "should_log_error", "=", "True", "# Get execution info (for logging and retry purposes)", "execution", "=", "self", ".", "connection", ".", "lindex", "(", "self", ".", "_key", "(", "'task'", ",", "task", ".", "id", ",", "'executions'", ")", ",", "-", "1", ")", "if", "execution", ":", "execution", "=", "json", ".", "loads", "(", "execution", ")", "if", "execution", "and", "execution", ".", "get", "(", "'retry'", ")", ":", "if", "'retry_method'", "in", "execution", ":", "retry_func", ",", "retry_args", "=", "execution", "[", "'retry_method'", "]", "else", ":", "# We expect the serialized method here.", "retry_func", ",", "retry_args", "=", "serialize_retry_method", "(", "self", ".", "config", "[", "'DEFAULT_RETRY_METHOD'", "]", ")", "should_log_error", "=", "execution", "[", "'log_error'", "]", "should_retry", "=", "True", "if", "task", ".", "retry_method", "and", "not", "should_retry", ":", "retry_func", ",", "retry_args", "=", "task", ".", "retry_method", "if", "task", ".", "retry_on", ":", "if", "execution", ":", "exception_name", "=", "execution", ".", "get", "(", "'exception_name'", ")", "try", ":", "exception_class", "=", "import_attribute", "(", "exception_name", ")", "except", "TaskImportError", ":", "log", ".", "error", "(", "'could not import exception'", ",", "exception_name", "=", "exception_name", ")", "else", ":", "if", "task", ".", "should_retry_on", "(", "exception_class", ",", "logger", "=", "log", ")", ":", "should_retry", "=", "True", "else", ":", "should_retry", "=", "True", "state", "=", "ERROR", "when", "=", "time", ".", "time", "(", ")", "log_context", "=", "{", "'func'", ":", "task", ".", "serialized_func", "}", "if", "should_retry", ":", "retry_num", "=", "task", ".", "n_executions", "(", ")", "log_context", "[", "'retry_func'", "]", "=", "retry_func", "log_context", "[", "'retry_num'", "]", "=", "retry_num", "try", ":", "func", "=", "import_attribute", "(", "retry_func", ")", "except", "TaskImportError", ":", "log", ".", "error", "(", "'could not import retry function'", ",", "func", "=", "retry_func", ")", "else", ":", "try", ":", "retry_delay", "=", "func", "(", "retry_num", ",", "*", "retry_args", ")", "log_context", "[", "'retry_delay'", "]", "=", "retry_delay", "when", "+=", "retry_delay", "except", "StopRetry", ":", "pass", "else", ":", "state", "=", "SCHEDULED", "if", "execution", ":", "if", "state", "==", "ERROR", "and", "should_log_error", ":", "log_func", "=", "log", ".", "error", "else", ":", "log_func", "=", "log", ".", "warning", "log_context", ".", "update", "(", "{", "'time_failed'", ":", "execution", ".", "get", "(", "'time_failed'", ")", ",", "'traceback'", ":", "execution", ".", "get", "(", "'traceback'", ")", ",", "'exception_name'", ":", "execution", ".", "get", "(", "'exception_name'", ")", ",", "}", ")", "log_func", "(", "'task error'", ",", "*", "*", "log_context", ")", "else", ":", "log", ".", "error", "(", "'execution not found'", ",", "*", "*", "log_context", ")", "# Move task to the scheduled queue for retry, or move to error", "# queue if we don't want to 
retry.", "if", "state", "==", "ERROR", "and", "not", "should_log_error", ":", "_mark_done", "(", ")", "else", ":", "task", ".", "_move", "(", "from_state", "=", "ACTIVE", ",", "to_state", "=", "state", ",", "when", "=", "when", ")" ]
After a task is executed, this method is called and ensures that the task gets properly removed from the ACTIVE queue and, in case of an error, retried or marked as failed.
[ "After", "a", "task", "is", "executed", "this", "method", "is", "called", "and", "ensures", "that", "the", "task", "gets", "properly", "removed", "from", "the", "ACTIVE", "queue", "and", "in", "case", "of", "an", "error", "retried", "or", "marked", "as", "failed", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L759-L859
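The retry branch in the row above resolves retry_func by dotted path and calls it as func(retry_num, *retry_args), expecting either a delay in seconds or a StopRetry exception. A hedged sketch of a callable that follows that protocol; the name backoff and the import location of StopRetry are assumptions, not taken from the row:

```python
from tasktiger.exceptions import StopRetry  # assumed import location of StopRetry

def backoff(retry_num, base_delay, max_retries):
    """Illustrative retry method: exponential delay, capped number of attempts."""
    if retry_num > max_retries:
        raise StopRetry()  # the worker then leaves the task in the ERROR state
    return base_delay * 2 ** (retry_num - 1)  # seconds added to the reschedule time
```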
236,902
closeio/tasktiger
tasktiger/worker.py
Worker.run
def run(self, once=False, force_once=False): """ Main loop of the worker. Use once=True to execute any queued tasks and then exit. Use force_once=True with once=True to always exit after one processing loop even if tasks remain queued. """ self.log.info('ready', id=self.id, queues=sorted(self.only_queues), exclude_queues=sorted(self.exclude_queues), single_worker_queues=sorted(self.single_worker_queues), max_workers=self.max_workers_per_queue) if not self.scripts.can_replicate_commands: # Older Redis versions may create additional overhead when # executing pipelines. self.log.warn('using old Redis version') if self.config['STATS_INTERVAL']: self.stats_thread = StatsThread(self) self.stats_thread.start() # Queue any periodic tasks that are not queued yet. self._queue_periodic_tasks() # First scan all the available queues for new items until they're empty. # Then, listen to the activity channel. # XXX: This can get inefficient when having lots of queues. self._pubsub = self.connection.pubsub() self._pubsub.subscribe(self._key('activity')) self._queue_set = set(self._filter_queues( self.connection.smembers(self._key(QUEUED)))) try: while True: # Update the queue set on every iteration so we don't get stuck # on processing a specific queue. self._wait_for_new_tasks(timeout=self.config['SELECT_TIMEOUT'], batch_timeout=self.config['SELECT_BATCH_TIMEOUT']) self._install_signal_handlers() self._did_work = False self._worker_run() self._uninstall_signal_handlers() if once and (not self._queue_set or force_once): break if self._stop_requested: raise KeyboardInterrupt() except KeyboardInterrupt: pass except Exception as e: self.log.exception(event='exception') raise finally: if self.stats_thread: self.stats_thread.stop() self.stats_thread = None # Free up Redis connection self._pubsub.reset() self.log.info('done')
python
def run(self, once=False, force_once=False): """ Main loop of the worker. Use once=True to execute any queued tasks and then exit. Use force_once=True with once=True to always exit after one processing loop even if tasks remain queued. """ self.log.info('ready', id=self.id, queues=sorted(self.only_queues), exclude_queues=sorted(self.exclude_queues), single_worker_queues=sorted(self.single_worker_queues), max_workers=self.max_workers_per_queue) if not self.scripts.can_replicate_commands: # Older Redis versions may create additional overhead when # executing pipelines. self.log.warn('using old Redis version') if self.config['STATS_INTERVAL']: self.stats_thread = StatsThread(self) self.stats_thread.start() # Queue any periodic tasks that are not queued yet. self._queue_periodic_tasks() # First scan all the available queues for new items until they're empty. # Then, listen to the activity channel. # XXX: This can get inefficient when having lots of queues. self._pubsub = self.connection.pubsub() self._pubsub.subscribe(self._key('activity')) self._queue_set = set(self._filter_queues( self.connection.smembers(self._key(QUEUED)))) try: while True: # Update the queue set on every iteration so we don't get stuck # on processing a specific queue. self._wait_for_new_tasks(timeout=self.config['SELECT_TIMEOUT'], batch_timeout=self.config['SELECT_BATCH_TIMEOUT']) self._install_signal_handlers() self._did_work = False self._worker_run() self._uninstall_signal_handlers() if once and (not self._queue_set or force_once): break if self._stop_requested: raise KeyboardInterrupt() except KeyboardInterrupt: pass except Exception as e: self.log.exception(event='exception') raise finally: if self.stats_thread: self.stats_thread.stop() self.stats_thread = None # Free up Redis connection self._pubsub.reset() self.log.info('done')
[ "def", "run", "(", "self", ",", "once", "=", "False", ",", "force_once", "=", "False", ")", ":", "self", ".", "log", ".", "info", "(", "'ready'", ",", "id", "=", "self", ".", "id", ",", "queues", "=", "sorted", "(", "self", ".", "only_queues", ")", ",", "exclude_queues", "=", "sorted", "(", "self", ".", "exclude_queues", ")", ",", "single_worker_queues", "=", "sorted", "(", "self", ".", "single_worker_queues", ")", ",", "max_workers", "=", "self", ".", "max_workers_per_queue", ")", "if", "not", "self", ".", "scripts", ".", "can_replicate_commands", ":", "# Older Redis versions may create additional overhead when", "# executing pipelines.", "self", ".", "log", ".", "warn", "(", "'using old Redis version'", ")", "if", "self", ".", "config", "[", "'STATS_INTERVAL'", "]", ":", "self", ".", "stats_thread", "=", "StatsThread", "(", "self", ")", "self", ".", "stats_thread", ".", "start", "(", ")", "# Queue any periodic tasks that are not queued yet.", "self", ".", "_queue_periodic_tasks", "(", ")", "# First scan all the available queues for new items until they're empty.", "# Then, listen to the activity channel.", "# XXX: This can get inefficient when having lots of queues.", "self", ".", "_pubsub", "=", "self", ".", "connection", ".", "pubsub", "(", ")", "self", ".", "_pubsub", ".", "subscribe", "(", "self", ".", "_key", "(", "'activity'", ")", ")", "self", ".", "_queue_set", "=", "set", "(", "self", ".", "_filter_queues", "(", "self", ".", "connection", ".", "smembers", "(", "self", ".", "_key", "(", "QUEUED", ")", ")", ")", ")", "try", ":", "while", "True", ":", "# Update the queue set on every iteration so we don't get stuck", "# on processing a specific queue.", "self", ".", "_wait_for_new_tasks", "(", "timeout", "=", "self", ".", "config", "[", "'SELECT_TIMEOUT'", "]", ",", "batch_timeout", "=", "self", ".", "config", "[", "'SELECT_BATCH_TIMEOUT'", "]", ")", "self", ".", "_install_signal_handlers", "(", ")", "self", ".", "_did_work", "=", "False", "self", ".", "_worker_run", "(", ")", "self", ".", "_uninstall_signal_handlers", "(", ")", "if", "once", "and", "(", "not", "self", ".", "_queue_set", "or", "force_once", ")", ":", "break", "if", "self", ".", "_stop_requested", ":", "raise", "KeyboardInterrupt", "(", ")", "except", "KeyboardInterrupt", ":", "pass", "except", "Exception", "as", "e", ":", "self", ".", "log", ".", "exception", "(", "event", "=", "'exception'", ")", "raise", "finally", ":", "if", "self", ".", "stats_thread", ":", "self", ".", "stats_thread", ".", "stop", "(", ")", "self", ".", "stats_thread", "=", "None", "# Free up Redis connection", "self", ".", "_pubsub", ".", "reset", "(", ")", "self", ".", "log", ".", "info", "(", "'done'", ")" ]
Main loop of the worker. Use once=True to execute any queued tasks and then exit. Use force_once=True with once=True to always exit after one processing loop even if tasks remain queued.
[ "Main", "loop", "of", "the", "worker", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L938-L1004
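A minimal usage sketch for the main loop documented above, assuming the usual TaskTiger and Worker constructors; the connection settings are illustrative:

```python
import redis
from tasktiger import TaskTiger, Worker  # assumed public imports

tiger = TaskTiger(connection=redis.Redis())  # illustrative connection settings
worker = Worker(tiger)

# Drain whatever is currently queued, then return instead of looping forever.
worker.run(once=True)
```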
236,903
closeio/tasktiger
tasktiger/redis_scripts.py
RedisScripts.can_replicate_commands
def can_replicate_commands(self): """ Whether Redis supports single command replication. """ if not hasattr(self, '_can_replicate_commands'): info = self.redis.info('server') version_info = info['redis_version'].split('.') major, minor = int(version_info[0]), int(version_info[1]) result = major > 3 or major == 3 and minor >= 2 self._can_replicate_commands = result return self._can_replicate_commands
python
def can_replicate_commands(self): """ Whether Redis supports single command replication. """ if not hasattr(self, '_can_replicate_commands'): info = self.redis.info('server') version_info = info['redis_version'].split('.') major, minor = int(version_info[0]), int(version_info[1]) result = major > 3 or major == 3 and minor >= 2 self._can_replicate_commands = result return self._can_replicate_commands
[ "def", "can_replicate_commands", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_can_replicate_commands'", ")", ":", "info", "=", "self", ".", "redis", ".", "info", "(", "'server'", ")", "version_info", "=", "info", "[", "'redis_version'", "]", ".", "split", "(", "'.'", ")", "major", ",", "minor", "=", "int", "(", "version_info", "[", "0", "]", ")", ",", "int", "(", "version_info", "[", "1", "]", ")", "result", "=", "major", ">", "3", "or", "major", "==", "3", "and", "minor", ">=", "2", "self", ".", "_can_replicate_commands", "=", "result", "return", "self", ".", "_can_replicate_commands" ]
Whether Redis supports single command replication.
[ "Whether", "Redis", "supports", "single", "command", "replication", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/redis_scripts.py#L301-L311
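The property above caches a version check against Redis 3.2; the comparison itself boils down to the following standalone sketch:

```python
def supports_command_replication(redis_version):
    """True for Redis 3.2 and newer, where single-command replication is available."""
    major, minor = (int(part) for part in redis_version.split('.')[:2])
    return (major, minor) >= (3, 2)

assert supports_command_replication('3.2.12')
assert supports_command_replication('5.0.7')
assert not supports_command_replication('2.8.24')
```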
236,904
closeio/tasktiger
tasktiger/redis_scripts.py
RedisScripts.zpoppush
def zpoppush(self, source, destination, count, score, new_score, client=None, withscores=False, on_success=None, if_exists=None): """ Pops the first ``count`` members from the ZSET ``source`` and adds them to the ZSET ``destination`` with a score of ``new_score``. If ``score`` is not None, only members up to a score of ``score`` are used. Returns the members that were moved and, if ``withscores`` is True, their original scores. If items were moved, the action defined in ``on_success`` is executed. The only implemented option is a tuple in the form ('update_sets', ``set_value``, ``remove_from_set``, ``add_to_set`` [, ``add_to_set_if_exists``]). If no items are left in the ``source`` ZSET, the ``set_value`` is removed from ``remove_from_set``. If any items were moved to the ``destination`` ZSET, the ``set_value`` is added to ``add_to_set``. If any items were moved to the ``if_exists_key`` ZSET (see below), the ``set_value`` is added to the ``add_to_set_if_exists`` set. If ``if_exists`` is specified as a tuple ('add', if_exists_key, if_exists_score, if_exists_mode), then members that are already in the ``destination`` set will not be returned or updated, but they will be added to a ZSET ``if_exists_key`` with a score of ``if_exists_score`` and the given behavior specified in ``if_exists_mode`` for members that already exist in the ``if_exists_key`` ZSET. ``if_exists_mode`` can be one of the following: - "nx": Don't update the score - "min": Use the smaller of the given and existing score - "max": Use the larger of the given and existing score If ``if_exists`` is specified as a tuple ('noupdate',), then no action will be taken for members that are already in the ``destination`` ZSET (their score will not be updated). """ if score is None: score = '+inf' # Include all elements. if withscores: if on_success: raise NotImplementedError() return self._zpoppush_withscores( keys=[source, destination], args=[score, count, new_score], client=client) else: if if_exists and if_exists[0] == 'add': _, if_exists_key, if_exists_score, if_exists_mode = if_exists if if_exists_mode != 'min': raise NotImplementedError() if not on_success or on_success[0] != 'update_sets': raise NotImplementedError() set_value, remove_from_set, add_to_set, add_to_set_if_exists \ = on_success[1:] return self._zpoppush_exists_min_update_sets( keys=[source, destination, remove_from_set, add_to_set, add_to_set_if_exists, if_exists_key], args=[score, count, new_score, set_value, if_exists_score], ) elif if_exists and if_exists[0] == 'noupdate': if not on_success or on_success[0] != 'update_sets': raise NotImplementedError() set_value, remove_from_set, add_to_set \ = on_success[1:] return self._zpoppush_exists_ignore_update_sets( keys=[source, destination, remove_from_set, add_to_set], args=[score, count, new_score, set_value], ) if on_success: if on_success[0] != 'update_sets': raise NotImplementedError() else: set_value, remove_from_set, add_to_set = on_success[1:] return self._zpoppush_update_sets( keys=[source, destination, remove_from_set, add_to_set], args=[score, count, new_score, set_value], client=client) else: return self._zpoppush( keys=[source, destination], args=[score, count, new_score], client=client)
python
def zpoppush(self, source, destination, count, score, new_score, client=None, withscores=False, on_success=None, if_exists=None): """ Pops the first ``count`` members from the ZSET ``source`` and adds them to the ZSET ``destination`` with a score of ``new_score``. If ``score`` is not None, only members up to a score of ``score`` are used. Returns the members that were moved and, if ``withscores`` is True, their original scores. If items were moved, the action defined in ``on_success`` is executed. The only implemented option is a tuple in the form ('update_sets', ``set_value``, ``remove_from_set``, ``add_to_set`` [, ``add_to_set_if_exists``]). If no items are left in the ``source`` ZSET, the ``set_value`` is removed from ``remove_from_set``. If any items were moved to the ``destination`` ZSET, the ``set_value`` is added to ``add_to_set``. If any items were moved to the ``if_exists_key`` ZSET (see below), the ``set_value`` is added to the ``add_to_set_if_exists`` set. If ``if_exists`` is specified as a tuple ('add', if_exists_key, if_exists_score, if_exists_mode), then members that are already in the ``destination`` set will not be returned or updated, but they will be added to a ZSET ``if_exists_key`` with a score of ``if_exists_score`` and the given behavior specified in ``if_exists_mode`` for members that already exist in the ``if_exists_key`` ZSET. ``if_exists_mode`` can be one of the following: - "nx": Don't update the score - "min": Use the smaller of the given and existing score - "max": Use the larger of the given and existing score If ``if_exists`` is specified as a tuple ('noupdate',), then no action will be taken for members that are already in the ``destination`` ZSET (their score will not be updated). """ if score is None: score = '+inf' # Include all elements. if withscores: if on_success: raise NotImplementedError() return self._zpoppush_withscores( keys=[source, destination], args=[score, count, new_score], client=client) else: if if_exists and if_exists[0] == 'add': _, if_exists_key, if_exists_score, if_exists_mode = if_exists if if_exists_mode != 'min': raise NotImplementedError() if not on_success or on_success[0] != 'update_sets': raise NotImplementedError() set_value, remove_from_set, add_to_set, add_to_set_if_exists \ = on_success[1:] return self._zpoppush_exists_min_update_sets( keys=[source, destination, remove_from_set, add_to_set, add_to_set_if_exists, if_exists_key], args=[score, count, new_score, set_value, if_exists_score], ) elif if_exists and if_exists[0] == 'noupdate': if not on_success or on_success[0] != 'update_sets': raise NotImplementedError() set_value, remove_from_set, add_to_set \ = on_success[1:] return self._zpoppush_exists_ignore_update_sets( keys=[source, destination, remove_from_set, add_to_set], args=[score, count, new_score, set_value], ) if on_success: if on_success[0] != 'update_sets': raise NotImplementedError() else: set_value, remove_from_set, add_to_set = on_success[1:] return self._zpoppush_update_sets( keys=[source, destination, remove_from_set, add_to_set], args=[score, count, new_score, set_value], client=client) else: return self._zpoppush( keys=[source, destination], args=[score, count, new_score], client=client)
[ "def", "zpoppush", "(", "self", ",", "source", ",", "destination", ",", "count", ",", "score", ",", "new_score", ",", "client", "=", "None", ",", "withscores", "=", "False", ",", "on_success", "=", "None", ",", "if_exists", "=", "None", ")", ":", "if", "score", "is", "None", ":", "score", "=", "'+inf'", "# Include all elements.", "if", "withscores", ":", "if", "on_success", ":", "raise", "NotImplementedError", "(", ")", "return", "self", ".", "_zpoppush_withscores", "(", "keys", "=", "[", "source", ",", "destination", "]", ",", "args", "=", "[", "score", ",", "count", ",", "new_score", "]", ",", "client", "=", "client", ")", "else", ":", "if", "if_exists", "and", "if_exists", "[", "0", "]", "==", "'add'", ":", "_", ",", "if_exists_key", ",", "if_exists_score", ",", "if_exists_mode", "=", "if_exists", "if", "if_exists_mode", "!=", "'min'", ":", "raise", "NotImplementedError", "(", ")", "if", "not", "on_success", "or", "on_success", "[", "0", "]", "!=", "'update_sets'", ":", "raise", "NotImplementedError", "(", ")", "set_value", ",", "remove_from_set", ",", "add_to_set", ",", "add_to_set_if_exists", "=", "on_success", "[", "1", ":", "]", "return", "self", ".", "_zpoppush_exists_min_update_sets", "(", "keys", "=", "[", "source", ",", "destination", ",", "remove_from_set", ",", "add_to_set", ",", "add_to_set_if_exists", ",", "if_exists_key", "]", ",", "args", "=", "[", "score", ",", "count", ",", "new_score", ",", "set_value", ",", "if_exists_score", "]", ",", ")", "elif", "if_exists", "and", "if_exists", "[", "0", "]", "==", "'noupdate'", ":", "if", "not", "on_success", "or", "on_success", "[", "0", "]", "!=", "'update_sets'", ":", "raise", "NotImplementedError", "(", ")", "set_value", ",", "remove_from_set", ",", "add_to_set", "=", "on_success", "[", "1", ":", "]", "return", "self", ".", "_zpoppush_exists_ignore_update_sets", "(", "keys", "=", "[", "source", ",", "destination", ",", "remove_from_set", ",", "add_to_set", "]", ",", "args", "=", "[", "score", ",", "count", ",", "new_score", ",", "set_value", "]", ",", ")", "if", "on_success", ":", "if", "on_success", "[", "0", "]", "!=", "'update_sets'", ":", "raise", "NotImplementedError", "(", ")", "else", ":", "set_value", ",", "remove_from_set", ",", "add_to_set", "=", "on_success", "[", "1", ":", "]", "return", "self", ".", "_zpoppush_update_sets", "(", "keys", "=", "[", "source", ",", "destination", ",", "remove_from_set", ",", "add_to_set", "]", ",", "args", "=", "[", "score", ",", "count", ",", "new_score", ",", "set_value", "]", ",", "client", "=", "client", ")", "else", ":", "return", "self", ".", "_zpoppush", "(", "keys", "=", "[", "source", ",", "destination", "]", ",", "args", "=", "[", "score", ",", "count", ",", "new_score", "]", ",", "client", "=", "client", ")" ]
Pops the first ``count`` members from the ZSET ``source`` and adds them to the ZSET ``destination`` with a score of ``new_score``. If ``score`` is not None, only members up to a score of ``score`` are used. Returns the members that were moved and, if ``withscores`` is True, their original scores. If items were moved, the action defined in ``on_success`` is executed. The only implemented option is a tuple in the form ('update_sets', ``set_value``, ``remove_from_set``, ``add_to_set`` [, ``add_to_set_if_exists``]). If no items are left in the ``source`` ZSET, the ``set_value`` is removed from ``remove_from_set``. If any items were moved to the ``destination`` ZSET, the ``set_value`` is added to ``add_to_set``. If any items were moved to the ``if_exists_key`` ZSET (see below), the ``set_value`` is added to the ``add_to_set_if_exists`` set. If ``if_exists`` is specified as a tuple ('add', if_exists_key, if_exists_score, if_exists_mode), then members that are already in the ``destination`` set will not be returned or updated, but they will be added to a ZSET ``if_exists_key`` with a score of ``if_exists_score`` and the given behavior specified in ``if_exists_mode`` for members that already exist in the ``if_exists_key`` ZSET. ``if_exists_mode`` can be one of the following: - "nx": Don't update the score - "min": Use the smaller of the given and existing score - "max": Use the larger of the given and existing score If ``if_exists`` is specified as a tuple ('noupdate',), then no action will be taken for members that are already in the ``destination`` ZSET (their score will not be updated).
[ "Pops", "the", "first", "count", "members", "from", "the", "ZSET", "source", "and", "adds", "them", "to", "the", "ZSET", "destination", "with", "a", "score", "of", "new_score", ".", "If", "score", "is", "not", "None", "only", "members", "up", "to", "a", "score", "of", "score", "are", "used", ".", "Returns", "the", "members", "that", "were", "moved", "and", "if", "withscores", "is", "True", "their", "original", "scores", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/redis_scripts.py#L339-L423
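A hedged call sketch for the on_success contract described in the docstring above. The key names are illustrative, and the RedisScripts constructor signature is an assumption; only the argument shape follows the documented ('update_sets', set_value, remove_from_set, add_to_set) tuple format:

```python
import time
import redis
from tasktiger.redis_scripts import RedisScripts  # import path taken from the row's path field

scripts = RedisScripts(redis.Redis())  # assumed constructor signature
now = time.time()

# Move up to 10 members whose score is due (<= now) from a scheduled ZSET to an
# active ZSET, and keep a pair of "which queues are non-empty" sets up to date.
moved = scripts.zpoppush(
    source='t:scheduled:default',  # illustrative key names
    destination='t:active:default',
    count=10,
    score=now,       # only members scored up to now
    new_score=now,   # score they receive in the destination
    on_success=('update_sets', 'default',
                't:scheduled',   # queue name removed here if the source is drained
                't:active'),     # queue name added here if anything moved
)
```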
236,905
closeio/tasktiger
tasktiger/redis_scripts.py
RedisScripts.execute_pipeline
def execute_pipeline(self, pipeline, client=None): """ Executes the given Redis pipeline as a Lua script. When an error occurs, the transaction stops executing, and an exception is raised. This differs from Redis transactions, where execution continues after an error. On success, a list of results is returned. The pipeline is cleared after execution and can no longer be reused. Example: p = conn.pipeline() p.lrange('x', 0, -1) p.set('success', 1) # If "x" is empty or a list, an array [[...], True] is returned. # Otherwise, ResponseError is raised and "success" is not set. results = redis_scripts.execute_pipeline(p) """ client = client or self.redis executing_pipeline = None try: # Prepare args stack = pipeline.command_stack script_args = [int(self.can_replicate_commands), len(stack)] for args, options in stack: script_args += [len(args)-1] + list(args) # Run the pipeline if self.can_replicate_commands: # Redis 3.2 or higher # Make sure scripts exist if pipeline.scripts: pipeline.load_scripts() raw_results = self._execute_pipeline(args=script_args, client=client) else: executing_pipeline = client.pipeline() # Always load scripts to avoid issues when Redis loads data # from AOF file / when replicating. for s in pipeline.scripts: executing_pipeline.script_load(s.script) # Run actual pipeline lua script self._execute_pipeline(args=script_args, client=executing_pipeline) # Always load all scripts and run actual pipeline lua script raw_results = executing_pipeline.execute()[-1] # Run response callbacks on results. results = [] response_callbacks = pipeline.response_callbacks for ((args, options), result) in zip(stack, raw_results): command_name = args[0] if command_name in response_callbacks: result = response_callbacks[command_name](result, **options) results.append(result) return results finally: if executing_pipeline: executing_pipeline.reset() pipeline.reset()
python
def execute_pipeline(self, pipeline, client=None): """ Executes the given Redis pipeline as a Lua script. When an error occurs, the transaction stops executing, and an exception is raised. This differs from Redis transactions, where execution continues after an error. On success, a list of results is returned. The pipeline is cleared after execution and can no longer be reused. Example: p = conn.pipeline() p.lrange('x', 0, -1) p.set('success', 1) # If "x" is empty or a list, an array [[...], True] is returned. # Otherwise, ResponseError is raised and "success" is not set. results = redis_scripts.execute_pipeline(p) """ client = client or self.redis executing_pipeline = None try: # Prepare args stack = pipeline.command_stack script_args = [int(self.can_replicate_commands), len(stack)] for args, options in stack: script_args += [len(args)-1] + list(args) # Run the pipeline if self.can_replicate_commands: # Redis 3.2 or higher # Make sure scripts exist if pipeline.scripts: pipeline.load_scripts() raw_results = self._execute_pipeline(args=script_args, client=client) else: executing_pipeline = client.pipeline() # Always load scripts to avoid issues when Redis loads data # from AOF file / when replicating. for s in pipeline.scripts: executing_pipeline.script_load(s.script) # Run actual pipeline lua script self._execute_pipeline(args=script_args, client=executing_pipeline) # Always load all scripts and run actual pipeline lua script raw_results = executing_pipeline.execute()[-1] # Run response callbacks on results. results = [] response_callbacks = pipeline.response_callbacks for ((args, options), result) in zip(stack, raw_results): command_name = args[0] if command_name in response_callbacks: result = response_callbacks[command_name](result, **options) results.append(result) return results finally: if executing_pipeline: executing_pipeline.reset() pipeline.reset()
[ "def", "execute_pipeline", "(", "self", ",", "pipeline", ",", "client", "=", "None", ")", ":", "client", "=", "client", "or", "self", ".", "redis", "executing_pipeline", "=", "None", "try", ":", "# Prepare args", "stack", "=", "pipeline", ".", "command_stack", "script_args", "=", "[", "int", "(", "self", ".", "can_replicate_commands", ")", ",", "len", "(", "stack", ")", "]", "for", "args", ",", "options", "in", "stack", ":", "script_args", "+=", "[", "len", "(", "args", ")", "-", "1", "]", "+", "list", "(", "args", ")", "# Run the pipeline", "if", "self", ".", "can_replicate_commands", ":", "# Redis 3.2 or higher", "# Make sure scripts exist", "if", "pipeline", ".", "scripts", ":", "pipeline", ".", "load_scripts", "(", ")", "raw_results", "=", "self", ".", "_execute_pipeline", "(", "args", "=", "script_args", ",", "client", "=", "client", ")", "else", ":", "executing_pipeline", "=", "client", ".", "pipeline", "(", ")", "# Always load scripts to avoid issues when Redis loads data", "# from AOF file / when replicating.", "for", "s", "in", "pipeline", ".", "scripts", ":", "executing_pipeline", ".", "script_load", "(", "s", ".", "script", ")", "# Run actual pipeline lua script", "self", ".", "_execute_pipeline", "(", "args", "=", "script_args", ",", "client", "=", "executing_pipeline", ")", "# Always load all scripts and run actual pipeline lua script", "raw_results", "=", "executing_pipeline", ".", "execute", "(", ")", "[", "-", "1", "]", "# Run response callbacks on results.", "results", "=", "[", "]", "response_callbacks", "=", "pipeline", ".", "response_callbacks", "for", "(", "(", "args", ",", "options", ")", ",", "result", ")", "in", "zip", "(", "stack", ",", "raw_results", ")", ":", "command_name", "=", "args", "[", "0", "]", "if", "command_name", "in", "response_callbacks", ":", "result", "=", "response_callbacks", "[", "command_name", "]", "(", "result", ",", "*", "*", "options", ")", "results", ".", "append", "(", "result", ")", "return", "results", "finally", ":", "if", "executing_pipeline", ":", "executing_pipeline", ".", "reset", "(", ")", "pipeline", ".", "reset", "(", ")" ]
Executes the given Redis pipeline as a Lua script. When an error occurs, the transaction stops executing, and an exception is raised. This differs from Redis transactions, where execution continues after an error. On success, a list of results is returned. The pipeline is cleared after execution and can no longer be reused. Example: p = conn.pipeline() p.lrange('x', 0, -1) p.set('success', 1) # If "x" is empty or a list, an array [[...], True] is returned. # Otherwise, ResponseError is raised and "success" is not set. results = redis_scripts.execute_pipeline(p)
[ "Executes", "the", "given", "Redis", "pipeline", "as", "a", "Lua", "script", ".", "When", "an", "error", "occurs", "the", "transaction", "stops", "executing", "and", "an", "exception", "is", "raised", ".", "This", "differs", "from", "Redis", "transactions", "where", "execution", "continues", "after", "an", "error", ".", "On", "success", "a", "list", "of", "results", "is", "returned", ".", "The", "pipeline", "is", "cleared", "after", "execution", "and", "can", "no", "longer", "be", "reused", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/redis_scripts.py#L466-L534
236,906
closeio/tasktiger
tasktiger/_internal.py
gen_unique_id
def gen_unique_id(serialized_name, args, kwargs): """ Generates and returns a hex-encoded 256-bit ID for the given task name and args. Used to generate IDs for unique tasks or for task locks. """ return hashlib.sha256(json.dumps({ 'func': serialized_name, 'args': args, 'kwargs': kwargs, }, sort_keys=True).encode('utf8')).hexdigest()
python
def gen_unique_id(serialized_name, args, kwargs): """ Generates and returns a hex-encoded 256-bit ID for the given task name and args. Used to generate IDs for unique tasks or for task locks. """ return hashlib.sha256(json.dumps({ 'func': serialized_name, 'args': args, 'kwargs': kwargs, }, sort_keys=True).encode('utf8')).hexdigest()
[ "def", "gen_unique_id", "(", "serialized_name", ",", "args", ",", "kwargs", ")", ":", "return", "hashlib", ".", "sha256", "(", "json", ".", "dumps", "(", "{", "'func'", ":", "serialized_name", ",", "'args'", ":", "args", ",", "'kwargs'", ":", "kwargs", ",", "}", ",", "sort_keys", "=", "True", ")", ".", "encode", "(", "'utf8'", ")", ")", ".", "hexdigest", "(", ")" ]
Generates and returns a hex-encoded 256-bit ID for the given task name and args. Used to generate IDs for unique tasks or for task locks.
[ "Generates", "and", "returns", "a", "hex", "-", "encoded", "256", "-", "bit", "ID", "for", "the", "given", "task", "name", "and", "args", ".", "Used", "to", "generate", "IDs", "for", "unique", "tasks", "or", "for", "task", "locks", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/_internal.py#L56-L65
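Because the payload is dumped with sort_keys=True, the resulting ID does not depend on keyword-argument order. A self-contained sketch of that property; the function name in the example is illustrative:

```python
import hashlib
import json

def gen_unique_id(serialized_name, args, kwargs):
    # Same construction as in the row above: sorted JSON payload, SHA-256 hex digest.
    return hashlib.sha256(json.dumps({
        'func': serialized_name,
        'args': args,
        'kwargs': kwargs,
    }, sort_keys=True).encode('utf8')).hexdigest()

a = gen_unique_id('mymodule:send_email', None, {'user_id': 1, 'force': True})
b = gen_unique_id('mymodule:send_email', None, {'force': True, 'user_id': 1})
assert a == b  # same lock/unique-task ID regardless of kwarg order
```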
236,907
closeio/tasktiger
tasktiger/_internal.py
serialize_func_name
def serialize_func_name(func): """ Returns the dotted serialized path to the passed function. """ if func.__module__ == '__main__': raise ValueError('Functions from the __main__ module cannot be ' 'processed by workers.') try: # This will only work on Python 3.3 or above, but it will allow us to use static/classmethods func_name = func.__qualname__ except AttributeError: func_name = func.__name__ return ':'.join([func.__module__, func_name])
python
def serialize_func_name(func): """ Returns the dotted serialized path to the passed function. """ if func.__module__ == '__main__': raise ValueError('Functions from the __main__ module cannot be ' 'processed by workers.') try: # This will only work on Python 3.3 or above, but it will allow us to use static/classmethods func_name = func.__qualname__ except AttributeError: func_name = func.__name__ return ':'.join([func.__module__, func_name])
[ "def", "serialize_func_name", "(", "func", ")", ":", "if", "func", ".", "__module__", "==", "'__main__'", ":", "raise", "ValueError", "(", "'Functions from the __main__ module cannot be '", "'processed by workers.'", ")", "try", ":", "# This will only work on Python 3.3 or above, but it will allow us to use static/classmethods", "func_name", "=", "func", ".", "__qualname__", "except", "AttributeError", ":", "func_name", "=", "func", ".", "__name__", "return", "':'", ".", "join", "(", "[", "func", ".", "__module__", ",", "func_name", "]", ")" ]
Returns the dotted serialized path to the passed function.
[ "Returns", "the", "dotted", "serialized", "path", "to", "the", "passed", "function", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/_internal.py#L67-L79
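A quick sketch of the module:qualname format the function above produces; the import path is taken from the row's path field, and the standard-library functions are only used as convenient examples:

```python
import json
from tasktiger._internal import serialize_func_name  # import path from the row's path field

# Module-level function: "module:name"
print(serialize_func_name(json.dumps))  # json:dumps

# Method on a class (uses __qualname__ on Python 3.3+): "module:Class.method"
print(serialize_func_name(json.JSONEncoder.encode))  # json.encoder:JSONEncoder.encode
```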
236,908
closeio/tasktiger
tasktiger/_internal.py
dotted_parts
def dotted_parts(s): """ For a string "a.b.c", yields "a", "a.b", "a.b.c". """ idx = -1 while s: idx = s.find('.', idx+1) if idx == -1: yield s break yield s[:idx]
python
def dotted_parts(s): """ For a string "a.b.c", yields "a", "a.b", "a.b.c". """ idx = -1 while s: idx = s.find('.', idx+1) if idx == -1: yield s break yield s[:idx]
[ "def", "dotted_parts", "(", "s", ")", ":", "idx", "=", "-", "1", "while", "s", ":", "idx", "=", "s", ".", "find", "(", "'.'", ",", "idx", "+", "1", ")", "if", "idx", "==", "-", "1", ":", "yield", "s", "break", "yield", "s", "[", ":", "idx", "]" ]
For a string "a.b.c", yields "a", "a.b", "a.b.c".
[ "For", "a", "string", "a", ".", "b", ".", "c", "yields", "a", "a", ".", "b", "a", ".", "b", ".", "c", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/_internal.py#L81-L91
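A usage sketch for the generator above, with the import path taken from the row's path field:

```python
from tasktiger._internal import dotted_parts  # import path from the row's path field

assert list(dotted_parts('a.b.c')) == ['a', 'a.b', 'a.b.c']
assert list(dotted_parts('queue')) == ['queue']  # no dots: yields the string itself
```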
236,909
closeio/tasktiger
tasktiger/_internal.py
reversed_dotted_parts
def reversed_dotted_parts(s): """ For a string "a.b.c", yields "a.b.c", "a.b", "a". """ idx = -1 if s: yield s while s: idx = s.rfind('.', 0, idx) if idx == -1: break yield s[:idx]
python
def reversed_dotted_parts(s): """ For a string "a.b.c", yields "a.b.c", "a.b", "a". """ idx = -1 if s: yield s while s: idx = s.rfind('.', 0, idx) if idx == -1: break yield s[:idx]
[ "def", "reversed_dotted_parts", "(", "s", ")", ":", "idx", "=", "-", "1", "if", "s", ":", "yield", "s", "while", "s", ":", "idx", "=", "s", ".", "rfind", "(", "'.'", ",", "0", ",", "idx", ")", "if", "idx", "==", "-", "1", ":", "break", "yield", "s", "[", ":", "idx", "]" ]
For a string "a.b.c", yields "a.b.c", "a.b", "a".
[ "For", "a", "string", "a", ".", "b", ".", "c", "yields", "a", ".", "b", ".", "c", "a", ".", "b", "a", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/_internal.py#L93-L104
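The reversed variant yields the most specific prefix first; same import-path assumption as above:

```python
from tasktiger._internal import reversed_dotted_parts  # import path from the row's path field

assert list(reversed_dotted_parts('a.b.c')) == ['a.b.c', 'a.b', 'a']
```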
236,910
closeio/tasktiger
tasktiger/logging.py
tasktiger_processor
def tasktiger_processor(logger, method_name, event_dict): """ TaskTiger structlog processor. Inject the current task id for non-batch tasks. """ if g['current_tasks'] is not None and not g['current_task_is_batch']: event_dict['task_id'] = g['current_tasks'][0].id return event_dict
python
def tasktiger_processor(logger, method_name, event_dict): """ TaskTiger structlog processor. Inject the current task id for non-batch tasks. """ if g['current_tasks'] is not None and not g['current_task_is_batch']: event_dict['task_id'] = g['current_tasks'][0].id return event_dict
[ "def", "tasktiger_processor", "(", "logger", ",", "method_name", ",", "event_dict", ")", ":", "if", "g", "[", "'current_tasks'", "]", "is", "not", "None", "and", "not", "g", "[", "'current_task_is_batch'", "]", ":", "event_dict", "[", "'task_id'", "]", "=", "g", "[", "'current_tasks'", "]", "[", "0", "]", ".", "id", "return", "event_dict" ]
TaskTiger structlog processor. Inject the current task id for non-batch tasks.
[ "TaskTiger", "structlog", "processor", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/logging.py#L4-L14
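A hedged sketch of wiring this processor into structlog; the rest of the processor chain is illustrative and not taken from the row:

```python
import structlog
from tasktiger.logging import tasktiger_processor  # import path from the row's path field

structlog.configure(
    processors=[
        tasktiger_processor,                  # adds task_id for non-batch tasks
        structlog.processors.JSONRenderer(),  # final renderer (illustrative choice)
    ]
)

log = structlog.get_logger()
log.info('processing')  # includes task_id when emitted from inside a running task
```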
236,911
closeio/tasktiger
tasktiger/task.py
Task.should_retry_on
def should_retry_on(self, exception_class, logger=None): """ Whether this task should be retried when the given exception occurs. """ for n in (self.retry_on or []): try: if issubclass(exception_class, import_attribute(n)): return True except TaskImportError: if logger: logger.error('should_retry_on could not import class', exception_name=n) return False
python
def should_retry_on(self, exception_class, logger=None): """ Whether this task should be retried when the given exception occurs. """ for n in (self.retry_on or []): try: if issubclass(exception_class, import_attribute(n)): return True except TaskImportError: if logger: logger.error('should_retry_on could not import class', exception_name=n) return False
[ "def", "should_retry_on", "(", "self", ",", "exception_class", ",", "logger", "=", "None", ")", ":", "for", "n", "in", "(", "self", ".", "retry_on", "or", "[", "]", ")", ":", "try", ":", "if", "issubclass", "(", "exception_class", ",", "import_attribute", "(", "n", ")", ")", ":", "return", "True", "except", "TaskImportError", ":", "if", "logger", ":", "logger", ".", "error", "(", "'should_retry_on could not import class'", ",", "exception_name", "=", "n", ")", "return", "False" ]
Whether this task should be retried when the given exception occurs.
[ "Whether", "this", "task", "should", "be", "retried", "when", "the", "given", "exception", "occurs", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/task.py#L155-L167
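A standalone restatement of the matching rule above, using importlib directly instead of tasktiger's import_attribute helper and plain dotted paths as an assumption; unlike the original it does not handle import failures:

```python
import importlib

def matches_retry_on(exception_class, retry_on):
    """True if exception_class is a subclass of any class named in retry_on."""
    for dotted in retry_on or []:
        module_name, _, attr = dotted.rpartition('.')
        cls = getattr(importlib.import_module(module_name), attr)
        if issubclass(exception_class, cls):
            return True
    return False

assert matches_retry_on(ConnectionResetError, ['builtins.ConnectionError'])
assert not matches_retry_on(KeyError, ['builtins.ConnectionError'])
```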
236,912
closeio/tasktiger
tasktiger/task.py
Task.update_scheduled_time
def update_scheduled_time(self, when): """ Updates a scheduled task's date to the given date. If the task is not scheduled, a TaskNotFound exception is raised. """ tiger = self.tiger ts = get_timestamp(when) assert ts pipeline = tiger.connection.pipeline() key = tiger._key(SCHEDULED, self.queue) tiger.scripts.zadd(key, ts, self.id, mode='xx', client=pipeline) pipeline.zscore(key, self.id) _, score = pipeline.execute() if not score: raise TaskNotFound('Task {} not found in queue "{}" in state "{}".'.format( self.id, self.queue, SCHEDULED )) self._ts = ts
python
def update_scheduled_time(self, when): """ Updates a scheduled task's date to the given date. If the task is not scheduled, a TaskNotFound exception is raised. """ tiger = self.tiger ts = get_timestamp(when) assert ts pipeline = tiger.connection.pipeline() key = tiger._key(SCHEDULED, self.queue) tiger.scripts.zadd(key, ts, self.id, mode='xx', client=pipeline) pipeline.zscore(key, self.id) _, score = pipeline.execute() if not score: raise TaskNotFound('Task {} not found in queue "{}" in state "{}".'.format( self.id, self.queue, SCHEDULED )) self._ts = ts
[ "def", "update_scheduled_time", "(", "self", ",", "when", ")", ":", "tiger", "=", "self", ".", "tiger", "ts", "=", "get_timestamp", "(", "when", ")", "assert", "ts", "pipeline", "=", "tiger", ".", "connection", ".", "pipeline", "(", ")", "key", "=", "tiger", ".", "_key", "(", "SCHEDULED", ",", "self", ".", "queue", ")", "tiger", ".", "scripts", ".", "zadd", "(", "key", ",", "ts", ",", "self", ".", "id", ",", "mode", "=", "'xx'", ",", "client", "=", "pipeline", ")", "pipeline", ".", "zscore", "(", "key", ",", "self", ".", "id", ")", "_", ",", "score", "=", "pipeline", ".", "execute", "(", ")", "if", "not", "score", ":", "raise", "TaskNotFound", "(", "'Task {} not found in queue \"{}\" in state \"{}\".'", ".", "format", "(", "self", ".", "id", ",", "self", ".", "queue", ",", "SCHEDULED", ")", ")", "self", ".", "_ts", "=", "ts" ]
Updates a scheduled task's date to the given date. If the task is not scheduled, a TaskNotFound exception is raised.
[ "Updates", "a", "scheduled", "task", "s", "date", "to", "the", "given", "date", ".", "If", "the", "task", "is", "not", "scheduled", "a", "TaskNotFound", "exception", "is", "raised", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/task.py#L322-L342
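The update-only-if-present behaviour above relies on ZADD with the XX flag followed by a ZSCORE check. A hedged sketch of the same pattern with plain redis-py rather than tasktiger's script wrapper; the key and task id are illustrative:

```python
import time
import redis

conn = redis.Redis()
key, task_id = 't:scheduled:default', 'some-task-id'  # illustrative names
ts = time.time() + 60

pipeline = conn.pipeline()
pipeline.zadd(key, {task_id: ts}, xx=True)  # update the score only if the member exists
pipeline.zscore(key, task_id)
_, score = pipeline.execute()

if score is None:
    raise LookupError('task is not in the scheduled queue')
```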
236,913
closeio/tasktiger
tasktiger/task.py
Task.n_executions
def n_executions(self): """ Queries and returns the number of past task executions. """ pipeline = self.tiger.connection.pipeline() pipeline.exists(self.tiger._key('task', self.id)) pipeline.llen(self.tiger._key('task', self.id, 'executions')) exists, n_executions = pipeline.execute() if not exists: raise TaskNotFound('Task {} not found.'.format( self.id )) return n_executions
python
def n_executions(self): """ Queries and returns the number of past task executions. """ pipeline = self.tiger.connection.pipeline() pipeline.exists(self.tiger._key('task', self.id)) pipeline.llen(self.tiger._key('task', self.id, 'executions')) exists, n_executions = pipeline.execute() if not exists: raise TaskNotFound('Task {} not found.'.format( self.id )) return n_executions
[ "def", "n_executions", "(", "self", ")", ":", "pipeline", "=", "self", ".", "tiger", ".", "connection", ".", "pipeline", "(", ")", "pipeline", ".", "exists", "(", "self", ".", "tiger", ".", "_key", "(", "'task'", ",", "self", ".", "id", ")", ")", "pipeline", ".", "llen", "(", "self", ".", "tiger", ".", "_key", "(", "'task'", ",", "self", ".", "id", ",", "'executions'", ")", ")", "exists", ",", "n_executions", "=", "pipeline", ".", "execute", "(", ")", "if", "not", "exists", ":", "raise", "TaskNotFound", "(", "'Task {} not found.'", ".", "format", "(", "self", ".", "id", ")", ")", "return", "n_executions" ]
Queries and returns the number of past task executions.
[ "Queries", "and", "returns", "the", "number", "of", "past", "task", "executions", "." ]
59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/task.py#L421-L433
236,914
aewallin/allantools
allantools/noise_kasdin.py
Noise.set_input
def set_input(self, nr=2, qd=1, b=0): """ Set inputs after initialization Parameters ------- nr: integer length of generated time-series number must be power of two qd: float discrete variance b: float noise type: 0 : White Phase Modulation (WPM) -1 : Flicker Phase Modulation (FPM) -2 : White Frequency Modulation (WFM) -3 : Flicker Frequency Modulation (FFM) -4 : Random Walk Frequency Modulation (RWFM) """ self.nr = nr self.qd = qd self.b = b
python
def set_input(self, nr=2, qd=1, b=0): """ Set inputs after initialization Parameters ------- nr: integer length of generated time-series number must be power of two qd: float discrete variance b: float noise type: 0 : White Phase Modulation (WPM) -1 : Flicker Phase Modulation (FPM) -2 : White Frequency Modulation (WFM) -3 : Flicker Frequency Modulation (FFM) -4 : Random Walk Frequency Modulation (RWFM) """ self.nr = nr self.qd = qd self.b = b
[ "def", "set_input", "(", "self", ",", "nr", "=", "2", ",", "qd", "=", "1", ",", "b", "=", "0", ")", ":", "self", ".", "nr", "=", "nr", "self", ".", "qd", "=", "qd", "self", ".", "b", "=", "b" ]
Set inputs after initialization Parameters ------- nr: integer length of generated time-series number must be power of two qd: float discrete variance b: float noise type: 0 : White Phase Modulation (WPM) -1 : Flicker Phase Modulation (FPM) -2 : White Frequency Modulation (WFM) -3 : Flicker Frequency Modulation (FFM) -4 : Random Walk Frequency Modulation (RWFM)
[ "Set", "inputs", "after", "initialization" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/noise_kasdin.py#L81-L102
236,915
aewallin/allantools
allantools/noise_kasdin.py
Noise.generateNoise
def generateNoise(self): """ Generate noise time series based on input parameters Returns ------- time_series: np.array Time series with colored noise. len(time_series) == nr """ # Fill wfb array with white noise based on given discrete variance wfb = np.zeros(self.nr*2) wfb[:self.nr] = np.random.normal(0, np.sqrt(self.qd), self.nr) # Generate the hfb coefficients based on the noise type mhb = -self.b/2.0 hfb = np.zeros(self.nr*2) hfb = np.zeros(self.nr*2) hfb[0] = 1.0 indices = np.arange(self.nr-1) hfb[1:self.nr] = (mhb+indices)/(indices+1.0) hfb[:self.nr] = np.multiply.accumulate(hfb[:self.nr]) # Perform discrete Fourier transform of wfb and hfb time series wfb_fft = np.fft.rfft(wfb) hfb_fft = np.fft.rfft(hfb) # Perform inverse Fourier transform of the product of wfb and hfb FFTs time_series = np.fft.irfft(wfb_fft*hfb_fft)[:self.nr] self.time_series = time_series
python
def generateNoise(self): """ Generate noise time series based on input parameters Returns ------- time_series: np.array Time series with colored noise. len(time_series) == nr """ # Fill wfb array with white noise based on given discrete variance wfb = np.zeros(self.nr*2) wfb[:self.nr] = np.random.normal(0, np.sqrt(self.qd), self.nr) # Generate the hfb coefficients based on the noise type mhb = -self.b/2.0 hfb = np.zeros(self.nr*2) hfb = np.zeros(self.nr*2) hfb[0] = 1.0 indices = np.arange(self.nr-1) hfb[1:self.nr] = (mhb+indices)/(indices+1.0) hfb[:self.nr] = np.multiply.accumulate(hfb[:self.nr]) # Perform discrete Fourier transform of wfb and hfb time series wfb_fft = np.fft.rfft(wfb) hfb_fft = np.fft.rfft(hfb) # Perform inverse Fourier transform of the product of wfb and hfb FFTs time_series = np.fft.irfft(wfb_fft*hfb_fft)[:self.nr] self.time_series = time_series
[ "def", "generateNoise", "(", "self", ")", ":", "# Fill wfb array with white noise based on given discrete variance", "wfb", "=", "np", ".", "zeros", "(", "self", ".", "nr", "*", "2", ")", "wfb", "[", ":", "self", ".", "nr", "]", "=", "np", ".", "random", ".", "normal", "(", "0", ",", "np", ".", "sqrt", "(", "self", ".", "qd", ")", ",", "self", ".", "nr", ")", "# Generate the hfb coefficients based on the noise type", "mhb", "=", "-", "self", ".", "b", "/", "2.0", "hfb", "=", "np", ".", "zeros", "(", "self", ".", "nr", "*", "2", ")", "hfb", "=", "np", ".", "zeros", "(", "self", ".", "nr", "*", "2", ")", "hfb", "[", "0", "]", "=", "1.0", "indices", "=", "np", ".", "arange", "(", "self", ".", "nr", "-", "1", ")", "hfb", "[", "1", ":", "self", ".", "nr", "]", "=", "(", "mhb", "+", "indices", ")", "/", "(", "indices", "+", "1.0", ")", "hfb", "[", ":", "self", ".", "nr", "]", "=", "np", ".", "multiply", ".", "accumulate", "(", "hfb", "[", ":", "self", ".", "nr", "]", ")", "# Perform discrete Fourier transform of wfb and hfb time series", "wfb_fft", "=", "np", ".", "fft", ".", "rfft", "(", "wfb", ")", "hfb_fft", "=", "np", ".", "fft", ".", "rfft", "(", "hfb", ")", "# Perform inverse Fourier transform of the product of wfb and hfb FFTs", "time_series", "=", "np", ".", "fft", ".", "irfft", "(", "wfb_fft", "*", "hfb_fft", ")", "[", ":", "self", ".", "nr", "]", "self", ".", "time_series", "=", "time_series" ]
Generate noise time series based on input parameters Returns ------- time_series: np.array Time series with colored noise. len(time_series) == nr
[ "Generate", "noise", "time", "series", "based", "on", "input", "parameters" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/noise_kasdin.py#L104-L130
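A short sketch of how set_input() and generateNoise() combine in practice. It assumes the Kasdin & Walter Noise class is re-exported at the allantools package level; the qd value and series length are illustrative.

import numpy as np
import allantools

ng = allantools.Noise()                     # assumed package-level export of noise_kasdin.Noise
ng.set_input(nr=2**14, qd=1e-20, b=-2)      # white frequency modulation; nr must be a power of two
ng.generateNoise()
x = ng.time_series                          # colored-noise time series of length nr
print(len(x), np.std(x))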
236,916
aewallin/allantools
allantools/noise_kasdin.py
Noise.adev
def adev(self, tau0, tau): """ return predicted ADEV of noise-type at given tau """ prefactor = self.adev_from_qd(tau0=tau0, tau=tau) c = self.c_avar() avar = pow(prefactor, 2)*pow(tau, c) return np.sqrt(avar)
python
def adev(self, tau0, tau): """ return predicted ADEV of noise-type at given tau """ prefactor = self.adev_from_qd(tau0=tau0, tau=tau) c = self.c_avar() avar = pow(prefactor, 2)*pow(tau, c) return np.sqrt(avar)
[ "def", "adev", "(", "self", ",", "tau0", ",", "tau", ")", ":", "prefactor", "=", "self", ".", "adev_from_qd", "(", "tau0", "=", "tau0", ",", "tau", "=", "tau", ")", "c", "=", "self", ".", "c_avar", "(", ")", "avar", "=", "pow", "(", "prefactor", ",", "2", ")", "*", "pow", "(", "tau", ",", "c", ")", "return", "np", ".", "sqrt", "(", "avar", ")" ]
return predicted ADEV of noise-type at given tau
[ "return", "predicted", "ADEV", "of", "noise", "-", "type", "at", "given", "tau" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/noise_kasdin.py#L161-L168
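The predicted ADEV above can be checked against adev() computed on a generated series. This is a sketch under the assumption that the generated time series is treated as phase data sampled every tau0 seconds; the noise parameters and tau grid are illustrative.

import allantools

ng = allantools.Noise()
ng.set_input(nr=2**16, qd=1e-22, b=0)       # white phase modulation
ng.generateNoise()

tau0 = 1.0
taus, devs, errs, ns = allantools.adev(ng.time_series, rate=1.0/tau0,
                                       data_type="phase", taus=[1, 4, 16, 64])
for tau, dev in zip(taus, devs):
    # measured deviation vs. the closed-form prediction from the generator
    print(tau, dev, ng.adev(tau0=tau0, tau=tau))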
236,917
aewallin/allantools
allantools/noise_kasdin.py
Noise.mdev
def mdev(self, tau0, tau): """ return predicted MDEV of noise-type at given tau """ prefactor = self.mdev_from_qd(tau0=tau0, tau=tau) c = self.c_mvar() mvar = pow(prefactor, 2)*pow(tau, c) return np.sqrt(mvar)
python
def mdev(self, tau0, tau): """ return predicted MDEV of noise-type at given tau """ prefactor = self.mdev_from_qd(tau0=tau0, tau=tau) c = self.c_mvar() mvar = pow(prefactor, 2)*pow(tau, c) return np.sqrt(mvar)
[ "def", "mdev", "(", "self", ",", "tau0", ",", "tau", ")", ":", "prefactor", "=", "self", ".", "mdev_from_qd", "(", "tau0", "=", "tau0", ",", "tau", "=", "tau", ")", "c", "=", "self", ".", "c_mvar", "(", ")", "mvar", "=", "pow", "(", "prefactor", ",", "2", ")", "*", "pow", "(", "tau", ",", "c", ")", "return", "np", ".", "sqrt", "(", "mvar", ")" ]
return predicted MDEV of noise-type at given tau
[ "return", "predicted", "MDEV", "of", "noise", "-", "type", "at", "given", "tau" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/noise_kasdin.py#L170-L177
236,918
aewallin/allantools
allantools/noise.py
scipy_psd
def scipy_psd(x, f_sample=1.0, nr_segments=4): """ PSD routine from scipy we can compare our own numpy result against this one """ f_axis, psd_of_x = scipy.signal.welch(x, f_sample, nperseg=len(x)/nr_segments) return f_axis, psd_of_x
python
def scipy_psd(x, f_sample=1.0, nr_segments=4): """ PSD routine from scipy we can compare our own numpy result against this one """ f_axis, psd_of_x = scipy.signal.welch(x, f_sample, nperseg=len(x)/nr_segments) return f_axis, psd_of_x
[ "def", "scipy_psd", "(", "x", ",", "f_sample", "=", "1.0", ",", "nr_segments", "=", "4", ")", ":", "f_axis", ",", "psd_of_x", "=", "scipy", ".", "signal", ".", "welch", "(", "x", ",", "f_sample", ",", "nperseg", "=", "len", "(", "x", ")", "/", "nr_segments", ")", "return", "f_axis", ",", "psd_of_x" ]
PSD routine from scipy we can compare our own numpy result against this one
[ "PSD", "routine", "from", "scipy", "we", "can", "compare", "our", "own", "numpy", "result", "against", "this", "one" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/noise.py#L37-L42
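For reference, the same computation can be run directly through scipy.signal.welch. This sketch uses integer division for nperseg (so the segment length stays an integer) and purely illustrative white-noise input.

import numpy as np
import scipy.signal

x = np.random.randn(4096)                                          # illustrative white noise
f_axis, psd = scipy.signal.welch(x, fs=1.0, nperseg=len(x) // 4)   # 4 segments, as in scipy_psd above
print(f_axis[:3], psd[:3])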
236,919
aewallin/allantools
allantools/noise.py
iterpink
def iterpink(depth=20): """Generate a sequence of samples of pink noise. pink noise generator from http://pydoc.net/Python/lmj.sound/0.1.1/lmj.sound.noise/ Based on the Voss-McCartney algorithm, discussion and code examples at http://www.firstpr.com.au/dsp/pink-noise/ depth: Use this many samples of white noise to calculate the output. A higher number is slower to run, but renders low frequencies with more correct power spectra. Generates a never-ending sequence of floating-point values. Any continuous set of these samples will tend to have a 1/f power spectrum. """ values = numpy.random.randn(depth) smooth = numpy.random.randn(depth) source = numpy.random.randn(depth) sumvals = values.sum() i = 0 while True: yield sumvals + smooth[i] # advance the index by 1. if the index wraps, generate noise to use in # the calculations, but do not update any of the pink noise values. i += 1 if i == depth: i = 0 smooth = numpy.random.randn(depth) source = numpy.random.randn(depth) continue # count trailing zeros in i c = 0 while not (i >> c) & 1: c += 1 # replace value c with a new source element sumvals += source[i] - values[c] values[c] = source[i]
python
def iterpink(depth=20): """Generate a sequence of samples of pink noise. pink noise generator from http://pydoc.net/Python/lmj.sound/0.1.1/lmj.sound.noise/ Based on the Voss-McCartney algorithm, discussion and code examples at http://www.firstpr.com.au/dsp/pink-noise/ depth: Use this many samples of white noise to calculate the output. A higher number is slower to run, but renders low frequencies with more correct power spectra. Generates a never-ending sequence of floating-point values. Any continuous set of these samples will tend to have a 1/f power spectrum. """ values = numpy.random.randn(depth) smooth = numpy.random.randn(depth) source = numpy.random.randn(depth) sumvals = values.sum() i = 0 while True: yield sumvals + smooth[i] # advance the index by 1. if the index wraps, generate noise to use in # the calculations, but do not update any of the pink noise values. i += 1 if i == depth: i = 0 smooth = numpy.random.randn(depth) source = numpy.random.randn(depth) continue # count trailing zeros in i c = 0 while not (i >> c) & 1: c += 1 # replace value c with a new source element sumvals += source[i] - values[c] values[c] = source[i]
[ "def", "iterpink", "(", "depth", "=", "20", ")", ":", "values", "=", "numpy", ".", "random", ".", "randn", "(", "depth", ")", "smooth", "=", "numpy", ".", "random", ".", "randn", "(", "depth", ")", "source", "=", "numpy", ".", "random", ".", "randn", "(", "depth", ")", "sumvals", "=", "values", ".", "sum", "(", ")", "i", "=", "0", "while", "True", ":", "yield", "sumvals", "+", "smooth", "[", "i", "]", "# advance the index by 1. if the index wraps, generate noise to use in", "# the calculations, but do not update any of the pink noise values.", "i", "+=", "1", "if", "i", "==", "depth", ":", "i", "=", "0", "smooth", "=", "numpy", ".", "random", ".", "randn", "(", "depth", ")", "source", "=", "numpy", ".", "random", ".", "randn", "(", "depth", ")", "continue", "# count trailing zeros in i", "c", "=", "0", "while", "not", "(", "i", ">>", "c", ")", "&", "1", ":", "c", "+=", "1", "# replace value c with a new source element", "sumvals", "+=", "source", "[", "i", "]", "-", "values", "[", "c", "]", "values", "[", "c", "]", "=", "source", "[", "i", "]" ]
Generate a sequence of samples of pink noise. pink noise generator from http://pydoc.net/Python/lmj.sound/0.1.1/lmj.sound.noise/ Based on the Voss-McCartney algorithm, discussion and code examples at http://www.firstpr.com.au/dsp/pink-noise/ depth: Use this many samples of white noise to calculate the output. A higher number is slower to run, but renders low frequencies with more correct power spectra. Generates a never-ending sequence of floating-point values. Any continuous set of these samples will tend to have a 1/f power spectrum.
[ "Generate", "a", "sequence", "of", "samples", "of", "pink", "noise", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/noise.py#L85-L125
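A small usage sketch for the infinite generator above: itertools.islice pulls a finite block of pink-noise samples. It assumes iterpink is importable from allantools.noise, as the record's path suggests; the sample count is illustrative.

import itertools
import numpy
from allantools.noise import iterpink      # assumed import, matching the record's module path

pink = iterpink(depth=20)
samples = numpy.fromiter(itertools.islice(pink, 4096), dtype=float)
samples -= samples.mean()                  # remove the DC offset before plotting or PSD estimation
print(samples[:5], samples.std())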
236,920
aewallin/allantools
examples/noise-color-demo.py
plotline
def plotline(plt, alpha, taus, style,label=""): """ plot a line with the slope alpha """ y = [pow(tt, alpha) for tt in taus] plt.loglog(taus, y, style,label=label)
python
def plotline(plt, alpha, taus, style,label=""): """ plot a line with the slope alpha """ y = [pow(tt, alpha) for tt in taus] plt.loglog(taus, y, style,label=label)
[ "def", "plotline", "(", "plt", ",", "alpha", ",", "taus", ",", "style", ",", "label", "=", "\"\"", ")", ":", "y", "=", "[", "pow", "(", "tt", ",", "alpha", ")", "for", "tt", "in", "taus", "]", "plt", ".", "loglog", "(", "taus", ",", "y", ",", "style", ",", "label", "=", "label", ")" ]
plot a line with the slope alpha
[ "plot", "a", "line", "with", "the", "slope", "alpha" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/examples/noise-color-demo.py#L38-L41
236,921
aewallin/allantools
examples/b1_noise_id_figure.py
b1_noise_id
def b1_noise_id(x, af, rate): """ B1 ratio for noise identification ratio of Standard Variace to AVAR """ (taus,devs,errs,ns) = at.adev(x,taus=[af*rate],data_type="phase", rate=rate) oadev_x = devs[0] y = np.diff(x) y_cut = np.array( y[:len(y)-(len(y)%af)] ) # cut to length assert len(y_cut)%af == 0 y_shaped = y_cut.reshape( ( int(len(y_cut)/af), af) ) y_averaged = np.average(y_shaped,axis=1) # average var = np.var(y_averaged, ddof=1) return var/pow(oadev_x,2.0)
python
def b1_noise_id(x, af, rate): """ B1 ratio for noise identification ratio of Standard Variace to AVAR """ (taus,devs,errs,ns) = at.adev(x,taus=[af*rate],data_type="phase", rate=rate) oadev_x = devs[0] y = np.diff(x) y_cut = np.array( y[:len(y)-(len(y)%af)] ) # cut to length assert len(y_cut)%af == 0 y_shaped = y_cut.reshape( ( int(len(y_cut)/af), af) ) y_averaged = np.average(y_shaped,axis=1) # average var = np.var(y_averaged, ddof=1) return var/pow(oadev_x,2.0)
[ "def", "b1_noise_id", "(", "x", ",", "af", ",", "rate", ")", ":", "(", "taus", ",", "devs", ",", "errs", ",", "ns", ")", "=", "at", ".", "adev", "(", "x", ",", "taus", "=", "[", "af", "*", "rate", "]", ",", "data_type", "=", "\"phase\"", ",", "rate", "=", "rate", ")", "oadev_x", "=", "devs", "[", "0", "]", "y", "=", "np", ".", "diff", "(", "x", ")", "y_cut", "=", "np", ".", "array", "(", "y", "[", ":", "len", "(", "y", ")", "-", "(", "len", "(", "y", ")", "%", "af", ")", "]", ")", "# cut to length", "assert", "len", "(", "y_cut", ")", "%", "af", "==", "0", "y_shaped", "=", "y_cut", ".", "reshape", "(", "(", "int", "(", "len", "(", "y_cut", ")", "/", "af", ")", ",", "af", ")", ")", "y_averaged", "=", "np", ".", "average", "(", "y_shaped", ",", "axis", "=", "1", ")", "# average", "var", "=", "np", ".", "var", "(", "y_averaged", ",", "ddof", "=", "1", ")", "return", "var", "/", "pow", "(", "oadev_x", ",", "2.0", ")" ]
B1 ratio for noise identification ratio of Standard Variance to AVAR
[ "B1", "ratio", "for", "noise", "identification", "ratio", "of", "Standard", "Variance", "to", "AVAR" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/examples/b1_noise_id_figure.py#L5-L19
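The B1 ratio above compares the classical standard variance of af-averaged frequency data against the Allan variance at the same averaging factor. Below is a self-contained sketch of the same ratio written against allantools.adev (the at alias in the record is assumed to be allantools); the test data are illustrative.

import numpy as np
import allantools

def b1_ratio(x, af, rate=1.0):
    # standard variance of af-averaged frequency divided by AVAR at the same af
    taus, devs, errs, ns = allantools.adev(x, taus=[af * rate], data_type="phase", rate=rate)
    avar = devs[0] ** 2
    y = np.diff(x)                              # phase -> frequency (scaling assumes rate = 1)
    y = y[:len(y) - (len(y) % af)]              # cut to a multiple of af
    y_avg = y.reshape(-1, af).mean(axis=1)      # af-point averages
    return np.var(y_avg, ddof=1) / avar

x = np.cumsum(np.random.randn(10000))           # white FM phase data, illustrative
print(b1_ratio(x, af=8))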
236,922
aewallin/allantools
allantools/plot.py
Plot.plot
def plot(self, atDataset, errorbars=False, grid=False): """ use matplotlib methods for plotting Parameters ---------- atDataset : allantools.Dataset() a dataset with computed data errorbars : boolean Plot errorbars. Defaults to False grid : boolean Plot grid. Defaults to False """ if errorbars: self.ax.errorbar(atDataset.out["taus"], atDataset.out["stat"], yerr=atDataset.out["stat_err"], ) else: self.ax.plot(atDataset.out["taus"], atDataset.out["stat"], ) self.ax.set_xlabel("Tau") self.ax.set_ylabel(atDataset.out["stat_id"]) self.ax.grid(grid, which="minor", ls="-", color='0.65') self.ax.grid(grid, which="major", ls="-", color='0.25')
python
def plot(self, atDataset, errorbars=False, grid=False): """ use matplotlib methods for plotting Parameters ---------- atDataset : allantools.Dataset() a dataset with computed data errorbars : boolean Plot errorbars. Defaults to False grid : boolean Plot grid. Defaults to False """ if errorbars: self.ax.errorbar(atDataset.out["taus"], atDataset.out["stat"], yerr=atDataset.out["stat_err"], ) else: self.ax.plot(atDataset.out["taus"], atDataset.out["stat"], ) self.ax.set_xlabel("Tau") self.ax.set_ylabel(atDataset.out["stat_id"]) self.ax.grid(grid, which="minor", ls="-", color='0.65') self.ax.grid(grid, which="major", ls="-", color='0.25')
[ "def", "plot", "(", "self", ",", "atDataset", ",", "errorbars", "=", "False", ",", "grid", "=", "False", ")", ":", "if", "errorbars", ":", "self", ".", "ax", ".", "errorbar", "(", "atDataset", ".", "out", "[", "\"taus\"", "]", ",", "atDataset", ".", "out", "[", "\"stat\"", "]", ",", "yerr", "=", "atDataset", ".", "out", "[", "\"stat_err\"", "]", ",", ")", "else", ":", "self", ".", "ax", ".", "plot", "(", "atDataset", ".", "out", "[", "\"taus\"", "]", ",", "atDataset", ".", "out", "[", "\"stat\"", "]", ",", ")", "self", ".", "ax", ".", "set_xlabel", "(", "\"Tau\"", ")", "self", ".", "ax", ".", "set_ylabel", "(", "atDataset", ".", "out", "[", "\"stat_id\"", "]", ")", "self", ".", "ax", ".", "grid", "(", "grid", ",", "which", "=", "\"minor\"", ",", "ls", "=", "\"-\"", ",", "color", "=", "'0.65'", ")", "self", ".", "ax", ".", "grid", "(", "grid", ",", "which", "=", "\"major\"", ",", "ls", "=", "\"-\"", ",", "color", "=", "'0.25'", ")" ]
use matplotlib methods for plotting Parameters ---------- atDataset : allantools.Dataset() a dataset with computed data errorbars : boolean Plot errorbars. Defaults to False grid : boolean Plot grid. Defaults to False
[ "use", "matplotlib", "methods", "for", "plotting" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/plot.py#L66-L92
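A typical end-to-end sketch for the plot() method above, assuming the usual allantools Dataset/Plot workflow in which compute() fills the out dictionary that plot() reads; the input data and chosen statistic are illustrative.

import numpy as np
import allantools

data = np.cumsum(np.random.randn(10000))        # synthetic phase data
ds = allantools.Dataset(data=data, rate=1.0, data_type="phase", taus="octave")
ds.compute("adev")                              # fills ds.out["taus"], ds.out["stat"], ds.out["stat_err"]

p = allantools.Plot()
p.plot(ds, errorbars=True, grid=True)
p.show()                                        # assumed convenience wrapper around matplotlib's show()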
236,923
aewallin/allantools
allantools/ci.py
greenhall_table2
def greenhall_table2(alpha, d): """ Table 2 from Greenhall 2004 """ row_idx = int(-alpha+2) # map 2-> row0 and -4-> row6 assert(row_idx in [0, 1, 2, 3, 4, 5]) col_idx = int(d-1) table2 = [[(3.0/2.0, 1.0/2.0), (35.0/18.0, 1.0), (231.0/100.0, 3.0/2.0)], # alpha=+2 [(78.6, 25.2), (790.0, 410.0), (9950.0, 6520.0)], [(2.0/3.0, 1.0/6.0), (2.0/3.0, 1.0/3.0), (7.0/9.0, 1.0/2.0)], # alpha=0 [(-1, -1), (0.852, 0.375), (0.997, 0.617)], # -1 [(-1, -1), (1.079, 0.368), (1.033, 0.607)], #-2 [(-1, -1), (-1, -1), (1.053, 0.553)], #-3 [(-1, -1), (-1, -1), (1.302, 0.535)], # alpha=-4 ] #print("table2 = ", table2[row_idx][col_idx]) return table2[row_idx][col_idx]
python
def greenhall_table2(alpha, d): """ Table 2 from Greenhall 2004 """ row_idx = int(-alpha+2) # map 2-> row0 and -4-> row6 assert(row_idx in [0, 1, 2, 3, 4, 5]) col_idx = int(d-1) table2 = [[(3.0/2.0, 1.0/2.0), (35.0/18.0, 1.0), (231.0/100.0, 3.0/2.0)], # alpha=+2 [(78.6, 25.2), (790.0, 410.0), (9950.0, 6520.0)], [(2.0/3.0, 1.0/6.0), (2.0/3.0, 1.0/3.0), (7.0/9.0, 1.0/2.0)], # alpha=0 [(-1, -1), (0.852, 0.375), (0.997, 0.617)], # -1 [(-1, -1), (1.079, 0.368), (1.033, 0.607)], #-2 [(-1, -1), (-1, -1), (1.053, 0.553)], #-3 [(-1, -1), (-1, -1), (1.302, 0.535)], # alpha=-4 ] #print("table2 = ", table2[row_idx][col_idx]) return table2[row_idx][col_idx]
[ "def", "greenhall_table2", "(", "alpha", ",", "d", ")", ":", "row_idx", "=", "int", "(", "-", "alpha", "+", "2", ")", "# map 2-> row0 and -4-> row6", "assert", "(", "row_idx", "in", "[", "0", ",", "1", ",", "2", ",", "3", ",", "4", ",", "5", "]", ")", "col_idx", "=", "int", "(", "d", "-", "1", ")", "table2", "=", "[", "[", "(", "3.0", "/", "2.0", ",", "1.0", "/", "2.0", ")", ",", "(", "35.0", "/", "18.0", ",", "1.0", ")", ",", "(", "231.0", "/", "100.0", ",", "3.0", "/", "2.0", ")", "]", ",", "# alpha=+2", "[", "(", "78.6", ",", "25.2", ")", ",", "(", "790.0", ",", "410.0", ")", ",", "(", "9950.0", ",", "6520.0", ")", "]", ",", "[", "(", "2.0", "/", "3.0", ",", "1.0", "/", "6.0", ")", ",", "(", "2.0", "/", "3.0", ",", "1.0", "/", "3.0", ")", ",", "(", "7.0", "/", "9.0", ",", "1.0", "/", "2.0", ")", "]", ",", "# alpha=0", "[", "(", "-", "1", ",", "-", "1", ")", ",", "(", "0.852", ",", "0.375", ")", ",", "(", "0.997", ",", "0.617", ")", "]", ",", "# -1", "[", "(", "-", "1", ",", "-", "1", ")", ",", "(", "1.079", ",", "0.368", ")", ",", "(", "1.033", ",", "0.607", ")", "]", ",", "#-2", "[", "(", "-", "1", ",", "-", "1", ")", ",", "(", "-", "1", ",", "-", "1", ")", ",", "(", "1.053", ",", "0.553", ")", "]", ",", "#-3", "[", "(", "-", "1", ",", "-", "1", ")", ",", "(", "-", "1", ",", "-", "1", ")", ",", "(", "1.302", ",", "0.535", ")", "]", ",", "# alpha=-4", "]", "#print(\"table2 = \", table2[row_idx][col_idx])", "return", "table2", "[", "row_idx", "]", "[", "col_idx", "]" ]
Table 2 from Greenhall 2004
[ "Table", "2", "from", "Greenhall", "2004" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/ci.py#L662-L676
236,924
aewallin/allantools
allantools/ci.py
greenhall_table1
def greenhall_table1(alpha, d): """ Table 1 from Greenhall 2004 """ row_idx = int(-alpha+2) # map 2-> row0 and -4-> row6 col_idx = int(d-1) table1 = [[(2.0/3.0, 1.0/3.0), (7.0/9.0, 1.0/2.0), (22.0/25.0, 2.0/3.0)], # alpha=+2 [(0.840, 0.345), (0.997, 0.616), (1.141, 0.843)], [(1.079, 0.368), (1.033, 0.607), (1.184, 0.848)], [(-1, -1), (1.048, 0.534), (1.180, 0.816)], # -1 [(-1, -1), (1.302, 0.535), (1.175, 0.777)], #-2 [(-1, -1), (-1, -1), (1.194, 0.703)], #-3 [(-1, -1), (-1, -1), (1.489, 0.702)], # alpha=-4 ] #print("table1 = ", table1[row_idx][col_idx]) return table1[row_idx][col_idx]
python
def greenhall_table1(alpha, d): """ Table 1 from Greenhall 2004 """ row_idx = int(-alpha+2) # map 2-> row0 and -4-> row6 col_idx = int(d-1) table1 = [[(2.0/3.0, 1.0/3.0), (7.0/9.0, 1.0/2.0), (22.0/25.0, 2.0/3.0)], # alpha=+2 [(0.840, 0.345), (0.997, 0.616), (1.141, 0.843)], [(1.079, 0.368), (1.033, 0.607), (1.184, 0.848)], [(-1, -1), (1.048, 0.534), (1.180, 0.816)], # -1 [(-1, -1), (1.302, 0.535), (1.175, 0.777)], #-2 [(-1, -1), (-1, -1), (1.194, 0.703)], #-3 [(-1, -1), (-1, -1), (1.489, 0.702)], # alpha=-4 ] #print("table1 = ", table1[row_idx][col_idx]) return table1[row_idx][col_idx]
[ "def", "greenhall_table1", "(", "alpha", ",", "d", ")", ":", "row_idx", "=", "int", "(", "-", "alpha", "+", "2", ")", "# map 2-> row0 and -4-> row6", "col_idx", "=", "int", "(", "d", "-", "1", ")", "table1", "=", "[", "[", "(", "2.0", "/", "3.0", ",", "1.0", "/", "3.0", ")", ",", "(", "7.0", "/", "9.0", ",", "1.0", "/", "2.0", ")", ",", "(", "22.0", "/", "25.0", ",", "2.0", "/", "3.0", ")", "]", ",", "# alpha=+2", "[", "(", "0.840", ",", "0.345", ")", ",", "(", "0.997", ",", "0.616", ")", ",", "(", "1.141", ",", "0.843", ")", "]", ",", "[", "(", "1.079", ",", "0.368", ")", ",", "(", "1.033", ",", "0.607", ")", ",", "(", "1.184", ",", "0.848", ")", "]", ",", "[", "(", "-", "1", ",", "-", "1", ")", ",", "(", "1.048", ",", "0.534", ")", ",", "(", "1.180", ",", "0.816", ")", "]", ",", "# -1", "[", "(", "-", "1", ",", "-", "1", ")", ",", "(", "1.302", ",", "0.535", ")", ",", "(", "1.175", ",", "0.777", ")", "]", ",", "#-2", "[", "(", "-", "1", ",", "-", "1", ")", ",", "(", "-", "1", ",", "-", "1", ")", ",", "(", "1.194", ",", "0.703", ")", "]", ",", "#-3", "[", "(", "-", "1", ",", "-", "1", ")", ",", "(", "-", "1", ",", "-", "1", ")", ",", "(", "1.489", ",", "0.702", ")", "]", ",", "# alpha=-4", "]", "#print(\"table1 = \", table1[row_idx][col_idx])", "return", "table1", "[", "row_idx", "]", "[", "col_idx", "]" ]
Table 1 from Greenhall 2004
[ "Table", "1", "from", "Greenhall", "2004" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/ci.py#L678-L691
236,925
aewallin/allantools
allantools/ci.py
edf_mtotdev
def edf_mtotdev(N, m, alpha): """ Equivalent degrees of freedom for Modified Total Deviation NIST SP1065 page 41, Table 8 """ assert(alpha in [2, 1, 0, -1, -2]) NIST_SP1065_table8 = [(1.90, 2.1), (1.20, 1.40), (1.10, 1.2), (0.85, 0.50), (0.75, 0.31)] #(b, c) = NIST_SP1065_table8[ abs(alpha-2) ] (b, c) = NIST_SP1065_table8[abs(alpha-2)] edf = b*(float(N)/float(m))-c print("mtotdev b,c= ", (b, c), " edf=", edf) return edf
python
def edf_mtotdev(N, m, alpha): """ Equivalent degrees of freedom for Modified Total Deviation NIST SP1065 page 41, Table 8 """ assert(alpha in [2, 1, 0, -1, -2]) NIST_SP1065_table8 = [(1.90, 2.1), (1.20, 1.40), (1.10, 1.2), (0.85, 0.50), (0.75, 0.31)] #(b, c) = NIST_SP1065_table8[ abs(alpha-2) ] (b, c) = NIST_SP1065_table8[abs(alpha-2)] edf = b*(float(N)/float(m))-c print("mtotdev b,c= ", (b, c), " edf=", edf) return edf
[ "def", "edf_mtotdev", "(", "N", ",", "m", ",", "alpha", ")", ":", "assert", "(", "alpha", "in", "[", "2", ",", "1", ",", "0", ",", "-", "1", ",", "-", "2", "]", ")", "NIST_SP1065_table8", "=", "[", "(", "1.90", ",", "2.1", ")", ",", "(", "1.20", ",", "1.40", ")", ",", "(", "1.10", ",", "1.2", ")", ",", "(", "0.85", ",", "0.50", ")", ",", "(", "0.75", ",", "0.31", ")", "]", "#(b, c) = NIST_SP1065_table8[ abs(alpha-2) ]", "(", "b", ",", "c", ")", "=", "NIST_SP1065_table8", "[", "abs", "(", "alpha", "-", "2", ")", "]", "edf", "=", "b", "*", "(", "float", "(", "N", ")", "/", "float", "(", "m", ")", ")", "-", "c", "print", "(", "\"mtotdev b,c= \"", ",", "(", "b", ",", "c", ")", ",", "\" edf=\"", ",", "edf", ")", "return", "edf" ]
Equivalent degrees of freedom for Modified Total Deviation NIST SP1065 page 41, Table 8
[ "Equivalent", "degrees", "of", "freedom", "for", "Modified", "Total", "Deviation", "NIST", "SP1065", "page", "41", "Table", "8" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/ci.py#L710-L721
236,926
aewallin/allantools
allantools/ci.py
edf_simple
def edf_simple(N, m, alpha): """Equivalent degrees of freedom. Simple approximate formulae. Parameters ---------- N : int the number of phase samples m : int averaging factor, tau = m * tau0 alpha: int exponent of f for the frequency PSD: 'wp' returns white phase noise. alpha=+2 'wf' returns white frequency noise. alpha= 0 'fp' returns flicker phase noise. alpha=+1 'ff' returns flicker frequency noise. alpha=-1 'rf' returns random walk frequency noise. alpha=-2 If the input is not recognized, it defaults to idealized, uncorrelated noise with (N-1) degrees of freedom. Notes ----- S. Stein, Frequency and Time - Their Measurement and Characterization. Precision Frequency Control Vol 2, 1985, pp 191-416. http://tf.boulder.nist.gov/general/pdf/666.pdf Returns ------- edf : float Equivalent degrees of freedom """ N = float(N) m = float(m) if alpha in [2, 1, 0, -1, -2]: # NIST SP 1065, Table 5 if alpha == +2: edf = (N + 1) * (N - 2*m) / (2 * (N - m)) if alpha == 0: edf = (((3 * (N - 1) / (2 * m)) - (2 * (N - 2) / N)) * ((4*pow(m, 2)) / ((4*pow(m, 2)) + 5))) if alpha == 1: a = (N - 1)/(2 * m) b = (2 * m + 1) * (N - 1) / 4 edf = np.exp(np.sqrt(np.log(a) * np.log(b))) if alpha == -1: if m == 1: edf = 2 * (N - 2) /(2.3 * N - 4.9) if m >= 2: edf = 5 * N**2 / (4 * m * (N + (3 * m))) if alpha == -2: a = (N - 2) / (m * (N - 3)**2) b = (N - 1)**2 c = 3 * m * (N - 1) d = 4 * m **2 edf = a * (b - c + d) else: edf = (N - 1) print("Noise type not recognized. Defaulting to N - 1 degrees of freedom.") return edf
python
def edf_simple(N, m, alpha): """Equivalent degrees of freedom. Simple approximate formulae. Parameters ---------- N : int the number of phase samples m : int averaging factor, tau = m * tau0 alpha: int exponent of f for the frequency PSD: 'wp' returns white phase noise. alpha=+2 'wf' returns white frequency noise. alpha= 0 'fp' returns flicker phase noise. alpha=+1 'ff' returns flicker frequency noise. alpha=-1 'rf' returns random walk frequency noise. alpha=-2 If the input is not recognized, it defaults to idealized, uncorrelated noise with (N-1) degrees of freedom. Notes ----- S. Stein, Frequency and Time - Their Measurement and Characterization. Precision Frequency Control Vol 2, 1985, pp 191-416. http://tf.boulder.nist.gov/general/pdf/666.pdf Returns ------- edf : float Equivalent degrees of freedom """ N = float(N) m = float(m) if alpha in [2, 1, 0, -1, -2]: # NIST SP 1065, Table 5 if alpha == +2: edf = (N + 1) * (N - 2*m) / (2 * (N - m)) if alpha == 0: edf = (((3 * (N - 1) / (2 * m)) - (2 * (N - 2) / N)) * ((4*pow(m, 2)) / ((4*pow(m, 2)) + 5))) if alpha == 1: a = (N - 1)/(2 * m) b = (2 * m + 1) * (N - 1) / 4 edf = np.exp(np.sqrt(np.log(a) * np.log(b))) if alpha == -1: if m == 1: edf = 2 * (N - 2) /(2.3 * N - 4.9) if m >= 2: edf = 5 * N**2 / (4 * m * (N + (3 * m))) if alpha == -2: a = (N - 2) / (m * (N - 3)**2) b = (N - 1)**2 c = 3 * m * (N - 1) d = 4 * m **2 edf = a * (b - c + d) else: edf = (N - 1) print("Noise type not recognized. Defaulting to N - 1 degrees of freedom.") return edf
[ "def", "edf_simple", "(", "N", ",", "m", ",", "alpha", ")", ":", "N", "=", "float", "(", "N", ")", "m", "=", "float", "(", "m", ")", "if", "alpha", "in", "[", "2", ",", "1", ",", "0", ",", "-", "1", ",", "-", "2", "]", ":", "# NIST SP 1065, Table 5", "if", "alpha", "==", "+", "2", ":", "edf", "=", "(", "N", "+", "1", ")", "*", "(", "N", "-", "2", "*", "m", ")", "/", "(", "2", "*", "(", "N", "-", "m", ")", ")", "if", "alpha", "==", "0", ":", "edf", "=", "(", "(", "(", "3", "*", "(", "N", "-", "1", ")", "/", "(", "2", "*", "m", ")", ")", "-", "(", "2", "*", "(", "N", "-", "2", ")", "/", "N", ")", ")", "*", "(", "(", "4", "*", "pow", "(", "m", ",", "2", ")", ")", "/", "(", "(", "4", "*", "pow", "(", "m", ",", "2", ")", ")", "+", "5", ")", ")", ")", "if", "alpha", "==", "1", ":", "a", "=", "(", "N", "-", "1", ")", "/", "(", "2", "*", "m", ")", "b", "=", "(", "2", "*", "m", "+", "1", ")", "*", "(", "N", "-", "1", ")", "/", "4", "edf", "=", "np", ".", "exp", "(", "np", ".", "sqrt", "(", "np", ".", "log", "(", "a", ")", "*", "np", ".", "log", "(", "b", ")", ")", ")", "if", "alpha", "==", "-", "1", ":", "if", "m", "==", "1", ":", "edf", "=", "2", "*", "(", "N", "-", "2", ")", "/", "(", "2.3", "*", "N", "-", "4.9", ")", "if", "m", ">=", "2", ":", "edf", "=", "5", "*", "N", "**", "2", "/", "(", "4", "*", "m", "*", "(", "N", "+", "(", "3", "*", "m", ")", ")", ")", "if", "alpha", "==", "-", "2", ":", "a", "=", "(", "N", "-", "2", ")", "/", "(", "m", "*", "(", "N", "-", "3", ")", "**", "2", ")", "b", "=", "(", "N", "-", "1", ")", "**", "2", "c", "=", "3", "*", "m", "*", "(", "N", "-", "1", ")", "d", "=", "4", "*", "m", "**", "2", "edf", "=", "a", "*", "(", "b", "-", "c", "+", "d", ")", "else", ":", "edf", "=", "(", "N", "-", "1", ")", "print", "(", "\"Noise type not recognized. Defaulting to N - 1 degrees of freedom.\"", ")", "return", "edf" ]
Equivalent degrees of freedom. Simple approximate formulae. Parameters ---------- N : int the number of phase samples m : int averaging factor, tau = m * tau0 alpha: int exponent of f for the frequency PSD: 'wp' returns white phase noise. alpha=+2 'wf' returns white frequency noise. alpha= 0 'fp' returns flicker phase noise. alpha=+1 'ff' returns flicker frequency noise. alpha=-1 'rf' returns random walk frequency noise. alpha=-2 If the input is not recognized, it defaults to idealized, uncorrelated noise with (N-1) degrees of freedom. Notes ----- S. Stein, Frequency and Time - Their Measurement and Characterization. Precision Frequency Control Vol 2, 1985, pp 191-416. http://tf.boulder.nist.gov/general/pdf/666.pdf Returns ------- edf : float Equivalent degrees of freedom
[ "Equivalent", "degrees", "of", "freedom", ".", "Simple", "approximate", "formulae", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/ci.py#L723-L789
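A worked call for the approximation above, e.g. white FM (alpha = 0) with N = 1024 phase points at averaging factor m = 16, followed by the chi-squared confidence bounds that an edf is normally used for. The import path follows the record's module; the measured deviation is illustrative.

import scipy.stats
from allantools.ci import edf_simple        # assumed import, matching the record's module path

N, m, alpha = 1024, 16, 0                   # white FM, illustrative sizes
edf = edf_simple(N, m, alpha)

dev = 1e-11                                 # illustrative measured ADEV
conf = 0.683                                # one-sigma interval
lo = dev * (edf / scipy.stats.chi2.ppf(1 - (1 - conf) / 2, edf)) ** 0.5
hi = dev * (edf / scipy.stats.chi2.ppf((1 - conf) / 2, edf)) ** 0.5
print(edf, lo, hi)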
236,927
aewallin/allantools
examples/gradev-demo.py
example1
def example1(): """ Compute the GRADEV of a white phase noise. Compares two different scenarios. 1) The original data and 2) ADEV estimate with gap robust ADEV. """ N = 1000 f = 1 y = np.random.randn(1,N)[0,:] x = [xx for xx in np.linspace(1,len(y),len(y))] x_ax, y_ax, (err_l, err_h), ns = allan.gradev(y,data_type='phase',rate=f,taus=x) plt.errorbar(x_ax, y_ax,yerr=[err_l,err_h],label='GRADEV, no gaps') y[int(np.floor(0.4*N)):int(np.floor(0.6*N))] = np.NaN # Simulate missing data x_ax, y_ax, (err_l, err_h) , ns = allan.gradev(y,data_type='phase',rate=f,taus=x) plt.errorbar(x_ax, y_ax,yerr=[err_l,err_h], label='GRADEV, with gaps') plt.xscale('log') plt.yscale('log') plt.grid() plt.legend() plt.xlabel('Tau / s') plt.ylabel('Overlapping Allan deviation') plt.show()
python
def example1(): """ Compute the GRADEV of a white phase noise. Compares two different scenarios. 1) The original data and 2) ADEV estimate with gap robust ADEV. """ N = 1000 f = 1 y = np.random.randn(1,N)[0,:] x = [xx for xx in np.linspace(1,len(y),len(y))] x_ax, y_ax, (err_l, err_h), ns = allan.gradev(y,data_type='phase',rate=f,taus=x) plt.errorbar(x_ax, y_ax,yerr=[err_l,err_h],label='GRADEV, no gaps') y[int(np.floor(0.4*N)):int(np.floor(0.6*N))] = np.NaN # Simulate missing data x_ax, y_ax, (err_l, err_h) , ns = allan.gradev(y,data_type='phase',rate=f,taus=x) plt.errorbar(x_ax, y_ax,yerr=[err_l,err_h], label='GRADEV, with gaps') plt.xscale('log') plt.yscale('log') plt.grid() plt.legend() plt.xlabel('Tau / s') plt.ylabel('Overlapping Allan deviation') plt.show()
[ "def", "example1", "(", ")", ":", "N", "=", "1000", "f", "=", "1", "y", "=", "np", ".", "random", ".", "randn", "(", "1", ",", "N", ")", "[", "0", ",", ":", "]", "x", "=", "[", "xx", "for", "xx", "in", "np", ".", "linspace", "(", "1", ",", "len", "(", "y", ")", ",", "len", "(", "y", ")", ")", "]", "x_ax", ",", "y_ax", ",", "(", "err_l", ",", "err_h", ")", ",", "ns", "=", "allan", ".", "gradev", "(", "y", ",", "data_type", "=", "'phase'", ",", "rate", "=", "f", ",", "taus", "=", "x", ")", "plt", ".", "errorbar", "(", "x_ax", ",", "y_ax", ",", "yerr", "=", "[", "err_l", ",", "err_h", "]", ",", "label", "=", "'GRADEV, no gaps'", ")", "y", "[", "int", "(", "np", ".", "floor", "(", "0.4", "*", "N", ")", ")", ":", "int", "(", "np", ".", "floor", "(", "0.6", "*", "N", ")", ")", "]", "=", "np", ".", "NaN", "# Simulate missing data", "x_ax", ",", "y_ax", ",", "(", "err_l", ",", "err_h", ")", ",", "ns", "=", "allan", ".", "gradev", "(", "y", ",", "data_type", "=", "'phase'", ",", "rate", "=", "f", ",", "taus", "=", "x", ")", "plt", ".", "errorbar", "(", "x_ax", ",", "y_ax", ",", "yerr", "=", "[", "err_l", ",", "err_h", "]", ",", "label", "=", "'GRADEV, with gaps'", ")", "plt", ".", "xscale", "(", "'log'", ")", "plt", ".", "yscale", "(", "'log'", ")", "plt", ".", "grid", "(", ")", "plt", ".", "legend", "(", ")", "plt", ".", "xlabel", "(", "'Tau / s'", ")", "plt", ".", "ylabel", "(", "'Overlapping Allan deviation'", ")", "plt", ".", "show", "(", ")" ]
Compute the GRADEV of a white phase noise. Compares two different scenarios. 1) The original data and 2) ADEV estimate with gap robust ADEV.
[ "Compute", "the", "GRADEV", "of", "a", "white", "phase", "noise", ".", "Compares", "two", "different", "scenarios", ".", "1", ")", "The", "original", "data", "and", "2", ")", "ADEV", "estimate", "with", "gap", "robust", "ADEV", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/examples/gradev-demo.py#L10-L32
236,928
aewallin/allantools
examples/gradev-demo.py
example2
def example2(): """ Compute the GRADEV of a nonstationary white phase noise. """ N=1000 # number of samples f = 1 # data samples per second s=1+5/N*np.arange(0,N) y=s*np.random.randn(1,N)[0,:] x = [xx for xx in np.linspace(1,len(y),len(y))] x_ax, y_ax, (err_l, err_h) , ns = allan.gradev(y,data_type='phase',rate=f,taus=x) plt.loglog(x_ax, y_ax,'b.',label="No gaps") y[int(0.4*N):int(0.6*N,)] = np.NaN # Simulate missing data x_ax, y_ax, (err_l, err_h), ns = allan.gradev(y,data_type='phase',rate=f,taus=x) plt.loglog(x_ax, y_ax,'g.',label="With gaps") plt.grid() plt.legend() plt.xlabel('Tau / s') plt.ylabel('Overlapping Allan deviation') plt.show()
python
def example2(): """ Compute the GRADEV of a nonstationary white phase noise. """ N=1000 # number of samples f = 1 # data samples per second s=1+5/N*np.arange(0,N) y=s*np.random.randn(1,N)[0,:] x = [xx for xx in np.linspace(1,len(y),len(y))] x_ax, y_ax, (err_l, err_h) , ns = allan.gradev(y,data_type='phase',rate=f,taus=x) plt.loglog(x_ax, y_ax,'b.',label="No gaps") y[int(0.4*N):int(0.6*N,)] = np.NaN # Simulate missing data x_ax, y_ax, (err_l, err_h), ns = allan.gradev(y,data_type='phase',rate=f,taus=x) plt.loglog(x_ax, y_ax,'g.',label="With gaps") plt.grid() plt.legend() plt.xlabel('Tau / s') plt.ylabel('Overlapping Allan deviation') plt.show()
[ "def", "example2", "(", ")", ":", "N", "=", "1000", "# number of samples", "f", "=", "1", "# data samples per second", "s", "=", "1", "+", "5", "/", "N", "*", "np", ".", "arange", "(", "0", ",", "N", ")", "y", "=", "s", "*", "np", ".", "random", ".", "randn", "(", "1", ",", "N", ")", "[", "0", ",", ":", "]", "x", "=", "[", "xx", "for", "xx", "in", "np", ".", "linspace", "(", "1", ",", "len", "(", "y", ")", ",", "len", "(", "y", ")", ")", "]", "x_ax", ",", "y_ax", ",", "(", "err_l", ",", "err_h", ")", ",", "ns", "=", "allan", ".", "gradev", "(", "y", ",", "data_type", "=", "'phase'", ",", "rate", "=", "f", ",", "taus", "=", "x", ")", "plt", ".", "loglog", "(", "x_ax", ",", "y_ax", ",", "'b.'", ",", "label", "=", "\"No gaps\"", ")", "y", "[", "int", "(", "0.4", "*", "N", ")", ":", "int", "(", "0.6", "*", "N", ",", ")", "]", "=", "np", ".", "NaN", "# Simulate missing data", "x_ax", ",", "y_ax", ",", "(", "err_l", ",", "err_h", ")", ",", "ns", "=", "allan", ".", "gradev", "(", "y", ",", "data_type", "=", "'phase'", ",", "rate", "=", "f", ",", "taus", "=", "x", ")", "plt", ".", "loglog", "(", "x_ax", ",", "y_ax", ",", "'g.'", ",", "label", "=", "\"With gaps\"", ")", "plt", ".", "grid", "(", ")", "plt", ".", "legend", "(", ")", "plt", ".", "xlabel", "(", "'Tau / s'", ")", "plt", ".", "ylabel", "(", "'Overlapping Allan deviation'", ")", "plt", ".", "show", "(", ")" ]
Compute the GRADEV of a nonstationary white phase noise.
[ "Compute", "the", "GRADEV", "of", "a", "nonstationary", "white", "phase", "noise", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/examples/gradev-demo.py#L34-L52
236,929
aewallin/allantools
allantools/allantools.py
tdev
def tdev(data, rate=1.0, data_type="phase", taus=None): """ Time deviation. Based on modified Allan variance. .. math:: \\sigma^2_{TDEV}( \\tau ) = { \\tau^2 \\over 3 } \\sigma^2_{MDEV}( \\tau ) Note that TDEV has a unit of seconds. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Returns ------- (taus, tdev, tdev_error, ns): tuple Tuple of values taus: np.array Tau values for which td computed tdev: np.array Computed time deviations (in seconds) for each tau value tdev_errors: np.array Time deviation errors ns: np.array Values of N used in mdev_phase() Notes ----- http://en.wikipedia.org/wiki/Time_deviation """ phase = input_to_phase(data, rate, data_type) (taus, md, mde, ns) = mdev(phase, rate=rate, taus=taus) td = taus * md / np.sqrt(3.0) tde = td / np.sqrt(ns) return taus, td, tde, ns
python
def tdev(data, rate=1.0, data_type="phase", taus=None): """ Time deviation. Based on modified Allan variance. .. math:: \\sigma^2_{TDEV}( \\tau ) = { \\tau^2 \\over 3 } \\sigma^2_{MDEV}( \\tau ) Note that TDEV has a unit of seconds. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Returns ------- (taus, tdev, tdev_error, ns): tuple Tuple of values taus: np.array Tau values for which td computed tdev: np.array Computed time deviations (in seconds) for each tau value tdev_errors: np.array Time deviation errors ns: np.array Values of N used in mdev_phase() Notes ----- http://en.wikipedia.org/wiki/Time_deviation """ phase = input_to_phase(data, rate, data_type) (taus, md, mde, ns) = mdev(phase, rate=rate, taus=taus) td = taus * md / np.sqrt(3.0) tde = td / np.sqrt(ns) return taus, td, tde, ns
[ "def", "tdev", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "(", "taus", ",", "md", ",", "mde", ",", "ns", ")", "=", "mdev", "(", "phase", ",", "rate", "=", "rate", ",", "taus", "=", "taus", ")", "td", "=", "taus", "*", "md", "/", "np", ".", "sqrt", "(", "3.0", ")", "tde", "=", "td", "/", "np", ".", "sqrt", "(", "ns", ")", "return", "taus", ",", "td", ",", "tde", ",", "ns" ]
Time deviation. Based on modified Allan variance. .. math:: \\sigma^2_{TDEV}( \\tau ) = { \\tau^2 \\over 3 } \\sigma^2_{MDEV}( \\tau ) Note that TDEV has a unit of seconds. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Returns ------- (taus, tdev, tdev_error, ns): tuple Tuple of values taus: np.array Tau values for which td computed tdev: np.array Computed time deviations (in seconds) for each tau value tdev_errors: np.array Time deviation errors ns: np.array Values of N used in mdev_phase() Notes ----- http://en.wikipedia.org/wiki/Time_deviation
[ "Time", "deviation", ".", "Based", "on", "modified", "Allan", "variance", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L109-L155
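A minimal usage sketch for tdev(); the simulated phase data and the decade tau grid are illustrative.

import numpy as np
import allantools

phase = 1e-9 * np.cumsum(np.random.randn(100000))   # synthetic phase data in seconds
taus, tds, tde, ns = allantools.tdev(phase, rate=1.0, data_type="phase", taus="decade")
for t, td in zip(taus, tds):
    print("tau = %g s   TDEV = %.3g s" % (t, td))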
236,930
aewallin/allantools
allantools/allantools.py
mdev
def mdev(data, rate=1.0, data_type="phase", taus=None): """ Modified Allan deviation. Used to distinguish between White and Flicker Phase Modulation. .. math:: \\sigma^2_{MDEV}(m\\tau_0) = { 1 \\over 2 (m \\tau_0 )^2 (N-3m+1) } \\sum_{j=1}^{N-3m+1} \\lbrace \\sum_{i=j}^{j+m-1} {x}_{i+2m} - 2x_{i+m} + x_{i} \\rbrace^2 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Returns ------- (taus2, md, mde, ns): tuple Tuple of values taus2: np.array Tau values for which td computed md: np.array Computed mdev for each tau value mde: np.array mdev errors ns: np.array Values of N used in each mdev calculation Notes ----- see http://www.leapsecond.com/tools/adev_lib.c NIST SP 1065 eqn (14) and (15), page 17 """ phase = input_to_phase(data, rate, data_type) (phase, ms, taus_used) = tau_generator(phase, rate, taus=taus) data, taus = np.array(phase), np.array(taus) md = np.zeros_like(ms) mderr = np.zeros_like(ms) ns = np.zeros_like(ms) # this is a 'loop-unrolled' algorithm following # http://www.leapsecond.com/tools/adev_lib.c for idx, m in enumerate(ms): m = int(m) # without this we get: VisibleDeprecationWarning: # using a non-integer number instead of an integer # will result in an error in the future tau = taus_used[idx] # First loop sum d0 = phase[0:m] d1 = phase[m:2*m] d2 = phase[2*m:3*m] e = min(len(d0), len(d1), len(d2)) v = np.sum(d2[:e] - 2* d1[:e] + d0[:e]) s = v * v # Second part of sum d3 = phase[3*m:] d2 = phase[2*m:] d1 = phase[1*m:] d0 = phase[0:] e = min(len(d0), len(d1), len(d2), len(d3)) n = e + 1 v_arr = v + np.cumsum(d3[:e] - 3 * d2[:e] + 3 * d1[:e] - d0[:e]) s = s + np.sum(v_arr * v_arr) s /= 2.0 * m * m * tau * tau * n s = np.sqrt(s) md[idx] = s mderr[idx] = (s / np.sqrt(n)) ns[idx] = n return remove_small_ns(taus_used, md, mderr, ns)
python
def mdev(data, rate=1.0, data_type="phase", taus=None): """ Modified Allan deviation. Used to distinguish between White and Flicker Phase Modulation. .. math:: \\sigma^2_{MDEV}(m\\tau_0) = { 1 \\over 2 (m \\tau_0 )^2 (N-3m+1) } \\sum_{j=1}^{N-3m+1} \\lbrace \\sum_{i=j}^{j+m-1} {x}_{i+2m} - 2x_{i+m} + x_{i} \\rbrace^2 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Returns ------- (taus2, md, mde, ns): tuple Tuple of values taus2: np.array Tau values for which td computed md: np.array Computed mdev for each tau value mde: np.array mdev errors ns: np.array Values of N used in each mdev calculation Notes ----- see http://www.leapsecond.com/tools/adev_lib.c NIST SP 1065 eqn (14) and (15), page 17 """ phase = input_to_phase(data, rate, data_type) (phase, ms, taus_used) = tau_generator(phase, rate, taus=taus) data, taus = np.array(phase), np.array(taus) md = np.zeros_like(ms) mderr = np.zeros_like(ms) ns = np.zeros_like(ms) # this is a 'loop-unrolled' algorithm following # http://www.leapsecond.com/tools/adev_lib.c for idx, m in enumerate(ms): m = int(m) # without this we get: VisibleDeprecationWarning: # using a non-integer number instead of an integer # will result in an error in the future tau = taus_used[idx] # First loop sum d0 = phase[0:m] d1 = phase[m:2*m] d2 = phase[2*m:3*m] e = min(len(d0), len(d1), len(d2)) v = np.sum(d2[:e] - 2* d1[:e] + d0[:e]) s = v * v # Second part of sum d3 = phase[3*m:] d2 = phase[2*m:] d1 = phase[1*m:] d0 = phase[0:] e = min(len(d0), len(d1), len(d2), len(d3)) n = e + 1 v_arr = v + np.cumsum(d3[:e] - 3 * d2[:e] + 3 * d1[:e] - d0[:e]) s = s + np.sum(v_arr * v_arr) s /= 2.0 * m * m * tau * tau * n s = np.sqrt(s) md[idx] = s mderr[idx] = (s / np.sqrt(n)) ns[idx] = n return remove_small_ns(taus_used, md, mderr, ns)
[ "def", "mdev", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "(", "phase", ",", "ms", ",", "taus_used", ")", "=", "tau_generator", "(", "phase", ",", "rate", ",", "taus", "=", "taus", ")", "data", ",", "taus", "=", "np", ".", "array", "(", "phase", ")", ",", "np", ".", "array", "(", "taus", ")", "md", "=", "np", ".", "zeros_like", "(", "ms", ")", "mderr", "=", "np", ".", "zeros_like", "(", "ms", ")", "ns", "=", "np", ".", "zeros_like", "(", "ms", ")", "# this is a 'loop-unrolled' algorithm following", "# http://www.leapsecond.com/tools/adev_lib.c", "for", "idx", ",", "m", "in", "enumerate", "(", "ms", ")", ":", "m", "=", "int", "(", "m", ")", "# without this we get: VisibleDeprecationWarning:", "# using a non-integer number instead of an integer", "# will result in an error in the future", "tau", "=", "taus_used", "[", "idx", "]", "# First loop sum", "d0", "=", "phase", "[", "0", ":", "m", "]", "d1", "=", "phase", "[", "m", ":", "2", "*", "m", "]", "d2", "=", "phase", "[", "2", "*", "m", ":", "3", "*", "m", "]", "e", "=", "min", "(", "len", "(", "d0", ")", ",", "len", "(", "d1", ")", ",", "len", "(", "d2", ")", ")", "v", "=", "np", ".", "sum", "(", "d2", "[", ":", "e", "]", "-", "2", "*", "d1", "[", ":", "e", "]", "+", "d0", "[", ":", "e", "]", ")", "s", "=", "v", "*", "v", "# Second part of sum", "d3", "=", "phase", "[", "3", "*", "m", ":", "]", "d2", "=", "phase", "[", "2", "*", "m", ":", "]", "d1", "=", "phase", "[", "1", "*", "m", ":", "]", "d0", "=", "phase", "[", "0", ":", "]", "e", "=", "min", "(", "len", "(", "d0", ")", ",", "len", "(", "d1", ")", ",", "len", "(", "d2", ")", ",", "len", "(", "d3", ")", ")", "n", "=", "e", "+", "1", "v_arr", "=", "v", "+", "np", ".", "cumsum", "(", "d3", "[", ":", "e", "]", "-", "3", "*", "d2", "[", ":", "e", "]", "+", "3", "*", "d1", "[", ":", "e", "]", "-", "d0", "[", ":", "e", "]", ")", "s", "=", "s", "+", "np", ".", "sum", "(", "v_arr", "*", "v_arr", ")", "s", "/=", "2.0", "*", "m", "*", "m", "*", "tau", "*", "tau", "*", "n", "s", "=", "np", ".", "sqrt", "(", "s", ")", "md", "[", "idx", "]", "=", "s", "mderr", "[", "idx", "]", "=", "(", "s", "/", "np", ".", "sqrt", "(", "n", ")", ")", "ns", "[", "idx", "]", "=", "n", "return", "remove_small_ns", "(", "taus_used", ",", "md", ",", "mderr", ",", "ns", ")" ]
Modified Allan deviation. Used to distinguish between White and Flicker Phase Modulation. .. math:: \\sigma^2_{MDEV}(m\\tau_0) = { 1 \\over 2 (m \\tau_0 )^2 (N-3m+1) } \\sum_{j=1}^{N-3m+1} \\lbrace \\sum_{i=j}^{j+m-1} {x}_{i+2m} - 2x_{i+m} + x_{i} \\rbrace^2 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Returns ------- (taus2, md, mde, ns): tuple Tuple of values taus2: np.array Tau values for which td computed md: np.array Computed mdev for each tau value mde: np.array mdev errors ns: np.array Values of N used in each mdev calculation Notes ----- see http://www.leapsecond.com/tools/adev_lib.c NIST SP 1065 eqn (14) and (15), page 17
[ "Modified", "Allan", "deviation", ".", "Used", "to", "distinguish", "between", "White", "and", "Flicker", "Phase", "Modulation", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L157-L245
236,931
aewallin/allantools
allantools/allantools.py
adev
def adev(data, rate=1.0, data_type="phase", taus=None): """ Allan deviation. Classic - use only if required - relatively poor confidence. .. math:: \\sigma^2_{ADEV}(\\tau) = { 1 \\over 2 \\tau^2 } \\langle ( {x}_{n+2} - 2x_{n+1} + x_{n} )^2 \\rangle = { 1 \\over 2 (N-2) \\tau^2 } \\sum_{n=1}^{N-2} ( {x}_{n+2} - 2x_{n+1} + x_{n} )^2 where :math:`x_n` is the time-series of phase observations, spaced by the measurement interval :math:`\\tau`, and with length :math:`N`. Or alternatively calculated from a time-series of fractional frequency: .. math:: \\sigma^{2}_{ADEV}(\\tau) = { 1 \\over 2 } \\langle ( \\bar{y}_{n+1} - \\bar{y}_n )^2 \\rangle where :math:`\\bar{y}_n` is the time-series of fractional frequency at averaging time :math:`\\tau` NIST SP 1065 eqn (6) and (7), pages 14 and 15 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Returns ------- (taus2, ad, ade, ns): tuple Tuple of values taus2: np.array Tau values for which td computed ad: np.array Computed adev for each tau value ade: np.array adev errors ns: np.array Values of N used in each adev calculation """ phase = input_to_phase(data, rate, data_type) (phase, m, taus_used) = tau_generator(phase, rate, taus) ad = np.zeros_like(taus_used) ade = np.zeros_like(taus_used) adn = np.zeros_like(taus_used) for idx, mj in enumerate(m): # loop through each tau value m(j) (ad[idx], ade[idx], adn[idx]) = calc_adev_phase(phase, rate, mj, mj) return remove_small_ns(taus_used, ad, ade, adn)
python
def adev(data, rate=1.0, data_type="phase", taus=None): """ Allan deviation. Classic - use only if required - relatively poor confidence. .. math:: \\sigma^2_{ADEV}(\\tau) = { 1 \\over 2 \\tau^2 } \\langle ( {x}_{n+2} - 2x_{n+1} + x_{n} )^2 \\rangle = { 1 \\over 2 (N-2) \\tau^2 } \\sum_{n=1}^{N-2} ( {x}_{n+2} - 2x_{n+1} + x_{n} )^2 where :math:`x_n` is the time-series of phase observations, spaced by the measurement interval :math:`\\tau`, and with length :math:`N`. Or alternatively calculated from a time-series of fractional frequency: .. math:: \\sigma^{2}_{ADEV}(\\tau) = { 1 \\over 2 } \\langle ( \\bar{y}_{n+1} - \\bar{y}_n )^2 \\rangle where :math:`\\bar{y}_n` is the time-series of fractional frequency at averaging time :math:`\\tau` NIST SP 1065 eqn (6) and (7), pages 14 and 15 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Returns ------- (taus2, ad, ade, ns): tuple Tuple of values taus2: np.array Tau values for which td computed ad: np.array Computed adev for each tau value ade: np.array adev errors ns: np.array Values of N used in each adev calculation """ phase = input_to_phase(data, rate, data_type) (phase, m, taus_used) = tau_generator(phase, rate, taus) ad = np.zeros_like(taus_used) ade = np.zeros_like(taus_used) adn = np.zeros_like(taus_used) for idx, mj in enumerate(m): # loop through each tau value m(j) (ad[idx], ade[idx], adn[idx]) = calc_adev_phase(phase, rate, mj, mj) return remove_small_ns(taus_used, ad, ade, adn)
[ "def", "adev", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "(", "phase", ",", "m", ",", "taus_used", ")", "=", "tau_generator", "(", "phase", ",", "rate", ",", "taus", ")", "ad", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ade", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "adn", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "for", "idx", ",", "mj", "in", "enumerate", "(", "m", ")", ":", "# loop through each tau value m(j)", "(", "ad", "[", "idx", "]", ",", "ade", "[", "idx", "]", ",", "adn", "[", "idx", "]", ")", "=", "calc_adev_phase", "(", "phase", ",", "rate", ",", "mj", ",", "mj", ")", "return", "remove_small_ns", "(", "taus_used", ",", "ad", ",", "ade", ",", "adn", ")" ]
Allan deviation. Classic - use only if required - relatively poor confidence. .. math:: \\sigma^2_{ADEV}(\\tau) = { 1 \\over 2 \\tau^2 } \\langle ( {x}_{n+2} - 2x_{n+1} + x_{n} )^2 \\rangle = { 1 \\over 2 (N-2) \\tau^2 } \\sum_{n=1}^{N-2} ( {x}_{n+2} - 2x_{n+1} + x_{n} )^2 where :math:`x_n` is the time-series of phase observations, spaced by the measurement interval :math:`\\tau`, and with length :math:`N`. Or alternatively calculated from a time-series of fractional frequency: .. math:: \\sigma^{2}_{ADEV}(\\tau) = { 1 \\over 2 } \\langle ( \\bar{y}_{n+1} - \\bar{y}_n )^2 \\rangle where :math:`\\bar{y}_n` is the time-series of fractional frequency at averaging time :math:`\\tau` NIST SP 1065 eqn (6) and (7), pages 14 and 15 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Returns ------- (taus2, ad, ade, ns): tuple Tuple of values taus2: np.array Tau values for which td computed ad: np.array Computed adev for each tau value ade: np.array adev errors ns: np.array Values of N used in each adev calculation
[ "Allan", "deviation", ".", "Classic", "-", "use", "only", "if", "required", "-", "relatively", "poor", "confidence", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L247-L311
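A minimal usage sketch for the adev() record above. It assumes the allantools package is installed and exports adev() with the signature shown; the simulated phase data, seed, and rate are illustrative only.

```python
# Sketch only: assumes the allantools package is installed and exports adev() as shown above.
import numpy as np
import allantools

rng = np.random.default_rng(0)
phase = np.cumsum(rng.normal(scale=1e-9, size=10_000))  # toy phase record, in seconds
# taus="octave" asks for automatic tau-list generation, per the docstring above
taus2, ad, ade, ns = allantools.adev(phase, rate=1.0, data_type="phase", taus="octave")
for t, d, n in zip(taus2, ad, ns):
    print(f"tau = {t:8.1f} s   adev = {d:.3e}   n = {n:.0f}")
```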
236,932
aewallin/allantools
allantools/allantools.py
ohdev
def ohdev(data, rate=1.0, data_type="phase", taus=None): """ Overlapping Hadamard deviation. Better confidence than normal Hadamard. .. math:: \\sigma^2_{OHDEV}(m\\tau_0) = { 1 \\over 6 (m \\tau_0 )^2 (N-3m) } \\sum_{i=1}^{N-3m} ( {x}_{i+3m} - 3x_{i+2m} + 3x_{i+m} - x_{i} )^2 where :math:`x_i` is the time-series of phase observations, spaced by the measurement interval :math:`\\tau_0`, and with length :math:`N`. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Returns ------- (taus2, hd, hde, ns): tuple Tuple of values taus2: np.array Tau values for which td computed hd: np.array Computed hdev for each tau value hde: np.array hdev errors ns: np.array Values of N used in each hdev calculation """ phase = input_to_phase(data, rate, data_type) (phase, m, taus_used) = tau_generator(phase, rate, taus) hdevs = np.zeros_like(taus_used) hdeverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) for idx, mj in enumerate(m): (hdevs[idx], hdeverrs[idx], ns[idx]) = calc_hdev_phase(phase, rate, mj, 1) return remove_small_ns(taus_used, hdevs, hdeverrs, ns)
python
def ohdev(data, rate=1.0, data_type="phase", taus=None): """ Overlapping Hadamard deviation. Better confidence than normal Hadamard. .. math:: \\sigma^2_{OHDEV}(m\\tau_0) = { 1 \\over 6 (m \\tau_0 )^2 (N-3m) } \\sum_{i=1}^{N-3m} ( {x}_{i+3m} - 3x_{i+2m} + 3x_{i+m} - x_{i} )^2 where :math:`x_i` is the time-series of phase observations, spaced by the measurement interval :math:`\\tau_0`, and with length :math:`N`. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Returns ------- (taus2, hd, hde, ns): tuple Tuple of values taus2: np.array Tau values for which td computed hd: np.array Computed hdev for each tau value hde: np.array hdev errors ns: np.array Values of N used in each hdev calculation """ phase = input_to_phase(data, rate, data_type) (phase, m, taus_used) = tau_generator(phase, rate, taus) hdevs = np.zeros_like(taus_used) hdeverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) for idx, mj in enumerate(m): (hdevs[idx], hdeverrs[idx], ns[idx]) = calc_hdev_phase(phase, rate, mj, 1) return remove_small_ns(taus_used, hdevs, hdeverrs, ns)
[ "def", "ohdev", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "(", "phase", ",", "m", ",", "taus_used", ")", "=", "tau_generator", "(", "phase", ",", "rate", ",", "taus", ")", "hdevs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "hdeverrs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ns", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "for", "idx", ",", "mj", "in", "enumerate", "(", "m", ")", ":", "(", "hdevs", "[", "idx", "]", ",", "hdeverrs", "[", "idx", "]", ",", "ns", "[", "idx", "]", ")", "=", "calc_hdev_phase", "(", "phase", ",", "rate", ",", "mj", ",", "1", ")", "return", "remove_small_ns", "(", "taus_used", ",", "hdevs", ",", "hdeverrs", ",", "ns", ")" ]
Overlapping Hadamard deviation. Better confidence than normal Hadamard. .. math:: \\sigma^2_{OHDEV}(m\\tau_0) = { 1 \\over 6 (m \\tau_0 )^2 (N-3m) } \\sum_{i=1}^{N-3m} ( {x}_{i+3m} - 3x_{i+2m} + 3x_{i+m} - x_{i} )^2 where :math:`x_i` is the time-series of phase observations, spaced by the measurement interval :math:`\\tau_0`, and with length :math:`N`. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Returns ------- (taus2, hd, hde, ns): tuple Tuple of values taus2: np.array Tau values for which td computed hd: np.array Computed hdev for each tau value hde: np.array hdev errors ns: np.array Values of N used in each hdev calculation
[ "Overlapping", "Hadamard", "deviation", ".", "Better", "confidence", "than", "normal", "Hadamard", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L420-L471
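The same call pattern works for ohdev() with fractional-frequency input; again a hedged sketch that assumes the package-level ohdev() matches the record above, with made-up data.

```python
# Sketch only: ohdev() called with fractional-frequency data instead of phase.
import numpy as np
import allantools

freq = np.random.default_rng(1).normal(scale=1e-11, size=5_000)  # toy fractional frequency
taus2, hd, hde, ns = allantools.ohdev(freq, rate=1.0, data_type="freq", taus="decade")
print(taus2)
print(hd)
```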
236,933
aewallin/allantools
allantools/allantools.py
calc_hdev_phase
def calc_hdev_phase(phase, rate, mj, stride): """ main calculation fungtion for HDEV and OHDEV Parameters ---------- phase: np.array Phase data in seconds. rate: float The sampling rate for phase or frequency, in Hz mj: int M index value for stride stride: int Size of stride Returns ------- (dev, deverr, n): tuple Array of computed values. Notes ----- http://www.leapsecond.com/tools/adev_lib.c 1 N-3 s2y(t) = --------------- sum [x(i+3) - 3x(i+2) + 3x(i+1) - x(i) ]^2 6*tau^2 (N-3m) i=1 N=M+1 phase measurements m is averaging factor NIST SP 1065 eqn (18) and (20) pages 20 and 21 """ tau0 = 1.0 / float(rate) mj = int(mj) stride = int(stride) d3 = phase[3 * mj::stride] d2 = phase[2 * mj::stride] d1 = phase[1 * mj::stride] d0 = phase[::stride] n = min(len(d0), len(d1), len(d2), len(d3)) v_arr = d3[:n] - 3 * d2[:n] + 3 * d1[:n] - d0[:n] s = np.sum(v_arr * v_arr) if n == 0: n = 1 h = np.sqrt(s / 6.0 / float(n)) / float(tau0 * mj) e = h / np.sqrt(n) return h, e, n
python
def calc_hdev_phase(phase, rate, mj, stride): """ main calculation fungtion for HDEV and OHDEV Parameters ---------- phase: np.array Phase data in seconds. rate: float The sampling rate for phase or frequency, in Hz mj: int M index value for stride stride: int Size of stride Returns ------- (dev, deverr, n): tuple Array of computed values. Notes ----- http://www.leapsecond.com/tools/adev_lib.c 1 N-3 s2y(t) = --------------- sum [x(i+3) - 3x(i+2) + 3x(i+1) - x(i) ]^2 6*tau^2 (N-3m) i=1 N=M+1 phase measurements m is averaging factor NIST SP 1065 eqn (18) and (20) pages 20 and 21 """ tau0 = 1.0 / float(rate) mj = int(mj) stride = int(stride) d3 = phase[3 * mj::stride] d2 = phase[2 * mj::stride] d1 = phase[1 * mj::stride] d0 = phase[::stride] n = min(len(d0), len(d1), len(d2), len(d3)) v_arr = d3[:n] - 3 * d2[:n] + 3 * d1[:n] - d0[:n] s = np.sum(v_arr * v_arr) if n == 0: n = 1 h = np.sqrt(s / 6.0 / float(n)) / float(tau0 * mj) e = h / np.sqrt(n) return h, e, n
[ "def", "calc_hdev_phase", "(", "phase", ",", "rate", ",", "mj", ",", "stride", ")", ":", "tau0", "=", "1.0", "/", "float", "(", "rate", ")", "mj", "=", "int", "(", "mj", ")", "stride", "=", "int", "(", "stride", ")", "d3", "=", "phase", "[", "3", "*", "mj", ":", ":", "stride", "]", "d2", "=", "phase", "[", "2", "*", "mj", ":", ":", "stride", "]", "d1", "=", "phase", "[", "1", "*", "mj", ":", ":", "stride", "]", "d0", "=", "phase", "[", ":", ":", "stride", "]", "n", "=", "min", "(", "len", "(", "d0", ")", ",", "len", "(", "d1", ")", ",", "len", "(", "d2", ")", ",", "len", "(", "d3", ")", ")", "v_arr", "=", "d3", "[", ":", "n", "]", "-", "3", "*", "d2", "[", ":", "n", "]", "+", "3", "*", "d1", "[", ":", "n", "]", "-", "d0", "[", ":", "n", "]", "s", "=", "np", ".", "sum", "(", "v_arr", "*", "v_arr", ")", "if", "n", "==", "0", ":", "n", "=", "1", "h", "=", "np", ".", "sqrt", "(", "s", "/", "6.0", "/", "float", "(", "n", ")", ")", "/", "float", "(", "tau0", "*", "mj", ")", "e", "=", "h", "/", "np", ".", "sqrt", "(", "n", ")", "return", "h", ",", "e", ",", "n" ]
main calculation function for HDEV and OHDEV Parameters ---------- phase: np.array Phase data in seconds. rate: float The sampling rate for phase or frequency, in Hz mj: int M index value for stride stride: int Size of stride Returns ------- (dev, deverr, n): tuple Array of computed values. Notes ----- http://www.leapsecond.com/tools/adev_lib.c s2y(t) = 1 / ( 6*tau^2 * (N-3m) ) * sum_{i=1}^{N-3} [ x(i+3) - 3x(i+2) + 3x(i+1) - x(i) ]^2 N=M+1 phase measurements m is averaging factor NIST SP 1065 eqn (18) and (20) pages 20 and 21
[ "main", "calculation", "fungtion", "for", "HDEV", "and", "OHDEV" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L515-L566
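A self-contained NumPy sketch of the overlapping (stride = 1) case of the third-difference sum computed by calc_hdev_phase() above; the name hdev_at_m is made up here for illustration.

```python
# Standalone sketch of the stride-1 (overlapping) Hadamard computation shown above.
import numpy as np

def hdev_at_m(phase, rate, m):
    tau0 = 1.0 / float(rate)
    d3, d2, d1, d0 = phase[3 * m:], phase[2 * m:], phase[m:], phase
    n = min(len(d0), len(d1), len(d2), len(d3))
    v = d3[:n] - 3 * d2[:n] + 3 * d1[:n] - d0[:n]   # x(i+3m) - 3x(i+2m) + 3x(i+m) - x(i)
    s = np.sum(v * v)
    n = max(n, 1)
    h = np.sqrt(s / 6.0 / n) / (tau0 * m)           # NIST SP 1065 eqns (18) and (20)
    return h, h / np.sqrt(n), n

phase = np.cumsum(np.random.default_rng(2).normal(scale=1e-9, size=2_000))
print(hdev_at_m(phase, rate=1.0, m=4))
```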
236,934
aewallin/allantools
allantools/allantools.py
totdev
def totdev(data, rate=1.0, data_type="phase", taus=None): """ Total deviation. Better confidence at long averages for Allan. .. math:: \\sigma^2_{TOTDEV}( m\\tau_0 ) = { 1 \\over 2 (m\\tau_0)^2 (N-2) } \\sum_{i=2}^{N-1} ( {x}^*_{i-m} - 2x^*_{i} + x^*_{i+m} )^2 Where :math:`x^*_i` is a new time-series of length :math:`3N-4` derived from the original phase time-series :math:`x_n` of length :math:`N` by reflection at both ends. FIXME: better description of reflection operation. the original data x is in the center of x*: x*(1-j) = 2x(1) - x(1+j) for j=1..N-2 x*(i) = x(i) for i=1..N x*(N+j) = 2x(N) - x(N-j) for j=1..N-2 x* has length 3N-4 tau = m*tau0 FIXME: bias correction http://www.wriley.com/CI2.pdf page 5 Parameters ---------- phase: np.array Phase data in seconds. Provide either phase or frequency. frequency: np.array Fractional frequency data (nondimensional). Provide either frequency or phase. rate: float The sampling rate for phase or frequency, in Hz taus: np.array Array of tau values for which to compute measurement References ---------- David A. Howe, *The total deviation approach to long-term characterization of frequency stability*, IEEE tr. UFFC vol 47 no 5 (2000) NIST SP 1065 eqn (25) page 23 """ phase = input_to_phase(data, rate, data_type) (phase, m, taus_used) = tau_generator(phase, rate, taus) N = len(phase) # totdev requires a new dataset # Begin by adding reflected data before dataset x1 = 2.0 * phase[0] * np.ones((N - 2,)) x1 = x1 - phase[1:-1] x1 = x1[::-1] # Reflected data at end of dataset x2 = 2.0 * phase[-1] * np.ones((N - 2,)) x2 = x2 - phase[1:-1][::-1] # check length of new dataset assert len(x1)+len(phase)+len(x2) == 3*N - 4 # Combine into a single array x = np.zeros((3*N - 4)) x[0:N-2] = x1 x[N-2:2*(N-2)+2] = phase # original data in the middle x[2*(N-2)+2:] = x2 devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) mid = len(x1) for idx, mj in enumerate(m): mj = int(mj) d0 = x[mid + 1:] d1 = x[mid + mj + 1:] d1n = x[mid - mj + 1:] e = min(len(d0), len(d1), len(d1n)) v_arr = d1n[:e] - 2.0 * d0[:e] + d1[:e] dev = np.sum(v_arr[:mid] * v_arr[:mid]) dev /= float(2 * pow(mj / rate, 2) * (N - 2)) dev = np.sqrt(dev) devs[idx] = dev deverrs[idx] = dev / np.sqrt(mid) ns[idx] = mid return remove_small_ns(taus_used, devs, deverrs, ns)
python
def totdev(data, rate=1.0, data_type="phase", taus=None): """ Total deviation. Better confidence at long averages for Allan. .. math:: \\sigma^2_{TOTDEV}( m\\tau_0 ) = { 1 \\over 2 (m\\tau_0)^2 (N-2) } \\sum_{i=2}^{N-1} ( {x}^*_{i-m} - 2x^*_{i} + x^*_{i+m} )^2 Where :math:`x^*_i` is a new time-series of length :math:`3N-4` derived from the original phase time-series :math:`x_n` of length :math:`N` by reflection at both ends. FIXME: better description of reflection operation. the original data x is in the center of x*: x*(1-j) = 2x(1) - x(1+j) for j=1..N-2 x*(i) = x(i) for i=1..N x*(N+j) = 2x(N) - x(N-j) for j=1..N-2 x* has length 3N-4 tau = m*tau0 FIXME: bias correction http://www.wriley.com/CI2.pdf page 5 Parameters ---------- phase: np.array Phase data in seconds. Provide either phase or frequency. frequency: np.array Fractional frequency data (nondimensional). Provide either frequency or phase. rate: float The sampling rate for phase or frequency, in Hz taus: np.array Array of tau values for which to compute measurement References ---------- David A. Howe, *The total deviation approach to long-term characterization of frequency stability*, IEEE tr. UFFC vol 47 no 5 (2000) NIST SP 1065 eqn (25) page 23 """ phase = input_to_phase(data, rate, data_type) (phase, m, taus_used) = tau_generator(phase, rate, taus) N = len(phase) # totdev requires a new dataset # Begin by adding reflected data before dataset x1 = 2.0 * phase[0] * np.ones((N - 2,)) x1 = x1 - phase[1:-1] x1 = x1[::-1] # Reflected data at end of dataset x2 = 2.0 * phase[-1] * np.ones((N - 2,)) x2 = x2 - phase[1:-1][::-1] # check length of new dataset assert len(x1)+len(phase)+len(x2) == 3*N - 4 # Combine into a single array x = np.zeros((3*N - 4)) x[0:N-2] = x1 x[N-2:2*(N-2)+2] = phase # original data in the middle x[2*(N-2)+2:] = x2 devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) mid = len(x1) for idx, mj in enumerate(m): mj = int(mj) d0 = x[mid + 1:] d1 = x[mid + mj + 1:] d1n = x[mid - mj + 1:] e = min(len(d0), len(d1), len(d1n)) v_arr = d1n[:e] - 2.0 * d0[:e] + d1[:e] dev = np.sum(v_arr[:mid] * v_arr[:mid]) dev /= float(2 * pow(mj / rate, 2) * (N - 2)) dev = np.sqrt(dev) devs[idx] = dev deverrs[idx] = dev / np.sqrt(mid) ns[idx] = mid return remove_small_ns(taus_used, devs, deverrs, ns)
[ "def", "totdev", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "(", "phase", ",", "m", ",", "taus_used", ")", "=", "tau_generator", "(", "phase", ",", "rate", ",", "taus", ")", "N", "=", "len", "(", "phase", ")", "# totdev requires a new dataset", "# Begin by adding reflected data before dataset", "x1", "=", "2.0", "*", "phase", "[", "0", "]", "*", "np", ".", "ones", "(", "(", "N", "-", "2", ",", ")", ")", "x1", "=", "x1", "-", "phase", "[", "1", ":", "-", "1", "]", "x1", "=", "x1", "[", ":", ":", "-", "1", "]", "# Reflected data at end of dataset", "x2", "=", "2.0", "*", "phase", "[", "-", "1", "]", "*", "np", ".", "ones", "(", "(", "N", "-", "2", ",", ")", ")", "x2", "=", "x2", "-", "phase", "[", "1", ":", "-", "1", "]", "[", ":", ":", "-", "1", "]", "# check length of new dataset", "assert", "len", "(", "x1", ")", "+", "len", "(", "phase", ")", "+", "len", "(", "x2", ")", "==", "3", "*", "N", "-", "4", "# Combine into a single array", "x", "=", "np", ".", "zeros", "(", "(", "3", "*", "N", "-", "4", ")", ")", "x", "[", "0", ":", "N", "-", "2", "]", "=", "x1", "x", "[", "N", "-", "2", ":", "2", "*", "(", "N", "-", "2", ")", "+", "2", "]", "=", "phase", "# original data in the middle", "x", "[", "2", "*", "(", "N", "-", "2", ")", "+", "2", ":", "]", "=", "x2", "devs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "deverrs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ns", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "mid", "=", "len", "(", "x1", ")", "for", "idx", ",", "mj", "in", "enumerate", "(", "m", ")", ":", "mj", "=", "int", "(", "mj", ")", "d0", "=", "x", "[", "mid", "+", "1", ":", "]", "d1", "=", "x", "[", "mid", "+", "mj", "+", "1", ":", "]", "d1n", "=", "x", "[", "mid", "-", "mj", "+", "1", ":", "]", "e", "=", "min", "(", "len", "(", "d0", ")", ",", "len", "(", "d1", ")", ",", "len", "(", "d1n", ")", ")", "v_arr", "=", "d1n", "[", ":", "e", "]", "-", "2.0", "*", "d0", "[", ":", "e", "]", "+", "d1", "[", ":", "e", "]", "dev", "=", "np", ".", "sum", "(", "v_arr", "[", ":", "mid", "]", "*", "v_arr", "[", ":", "mid", "]", ")", "dev", "/=", "float", "(", "2", "*", "pow", "(", "mj", "/", "rate", ",", "2", ")", "*", "(", "N", "-", "2", ")", ")", "dev", "=", "np", ".", "sqrt", "(", "dev", ")", "devs", "[", "idx", "]", "=", "dev", "deverrs", "[", "idx", "]", "=", "dev", "/", "np", ".", "sqrt", "(", "mid", ")", "ns", "[", "idx", "]", "=", "mid", "return", "remove_small_ns", "(", "taus_used", ",", "devs", ",", "deverrs", ",", "ns", ")" ]
Total deviation. Better confidence at long averages for Allan. .. math:: \\sigma^2_{TOTDEV}( m\\tau_0 ) = { 1 \\over 2 (m\\tau_0)^2 (N-2) } \\sum_{i=2}^{N-1} ( {x}^*_{i-m} - 2x^*_{i} + x^*_{i+m} )^2 Where :math:`x^*_i` is a new time-series of length :math:`3N-4` derived from the original phase time-series :math:`x_n` of length :math:`N` by reflection at both ends. FIXME: better description of reflection operation. the original data x is in the center of x*: x*(1-j) = 2x(1) - x(1+j) for j=1..N-2 x*(i) = x(i) for i=1..N x*(N+j) = 2x(N) - x(N-j) for j=1..N-2 x* has length 3N-4 tau = m*tau0 FIXME: bias correction http://www.wriley.com/CI2.pdf page 5 Parameters ---------- phase: np.array Phase data in seconds. Provide either phase or frequency. frequency: np.array Fractional frequency data (nondimensional). Provide either frequency or phase. rate: float The sampling rate for phase or frequency, in Hz taus: np.array Array of tau values for which to compute measurement References ---------- David A. Howe, *The total deviation approach to long-term characterization of frequency stability*, IEEE tr. UFFC vol 47 no 5 (2000) NIST SP 1065 eqn (25) page 23
[ "Total", "deviation", ".", "Better", "confidence", "at", "long", "averages", "for", "Allan", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L568-L660
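A short standalone sketch of the end-reflection step described in the totdev() record above: the phase series of length N is extended to length 3N-4 by reflecting it about both end points. reflect_extend is an illustrative name.

```python
# Standalone sketch of the reflection/extension used by totdev() above.
import numpy as np

def reflect_extend(phase):
    N = len(phase)
    x1 = (2.0 * phase[0] - phase[1:-1])[::-1]   # reflected about the first sample
    x2 = 2.0 * phase[-1] - phase[1:-1][::-1]    # reflected about the last sample
    x = np.concatenate([x1, phase, x2])
    assert len(x) == 3 * N - 4
    return x

print(reflect_extend(np.array([0.0, 1.0, 3.0, 2.0, 5.0])))
```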
236,935
aewallin/allantools
allantools/allantools.py
mtotdev
def mtotdev(data, rate=1.0, data_type="phase", taus=None): """ PRELIMINARY - REQUIRES FURTHER TESTING. Modified Total deviation. Better confidence at long averages for modified Allan FIXME: bias-correction http://www.wriley.com/CI2.pdf page 6 The variance is scaled up (divided by this number) based on the noise-type identified. WPM 0.94 FPM 0.83 WFM 0.73 FFM 0.70 RWFM 0.69 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. NIST SP 1065 eqn (27) page 25 """ phase = input_to_phase(data, rate, data_type) (phase, ms, taus_used) = tau_generator(phase, rate, taus, maximum_m=float(len(phase))/3.0) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) for idx, mj in enumerate(ms): devs[idx], deverrs[idx], ns[idx] = calc_mtotdev_phase(phase, rate, mj) return remove_small_ns(taus_used, devs, deverrs, ns)
python
def mtotdev(data, rate=1.0, data_type="phase", taus=None): """ PRELIMINARY - REQUIRES FURTHER TESTING. Modified Total deviation. Better confidence at long averages for modified Allan FIXME: bias-correction http://www.wriley.com/CI2.pdf page 6 The variance is scaled up (divided by this number) based on the noise-type identified. WPM 0.94 FPM 0.83 WFM 0.73 FFM 0.70 RWFM 0.69 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. NIST SP 1065 eqn (27) page 25 """ phase = input_to_phase(data, rate, data_type) (phase, ms, taus_used) = tau_generator(phase, rate, taus, maximum_m=float(len(phase))/3.0) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) for idx, mj in enumerate(ms): devs[idx], deverrs[idx], ns[idx] = calc_mtotdev_phase(phase, rate, mj) return remove_small_ns(taus_used, devs, deverrs, ns)
[ "def", "mtotdev", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "(", "phase", ",", "ms", ",", "taus_used", ")", "=", "tau_generator", "(", "phase", ",", "rate", ",", "taus", ",", "maximum_m", "=", "float", "(", "len", "(", "phase", ")", ")", "/", "3.0", ")", "devs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "deverrs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ns", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "for", "idx", ",", "mj", "in", "enumerate", "(", "ms", ")", ":", "devs", "[", "idx", "]", ",", "deverrs", "[", "idx", "]", ",", "ns", "[", "idx", "]", "=", "calc_mtotdev_phase", "(", "phase", ",", "rate", ",", "mj", ")", "return", "remove_small_ns", "(", "taus_used", ",", "devs", ",", "deverrs", ",", "ns", ")" ]
PRELIMINARY - REQUIRES FURTHER TESTING. Modified Total deviation. Better confidence at long averages for modified Allan FIXME: bias-correction http://www.wriley.com/CI2.pdf page 6 The variance is scaled up (divided by this number) based on the noise-type identified. WPM 0.94 FPM 0.83 WFM 0.73 FFM 0.70 RWFM 0.69 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. NIST SP 1065 eqn (27) page 25
[ "PRELIMINARY", "-", "REQUIRES", "FURTHER", "TESTING", ".", "Modified", "Total", "deviation", ".", "Better", "confidence", "at", "long", "averages", "for", "modified", "Allan" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L674-L716
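A hedged usage sketch for mtotdev(), which the record itself marks as preliminary; note that the averaging factor is capped near len(phase)/3, and the data here is synthetic.

```python
# Sketch only: assumes allantools exports mtotdev() as shown above (marked preliminary).
import numpy as np
import allantools

phase = np.cumsum(np.random.default_rng(3).normal(scale=1e-9, size=1_000))
taus2, devs, errs, ns = allantools.mtotdev(phase, rate=1.0, data_type="phase", taus="octave")
print(list(zip(taus2, devs)))
```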
236,936
aewallin/allantools
allantools/allantools.py
htotdev
def htotdev(data, rate=1.0, data_type="phase", taus=None): """ PRELIMINARY - REQUIRES FURTHER TESTING. Hadamard Total deviation. Better confidence at long averages for Hadamard deviation FIXME: bias corrections from http://www.wriley.com/CI2.pdf W FM 0.995 alpha= 0 F FM 0.851 alpha=-1 RW FM 0.771 alpha=-2 FW FM 0.717 alpha=-3 RR FM 0.679 alpha=-4 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. """ if data_type == "phase": phase = data freq = phase2frequency(phase, rate) elif data_type == "freq": phase = frequency2phase(data, rate) freq = data else: raise Exception("unknown data_type: " + data_type) rate = float(rate) (freq, ms, taus_used) = tau_generator(freq, rate, taus, maximum_m=float(len(freq))/3.0) phase = np.array(phase) freq = np.array(freq) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) # NOTE at mj==1 we use ohdev(), based on comment from here: # http://www.wriley.com/paper4ht.htm # "For best consistency, the overlapping Hadamard variance is used # instead of the Hadamard total variance at m=1" # FIXME: this uses both freq and phase datasets, which uses double the memory really needed... for idx, mj in enumerate(ms): if int(mj) == 1: (devs[idx], deverrs[idx], ns[idx]) = calc_hdev_phase(phase, rate, mj, 1) else: (devs[idx], deverrs[idx], ns[idx]) = calc_htotdev_freq(freq, mj) return remove_small_ns(taus_used, devs, deverrs, ns)
python
def htotdev(data, rate=1.0, data_type="phase", taus=None): """ PRELIMINARY - REQUIRES FURTHER TESTING. Hadamard Total deviation. Better confidence at long averages for Hadamard deviation FIXME: bias corrections from http://www.wriley.com/CI2.pdf W FM 0.995 alpha= 0 F FM 0.851 alpha=-1 RW FM 0.771 alpha=-2 FW FM 0.717 alpha=-3 RR FM 0.679 alpha=-4 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. """ if data_type == "phase": phase = data freq = phase2frequency(phase, rate) elif data_type == "freq": phase = frequency2phase(data, rate) freq = data else: raise Exception("unknown data_type: " + data_type) rate = float(rate) (freq, ms, taus_used) = tau_generator(freq, rate, taus, maximum_m=float(len(freq))/3.0) phase = np.array(phase) freq = np.array(freq) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) # NOTE at mj==1 we use ohdev(), based on comment from here: # http://www.wriley.com/paper4ht.htm # "For best consistency, the overlapping Hadamard variance is used # instead of the Hadamard total variance at m=1" # FIXME: this uses both freq and phase datasets, which uses double the memory really needed... for idx, mj in enumerate(ms): if int(mj) == 1: (devs[idx], deverrs[idx], ns[idx]) = calc_hdev_phase(phase, rate, mj, 1) else: (devs[idx], deverrs[idx], ns[idx]) = calc_htotdev_freq(freq, mj) return remove_small_ns(taus_used, devs, deverrs, ns)
[ "def", "htotdev", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "if", "data_type", "==", "\"phase\"", ":", "phase", "=", "data", "freq", "=", "phase2frequency", "(", "phase", ",", "rate", ")", "elif", "data_type", "==", "\"freq\"", ":", "phase", "=", "frequency2phase", "(", "data", ",", "rate", ")", "freq", "=", "data", "else", ":", "raise", "Exception", "(", "\"unknown data_type: \"", "+", "data_type", ")", "rate", "=", "float", "(", "rate", ")", "(", "freq", ",", "ms", ",", "taus_used", ")", "=", "tau_generator", "(", "freq", ",", "rate", ",", "taus", ",", "maximum_m", "=", "float", "(", "len", "(", "freq", ")", ")", "/", "3.0", ")", "phase", "=", "np", ".", "array", "(", "phase", ")", "freq", "=", "np", ".", "array", "(", "freq", ")", "devs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "deverrs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ns", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "# NOTE at mj==1 we use ohdev(), based on comment from here:", "# http://www.wriley.com/paper4ht.htm", "# \"For best consistency, the overlapping Hadamard variance is used", "# instead of the Hadamard total variance at m=1\"", "# FIXME: this uses both freq and phase datasets, which uses double the memory really needed...", "for", "idx", ",", "mj", "in", "enumerate", "(", "ms", ")", ":", "if", "int", "(", "mj", ")", "==", "1", ":", "(", "devs", "[", "idx", "]", ",", "deverrs", "[", "idx", "]", ",", "ns", "[", "idx", "]", ")", "=", "calc_hdev_phase", "(", "phase", ",", "rate", ",", "mj", ",", "1", ")", "else", ":", "(", "devs", "[", "idx", "]", ",", "deverrs", "[", "idx", "]", ",", "ns", "[", "idx", "]", ")", "=", "calc_htotdev_freq", "(", "freq", ",", "mj", ")", "return", "remove_small_ns", "(", "taus_used", ",", "devs", ",", "deverrs", ",", "ns", ")" ]
PRELIMINARY - REQUIRES FURTHER TESTING. Hadamard Total deviation. Better confidence at long averages for Hadamard deviation FIXME: bias corrections from http://www.wriley.com/CI2.pdf W FM 0.995 alpha= 0 F FM 0.851 alpha=-1 RW FM 0.771 alpha=-2 FW FM 0.717 alpha=-3 RR FM 0.679 alpha=-4 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation.
[ "PRELIMINARY", "-", "REQUIRES", "FURTHER", "TESTING", ".", "Hadamard", "Total", "deviation", ".", "Better", "confidence", "at", "long", "averages", "for", "Hadamard", "deviation" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L787-L847
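Per the note in the htotdev() code above, the overlapping Hadamard deviation is used at m = 1, so the first point of htotdev() and ohdev() should coincide; the sketch below is a hedged check of that on synthetic data.

```python
# Sketch only: checks the m == 1 fallback to ohdev() noted in the htotdev() code above.
import numpy as np
import allantools

freq = np.random.default_rng(4).normal(scale=1e-11, size=500)  # toy fractional frequency
t_h, hd, _, _ = allantools.htotdev(freq, rate=1.0, data_type="freq", taus="octave")
t_o, od, _, _ = allantools.ohdev(freq, rate=1.0, data_type="freq", taus="octave")
print(hd[0], od[0])  # expected to agree at the smallest tau
```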
236,937
aewallin/allantools
allantools/allantools.py
theo1
def theo1(data, rate=1.0, data_type="phase", taus=None): """ PRELIMINARY - REQUIRES FURTHER TESTING. Theo1 is a two-sample variance with improved confidence and extended averaging factor range. .. math:: \\sigma^2_{THEO1}(m\\tau_0) = { 1 \\over (m \\tau_0 )^2 (N-m) } \\sum_{i=1}^{N-m} \\sum_{\\delta=0}^{m/2-1} {1\\over m/2-\\delta}\\lbrace ({x}_{i} - x_{i-\\delta +m/2}) + (x_{i+m}- x_{i+\\delta +m/2}) \\rbrace^2 Where :math:`10<=m<=N-1` is even. FIXME: bias correction NIST SP 1065 eq (30) page 29 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. """ phase = input_to_phase(data, rate, data_type) tau0 = 1.0/rate (phase, ms, taus_used) = tau_generator(phase, rate, taus, even=True) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) N = len(phase) for idx, m in enumerate(ms): m = int(m) # to avoid: VisibleDeprecationWarning: using a # non-integer number instead of an integer will # result in an error in the future assert m % 2 == 0 # m must be even dev = 0 n = 0 for i in range(int(N-m)): s = 0 for d in range(int(m/2)): # inner sum pre = 1.0 / (float(m)/2 - float(d)) s += pre*pow(phase[i]-phase[i-d+int(m/2)] + phase[i+m]-phase[i+d+int(m/2)], 2) n = n+1 dev += s assert n == (N-m)*m/2 # N-m outer sums, m/2 inner sums dev = dev/(0.75*(N-m)*pow(m*tau0, 2)) # factor 0.75 used here? http://tf.nist.gov/general/pdf/1990.pdf # but not here? http://tf.nist.gov/timefreq/general/pdf/2220.pdf page 29 devs[idx] = np.sqrt(dev) deverrs[idx] = devs[idx] / np.sqrt(N-m) ns[idx] = n return remove_small_ns(taus_used, devs, deverrs, ns)
python
def theo1(data, rate=1.0, data_type="phase", taus=None): """ PRELIMINARY - REQUIRES FURTHER TESTING. Theo1 is a two-sample variance with improved confidence and extended averaging factor range. .. math:: \\sigma^2_{THEO1}(m\\tau_0) = { 1 \\over (m \\tau_0 )^2 (N-m) } \\sum_{i=1}^{N-m} \\sum_{\\delta=0}^{m/2-1} {1\\over m/2-\\delta}\\lbrace ({x}_{i} - x_{i-\\delta +m/2}) + (x_{i+m}- x_{i+\\delta +m/2}) \\rbrace^2 Where :math:`10<=m<=N-1` is even. FIXME: bias correction NIST SP 1065 eq (30) page 29 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. """ phase = input_to_phase(data, rate, data_type) tau0 = 1.0/rate (phase, ms, taus_used) = tau_generator(phase, rate, taus, even=True) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) N = len(phase) for idx, m in enumerate(ms): m = int(m) # to avoid: VisibleDeprecationWarning: using a # non-integer number instead of an integer will # result in an error in the future assert m % 2 == 0 # m must be even dev = 0 n = 0 for i in range(int(N-m)): s = 0 for d in range(int(m/2)): # inner sum pre = 1.0 / (float(m)/2 - float(d)) s += pre*pow(phase[i]-phase[i-d+int(m/2)] + phase[i+m]-phase[i+d+int(m/2)], 2) n = n+1 dev += s assert n == (N-m)*m/2 # N-m outer sums, m/2 inner sums dev = dev/(0.75*(N-m)*pow(m*tau0, 2)) # factor 0.75 used here? http://tf.nist.gov/general/pdf/1990.pdf # but not here? http://tf.nist.gov/timefreq/general/pdf/2220.pdf page 29 devs[idx] = np.sqrt(dev) deverrs[idx] = devs[idx] / np.sqrt(N-m) ns[idx] = n return remove_small_ns(taus_used, devs, deverrs, ns)
[ "def", "theo1", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "tau0", "=", "1.0", "/", "rate", "(", "phase", ",", "ms", ",", "taus_used", ")", "=", "tau_generator", "(", "phase", ",", "rate", ",", "taus", ",", "even", "=", "True", ")", "devs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "deverrs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ns", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "N", "=", "len", "(", "phase", ")", "for", "idx", ",", "m", "in", "enumerate", "(", "ms", ")", ":", "m", "=", "int", "(", "m", ")", "# to avoid: VisibleDeprecationWarning: using a", "# non-integer number instead of an integer will", "# result in an error in the future", "assert", "m", "%", "2", "==", "0", "# m must be even", "dev", "=", "0", "n", "=", "0", "for", "i", "in", "range", "(", "int", "(", "N", "-", "m", ")", ")", ":", "s", "=", "0", "for", "d", "in", "range", "(", "int", "(", "m", "/", "2", ")", ")", ":", "# inner sum", "pre", "=", "1.0", "/", "(", "float", "(", "m", ")", "/", "2", "-", "float", "(", "d", ")", ")", "s", "+=", "pre", "*", "pow", "(", "phase", "[", "i", "]", "-", "phase", "[", "i", "-", "d", "+", "int", "(", "m", "/", "2", ")", "]", "+", "phase", "[", "i", "+", "m", "]", "-", "phase", "[", "i", "+", "d", "+", "int", "(", "m", "/", "2", ")", "]", ",", "2", ")", "n", "=", "n", "+", "1", "dev", "+=", "s", "assert", "n", "==", "(", "N", "-", "m", ")", "*", "m", "/", "2", "# N-m outer sums, m/2 inner sums", "dev", "=", "dev", "/", "(", "0.75", "*", "(", "N", "-", "m", ")", "*", "pow", "(", "m", "*", "tau0", ",", "2", ")", ")", "# factor 0.75 used here? http://tf.nist.gov/general/pdf/1990.pdf", "# but not here? http://tf.nist.gov/timefreq/general/pdf/2220.pdf page 29", "devs", "[", "idx", "]", "=", "np", ".", "sqrt", "(", "dev", ")", "deverrs", "[", "idx", "]", "=", "devs", "[", "idx", "]", "/", "np", ".", "sqrt", "(", "N", "-", "m", ")", "ns", "[", "idx", "]", "=", "n", "return", "remove_small_ns", "(", "taus_used", ",", "devs", ",", "deverrs", ",", "ns", ")" ]
PRELIMINARY - REQUIRES FURTHER TESTING. Theo1 is a two-sample variance with improved confidence and extended averaging factor range. .. math:: \\sigma^2_{THEO1}(m\\tau_0) = { 1 \\over (m \\tau_0 )^2 (N-m) } \\sum_{i=1}^{N-m} \\sum_{\\delta=0}^{m/2-1} {1\\over m/2-\\delta}\\lbrace ({x}_{i} - x_{i-\\delta +m/2}) + (x_{i+m}- x_{i+\\delta +m/2}) \\rbrace^2 Where :math:`10<=m<=N-1` is even. FIXME: bias correction NIST SP 1065 eq (30) page 29 Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation.
[ "PRELIMINARY", "-", "REQUIRES", "FURTHER", "TESTING", ".", "Theo1", "is", "a", "two", "-", "sample", "variance", "with", "improved", "confidence", "and", "extended", "averaging", "factor", "range", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L919-L987
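A hedged usage sketch for theo1(); the explicit tau list keeps the averaging factor even, which the assert in the code above requires (with rate = 1.0, tau equals m here).

```python
# Sketch only: theo1() with an explicit tau list chosen so that m stays even.
import numpy as np
import allantools

phase = np.cumsum(np.random.default_rng(5).normal(scale=1e-9, size=1_000))
taus2, devs, errs, ns = allantools.theo1(phase, rate=1.0, data_type="phase",
                                         taus=[10.0, 20.0, 40.0])
print(list(zip(taus2, devs)))
```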
236,938
aewallin/allantools
allantools/allantools.py
tierms
def tierms(data, rate=1.0, data_type="phase", taus=None): """ Time Interval Error RMS. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. """ phase = input_to_phase(data, rate, data_type) (data, m, taus_used) = tau_generator(phase, rate, taus) count = len(phase) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) for idx, mj in enumerate(m): mj = int(mj) # This seems like an unusual way to phases = np.column_stack((phase[:-mj], phase[mj:])) p_max = np.max(phases, axis=1) p_min = np.min(phases, axis=1) phases = p_max - p_min tie = np.sqrt(np.mean(phases * phases)) ncount = count - mj devs[idx] = tie deverrs[idx] = 0 / np.sqrt(ncount) # TODO! I THINK THIS IS WRONG! ns[idx] = ncount return remove_small_ns(taus_used, devs, deverrs, ns)
python
def tierms(data, rate=1.0, data_type="phase", taus=None): """ Time Interval Error RMS. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. """ phase = input_to_phase(data, rate, data_type) (data, m, taus_used) = tau_generator(phase, rate, taus) count = len(phase) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) for idx, mj in enumerate(m): mj = int(mj) # This seems like an unusual way to phases = np.column_stack((phase[:-mj], phase[mj:])) p_max = np.max(phases, axis=1) p_min = np.min(phases, axis=1) phases = p_max - p_min tie = np.sqrt(np.mean(phases * phases)) ncount = count - mj devs[idx] = tie deverrs[idx] = 0 / np.sqrt(ncount) # TODO! I THINK THIS IS WRONG! ns[idx] = ncount return remove_small_ns(taus_used, devs, deverrs, ns)
[ "def", "tierms", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "(", "data", ",", "m", ",", "taus_used", ")", "=", "tau_generator", "(", "phase", ",", "rate", ",", "taus", ")", "count", "=", "len", "(", "phase", ")", "devs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "deverrs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ns", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "for", "idx", ",", "mj", "in", "enumerate", "(", "m", ")", ":", "mj", "=", "int", "(", "mj", ")", "# This seems like an unusual way to", "phases", "=", "np", ".", "column_stack", "(", "(", "phase", "[", ":", "-", "mj", "]", ",", "phase", "[", "mj", ":", "]", ")", ")", "p_max", "=", "np", ".", "max", "(", "phases", ",", "axis", "=", "1", ")", "p_min", "=", "np", ".", "min", "(", "phases", ",", "axis", "=", "1", ")", "phases", "=", "p_max", "-", "p_min", "tie", "=", "np", ".", "sqrt", "(", "np", ".", "mean", "(", "phases", "*", "phases", ")", ")", "ncount", "=", "count", "-", "mj", "devs", "[", "idx", "]", "=", "tie", "deverrs", "[", "idx", "]", "=", "0", "/", "np", ".", "sqrt", "(", "ncount", ")", "# TODO! I THINK THIS IS WRONG!", "ns", "[", "idx", "]", "=", "ncount", "return", "remove_small_ns", "(", "taus_used", ",", "devs", ",", "deverrs", ",", "ns", ")" ]
Time Interval Error RMS. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation.
[ "Time", "Interval", "Error", "RMS", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L990-L1033
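A standalone sketch of the per-tau body of tierms() above: for lag m it takes the max-min of every (x_i, x_{i+m}) pair, which reduces to |x_{i+m} - x_i|, and returns the RMS of those excursions. tie_rms_at_m is an illustrative name.

```python
# Standalone sketch of the per-tau TIE-RMS computation used in tierms() above.
import numpy as np

def tie_rms_at_m(phase, m):
    pairs = np.column_stack((phase[:-m], phase[m:]))
    excursion = np.max(pairs, axis=1) - np.min(pairs, axis=1)  # equals |x(i+m) - x(i)|
    return np.sqrt(np.mean(excursion ** 2))

phase = np.cumsum(np.random.default_rng(6).normal(scale=1e-9, size=1_000))
print([tie_rms_at_m(phase, m) for m in (1, 2, 4, 8)])
```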
236,939
aewallin/allantools
allantools/allantools.py
mtie
def mtie(data, rate=1.0, data_type="phase", taus=None): """ Maximum Time Interval Error. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Notes ----- this seems to correspond to Stable32 setting "Fast(u)" Stable32 also has "Decade" and "Octave" modes where the dataset is extended somehow? """ phase = input_to_phase(data, rate, data_type) (phase, m, taus_used) = tau_generator(phase, rate, taus) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) for idx, mj in enumerate(m): rw = mtie_rolling_window(phase, int(mj + 1)) win_max = np.max(rw, axis=1) win_min = np.min(rw, axis=1) tie = win_max - win_min dev = np.max(tie) ncount = phase.shape[0] - mj devs[idx] = dev deverrs[idx] = dev / np.sqrt(ncount) ns[idx] = ncount return remove_small_ns(taus_used, devs, deverrs, ns)
python
def mtie(data, rate=1.0, data_type="phase", taus=None): """ Maximum Time Interval Error. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Notes ----- this seems to correspond to Stable32 setting "Fast(u)" Stable32 also has "Decade" and "Octave" modes where the dataset is extended somehow? """ phase = input_to_phase(data, rate, data_type) (phase, m, taus_used) = tau_generator(phase, rate, taus) devs = np.zeros_like(taus_used) deverrs = np.zeros_like(taus_used) ns = np.zeros_like(taus_used) for idx, mj in enumerate(m): rw = mtie_rolling_window(phase, int(mj + 1)) win_max = np.max(rw, axis=1) win_min = np.min(rw, axis=1) tie = win_max - win_min dev = np.max(tie) ncount = phase.shape[0] - mj devs[idx] = dev deverrs[idx] = dev / np.sqrt(ncount) ns[idx] = ncount return remove_small_ns(taus_used, devs, deverrs, ns)
[ "def", "mtie", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "(", "phase", ",", "m", ",", "taus_used", ")", "=", "tau_generator", "(", "phase", ",", "rate", ",", "taus", ")", "devs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "deverrs", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ns", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "for", "idx", ",", "mj", "in", "enumerate", "(", "m", ")", ":", "rw", "=", "mtie_rolling_window", "(", "phase", ",", "int", "(", "mj", "+", "1", ")", ")", "win_max", "=", "np", ".", "max", "(", "rw", ",", "axis", "=", "1", ")", "win_min", "=", "np", ".", "min", "(", "rw", ",", "axis", "=", "1", ")", "tie", "=", "win_max", "-", "win_min", "dev", "=", "np", ".", "max", "(", "tie", ")", "ncount", "=", "phase", ".", "shape", "[", "0", "]", "-", "mj", "devs", "[", "idx", "]", "=", "dev", "deverrs", "[", "idx", "]", "=", "dev", "/", "np", ".", "sqrt", "(", "ncount", ")", "ns", "[", "idx", "]", "=", "ncount", "return", "remove_small_ns", "(", "taus_used", ",", "devs", ",", "deverrs", ",", "ns", ")" ]
Maximum Time Interval Error. Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. Notes ----- this seems to correspond to Stable32 setting "Fast(u)" Stable32 also has "Decade" and "Octave" modes where the dataset is extended somehow?
[ "Maximum", "Time", "Interval", "Error", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L1061-L1101
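The mtie() record relies on a mtie_rolling_window() helper that is not shown here; the sketch below reproduces the same per-tau max-min over a rolling window with NumPy's sliding_window_view (NumPy >= 1.20), purely as an illustration.

```python
# Standalone sketch of the rolling-window max-min used by mtie() above.
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

def mtie_at_m(phase, m):
    windows = sliding_window_view(phase, m + 1)  # each window spans m sampling intervals
    return np.max(windows.max(axis=1) - windows.min(axis=1))

phase = np.cumsum(np.random.default_rng(7).normal(scale=1e-9, size=1_000))
print([mtie_at_m(phase, m) for m in (1, 2, 4, 8)])
```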
236,940
aewallin/allantools
allantools/allantools.py
mtie_phase_fast
def mtie_phase_fast(phase, rate=1.0, data_type="phase", taus=None): """ fast binary decomposition algorithm for MTIE See: STEFANO BREGNI "Fast Algorithms for TVAR and MTIE Computation in Characterization of Network Synchronization Performance" """ rate = float(rate) phase = np.asarray(phase) k_max = int(np.floor(np.log2(len(phase)))) phase = phase[0:pow(2, k_max)] # truncate data to 2**k_max datapoints assert len(phase) == pow(2, k_max) #k = 1 taus = [ pow(2,k) for k in range(k_max)] #while k <= k_max: # tau = pow(2, k) # taus.append(tau) #print tau # k += 1 print("taus N=", len(taus), " ",taus) devs = np.zeros(len(taus)) deverrs = np.zeros(len(taus)) ns = np.zeros(len(taus)) taus_used = np.array(taus) # [(1.0/rate)*t for t in taus] # matrices to store results mtie_max = np.zeros((len(phase)-1, k_max)) mtie_min = np.zeros((len(phase)-1, k_max)) for kidx in range(k_max): k = kidx+1 imax = len(phase)-pow(2, k)+1 #print k, imax tie = np.zeros(imax) ns[kidx]=imax #print np.max( tie ) for i in range(imax): if k == 1: mtie_max[i, kidx] = max(phase[i], phase[i+1]) mtie_min[i, kidx] = min(phase[i], phase[i+1]) else: p = int(pow(2, k-1)) mtie_max[i, kidx] = max(mtie_max[i, kidx-1], mtie_max[i+p, kidx-1]) mtie_min[i, kidx] = min(mtie_min[i, kidx-1], mtie_min[i+p, kidx-1]) #for i in range(imax): tie[i] = mtie_max[i, kidx] - mtie_min[i, kidx] #print tie[i] devs[kidx] = np.amax(tie) # maximum along axis #print "maximum %2.4f" % devs[kidx] #print np.amax( tie ) #for tau in taus: #for devs = np.array(devs) print("devs N=",len(devs)," ",devs) print("taus N=", len(taus_used), " ",taus_used) return remove_small_ns(taus_used, devs, deverrs, ns)
python
def mtie_phase_fast(phase, rate=1.0, data_type="phase", taus=None): """ fast binary decomposition algorithm for MTIE See: STEFANO BREGNI "Fast Algorithms for TVAR and MTIE Computation in Characterization of Network Synchronization Performance" """ rate = float(rate) phase = np.asarray(phase) k_max = int(np.floor(np.log2(len(phase)))) phase = phase[0:pow(2, k_max)] # truncate data to 2**k_max datapoints assert len(phase) == pow(2, k_max) #k = 1 taus = [ pow(2,k) for k in range(k_max)] #while k <= k_max: # tau = pow(2, k) # taus.append(tau) #print tau # k += 1 print("taus N=", len(taus), " ",taus) devs = np.zeros(len(taus)) deverrs = np.zeros(len(taus)) ns = np.zeros(len(taus)) taus_used = np.array(taus) # [(1.0/rate)*t for t in taus] # matrices to store results mtie_max = np.zeros((len(phase)-1, k_max)) mtie_min = np.zeros((len(phase)-1, k_max)) for kidx in range(k_max): k = kidx+1 imax = len(phase)-pow(2, k)+1 #print k, imax tie = np.zeros(imax) ns[kidx]=imax #print np.max( tie ) for i in range(imax): if k == 1: mtie_max[i, kidx] = max(phase[i], phase[i+1]) mtie_min[i, kidx] = min(phase[i], phase[i+1]) else: p = int(pow(2, k-1)) mtie_max[i, kidx] = max(mtie_max[i, kidx-1], mtie_max[i+p, kidx-1]) mtie_min[i, kidx] = min(mtie_min[i, kidx-1], mtie_min[i+p, kidx-1]) #for i in range(imax): tie[i] = mtie_max[i, kidx] - mtie_min[i, kidx] #print tie[i] devs[kidx] = np.amax(tie) # maximum along axis #print "maximum %2.4f" % devs[kidx] #print np.amax( tie ) #for tau in taus: #for devs = np.array(devs) print("devs N=",len(devs)," ",devs) print("taus N=", len(taus_used), " ",taus_used) return remove_small_ns(taus_used, devs, deverrs, ns)
[ "def", "mtie_phase_fast", "(", "phase", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "rate", "=", "float", "(", "rate", ")", "phase", "=", "np", ".", "asarray", "(", "phase", ")", "k_max", "=", "int", "(", "np", ".", "floor", "(", "np", ".", "log2", "(", "len", "(", "phase", ")", ")", ")", ")", "phase", "=", "phase", "[", "0", ":", "pow", "(", "2", ",", "k_max", ")", "]", "# truncate data to 2**k_max datapoints", "assert", "len", "(", "phase", ")", "==", "pow", "(", "2", ",", "k_max", ")", "#k = 1", "taus", "=", "[", "pow", "(", "2", ",", "k", ")", "for", "k", "in", "range", "(", "k_max", ")", "]", "#while k <= k_max:", "# tau = pow(2, k)", "# taus.append(tau)", "#print tau", "# k += 1", "print", "(", "\"taus N=\"", ",", "len", "(", "taus", ")", ",", "\" \"", ",", "taus", ")", "devs", "=", "np", ".", "zeros", "(", "len", "(", "taus", ")", ")", "deverrs", "=", "np", ".", "zeros", "(", "len", "(", "taus", ")", ")", "ns", "=", "np", ".", "zeros", "(", "len", "(", "taus", ")", ")", "taus_used", "=", "np", ".", "array", "(", "taus", ")", "# [(1.0/rate)*t for t in taus]", "# matrices to store results", "mtie_max", "=", "np", ".", "zeros", "(", "(", "len", "(", "phase", ")", "-", "1", ",", "k_max", ")", ")", "mtie_min", "=", "np", ".", "zeros", "(", "(", "len", "(", "phase", ")", "-", "1", ",", "k_max", ")", ")", "for", "kidx", "in", "range", "(", "k_max", ")", ":", "k", "=", "kidx", "+", "1", "imax", "=", "len", "(", "phase", ")", "-", "pow", "(", "2", ",", "k", ")", "+", "1", "#print k, imax", "tie", "=", "np", ".", "zeros", "(", "imax", ")", "ns", "[", "kidx", "]", "=", "imax", "#print np.max( tie )", "for", "i", "in", "range", "(", "imax", ")", ":", "if", "k", "==", "1", ":", "mtie_max", "[", "i", ",", "kidx", "]", "=", "max", "(", "phase", "[", "i", "]", ",", "phase", "[", "i", "+", "1", "]", ")", "mtie_min", "[", "i", ",", "kidx", "]", "=", "min", "(", "phase", "[", "i", "]", ",", "phase", "[", "i", "+", "1", "]", ")", "else", ":", "p", "=", "int", "(", "pow", "(", "2", ",", "k", "-", "1", ")", ")", "mtie_max", "[", "i", ",", "kidx", "]", "=", "max", "(", "mtie_max", "[", "i", ",", "kidx", "-", "1", "]", ",", "mtie_max", "[", "i", "+", "p", ",", "kidx", "-", "1", "]", ")", "mtie_min", "[", "i", ",", "kidx", "]", "=", "min", "(", "mtie_min", "[", "i", ",", "kidx", "-", "1", "]", ",", "mtie_min", "[", "i", "+", "p", ",", "kidx", "-", "1", "]", ")", "#for i in range(imax):", "tie", "[", "i", "]", "=", "mtie_max", "[", "i", ",", "kidx", "]", "-", "mtie_min", "[", "i", ",", "kidx", "]", "#print tie[i]", "devs", "[", "kidx", "]", "=", "np", ".", "amax", "(", "tie", ")", "# maximum along axis", "#print \"maximum %2.4f\" % devs[kidx]", "#print np.amax( tie )", "#for tau in taus:", "#for", "devs", "=", "np", ".", "array", "(", "devs", ")", "print", "(", "\"devs N=\"", ",", "len", "(", "devs", ")", ",", "\" \"", ",", "devs", ")", "print", "(", "\"taus N=\"", ",", "len", "(", "taus_used", ")", ",", "\" \"", ",", "taus_used", ")", "return", "remove_small_ns", "(", "taus_used", ",", "devs", ",", "deverrs", ",", "ns", ")" ]
fast binary decomposition algorithm for MTIE See: STEFANO BREGNI "Fast Algorithms for TVAR and MTIE Computation in Characterization of Network Synchronization Performance"
[ "fast", "binary", "decomposition", "algorithm", "for", "MTIE" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L1108-L1163
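A small self-contained check of the binary-decomposition idea behind mtie_phase_fast() above (Bregni): window extrema over 2**k samples are built by combining two adjacent 2**(k-1)-sample windows, so nothing is recomputed from scratch. The one-dimensional level_max array here is an illustrative simplification of the mtie_max/mtie_min matrices in the record.

```python
# Toy verification of the max recurrence used by mtie_phase_fast() above.
import numpy as np

x = np.random.default_rng(8).normal(size=32)
level_max = x.copy()                       # level 0: windows of length 1
k_max = int(np.floor(np.log2(len(x))))
for k in range(1, k_max + 1):
    p = 2 ** (k - 1)
    level_max = np.maximum(level_max[:-p], level_max[p:])  # combine adjacent halves
    width = 2 ** k
    direct = np.array([x[i:i + width].max() for i in range(len(x) - width + 1)])
    assert np.allclose(level_max, direct)  # recurrence matches brute-force window maxima
print("binary decomposition matches direct window maxima")
```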
236,941
aewallin/allantools
allantools/allantools.py
gradev
def gradev(data, rate=1.0, data_type="phase", taus=None, ci=0.9, noisetype='wp'): """ gap resistant overlapping Allan deviation Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). Warning : phase data works better (frequency data is first trantformed into phase using numpy.cumsum() function, which can lead to poor results). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. ci: float the total confidence interval desired, i.e. if ci = 0.9, the bounds will be at 0.05 and 0.95. noisetype: string the type of noise desired: 'wp' returns white phase noise. 'wf' returns white frequency noise. 'fp' returns flicker phase noise. 'ff' returns flicker frequency noise. 'rf' returns random walk frequency noise. If the input is not recognized, it defaults to idealized, uncorrelated noise with (N-1) degrees of freedom. Returns ------- taus: np.array list of tau vales in seconds adev: np.array deviations [err_l, err_h] : list of len()==2, np.array the upper and lower bounds of the confidence interval taken as distances from the the estimated two sample variance. ns: np.array numper of terms n in the adev estimate. """ if (data_type == "freq"): print("Warning : phase data is preferred as input to gradev()") phase = input_to_phase(data, rate, data_type) (data, m, taus_used) = tau_generator(phase, rate, taus) ad = np.zeros_like(taus_used) ade_l = np.zeros_like(taus_used) ade_h = np.zeros_like(taus_used) adn = np.zeros_like(taus_used) for idx, mj in enumerate(m): (dev, deverr, n) = calc_gradev_phase(data, rate, mj, 1, ci, noisetype) # stride=1 for overlapping ADEV ad[idx] = dev ade_l[idx] = deverr[0] ade_h[idx] = deverr[1] adn[idx] = n # Note that errors are split in 2 arrays return remove_small_ns(taus_used, ad, [ade_l, ade_h], adn)
python
def gradev(data, rate=1.0, data_type="phase", taus=None, ci=0.9, noisetype='wp'): """ gap resistant overlapping Allan deviation Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional). Warning : phase data works better (frequency data is first trantformed into phase using numpy.cumsum() function, which can lead to poor results). rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic tau-list generation. ci: float the total confidence interval desired, i.e. if ci = 0.9, the bounds will be at 0.05 and 0.95. noisetype: string the type of noise desired: 'wp' returns white phase noise. 'wf' returns white frequency noise. 'fp' returns flicker phase noise. 'ff' returns flicker frequency noise. 'rf' returns random walk frequency noise. If the input is not recognized, it defaults to idealized, uncorrelated noise with (N-1) degrees of freedom. Returns ------- taus: np.array list of tau vales in seconds adev: np.array deviations [err_l, err_h] : list of len()==2, np.array the upper and lower bounds of the confidence interval taken as distances from the the estimated two sample variance. ns: np.array numper of terms n in the adev estimate. """ if (data_type == "freq"): print("Warning : phase data is preferred as input to gradev()") phase = input_to_phase(data, rate, data_type) (data, m, taus_used) = tau_generator(phase, rate, taus) ad = np.zeros_like(taus_used) ade_l = np.zeros_like(taus_used) ade_h = np.zeros_like(taus_used) adn = np.zeros_like(taus_used) for idx, mj in enumerate(m): (dev, deverr, n) = calc_gradev_phase(data, rate, mj, 1, ci, noisetype) # stride=1 for overlapping ADEV ad[idx] = dev ade_l[idx] = deverr[0] ade_h[idx] = deverr[1] adn[idx] = n # Note that errors are split in 2 arrays return remove_small_ns(taus_used, ad, [ade_l, ade_h], adn)
[ "def", "gradev", "(", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ",", "ci", "=", "0.9", ",", "noisetype", "=", "'wp'", ")", ":", "if", "(", "data_type", "==", "\"freq\"", ")", ":", "print", "(", "\"Warning : phase data is preferred as input to gradev()\"", ")", "phase", "=", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", "(", "data", ",", "m", ",", "taus_used", ")", "=", "tau_generator", "(", "phase", ",", "rate", ",", "taus", ")", "ad", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ade_l", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "ade_h", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "adn", "=", "np", ".", "zeros_like", "(", "taus_used", ")", "for", "idx", ",", "mj", "in", "enumerate", "(", "m", ")", ":", "(", "dev", ",", "deverr", ",", "n", ")", "=", "calc_gradev_phase", "(", "data", ",", "rate", ",", "mj", ",", "1", ",", "ci", ",", "noisetype", ")", "# stride=1 for overlapping ADEV", "ad", "[", "idx", "]", "=", "dev", "ade_l", "[", "idx", "]", "=", "deverr", "[", "0", "]", "ade_h", "[", "idx", "]", "=", "deverr", "[", "1", "]", "adn", "[", "idx", "]", "=", "n", "# Note that errors are split in 2 arrays", "return", "remove_small_ns", "(", "taus_used", ",", "ad", ",", "[", "ade_l", ",", "ade_h", "]", ",", "adn", ")" ]
gap resistant overlapping Allan deviation

Parameters
----------
data: np.array
    Input data. Provide either phase or frequency (fractional,
    adimensional). Warning : phase data works better (frequency data is
    first transformed into phase using numpy.cumsum() function, which can
    lead to poor results).
rate: float
    The sampling rate for data, in Hz. Defaults to 1.0
data_type: {'phase', 'freq'}
    Data type, i.e. phase or frequency. Defaults to "phase".
taus: np.array
    Array of tau values, in seconds, for which to compute statistic.
    Optionally set taus=["all"|"octave"|"decade"] for automatic
    tau-list generation.
ci: float
    the total confidence interval desired, i.e. if ci = 0.9, the bounds
    will be at 0.05 and 0.95.
noisetype: string
    the type of noise desired:
    'wp' returns white phase noise.
    'wf' returns white frequency noise.
    'fp' returns flicker phase noise.
    'ff' returns flicker frequency noise.
    'rf' returns random walk frequency noise.
    If the input is not recognized, it defaults to idealized, uncorrelated
    noise with (N-1) degrees of freedom.

Returns
-------
taus: np.array
    list of tau values in seconds
adev: np.array
    deviations
[err_l, err_h] : list of len()==2, np.array
    the upper and lower bounds of the confidence interval taken as
    distances from the estimated two sample variance.
ns: np.array
    number of terms n in the adev estimate.
[ "gap", "resistant", "overlapping", "Allan", "deviation" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L1173-L1242
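A minimal usage sketch for gradev() above, assuming allantools is installed and exports gradev() at package level; the phase data and the gap location are made up for illustration:

import numpy as np
import allantools

rng = np.random.default_rng(0)
phase = np.cumsum(rng.normal(scale=1e-9, size=2000))   # synthetic phase data, in seconds
phase[500:600] = np.nan                                 # simulated measurement dropout

# gradev() tolerates the NaN gap; the errors come back as a [lower, upper] pair
taus, adev, (err_lo, err_hi), ns = allantools.gradev(
    phase, rate=1.0, data_type="phase", taus="octave", ci=0.9)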
236,942
aewallin/allantools
allantools/allantools.py
input_to_phase
def input_to_phase(data, rate, data_type): """ Take either phase or frequency as input and return phase """ if data_type == "phase": return data elif data_type == "freq": return frequency2phase(data, rate) else: raise Exception("unknown data_type: " + data_type)
python
def input_to_phase(data, rate, data_type): """ Take either phase or frequency as input and return phase """ if data_type == "phase": return data elif data_type == "freq": return frequency2phase(data, rate) else: raise Exception("unknown data_type: " + data_type)
[ "def", "input_to_phase", "(", "data", ",", "rate", ",", "data_type", ")", ":", "if", "data_type", "==", "\"phase\"", ":", "return", "data", "elif", "data_type", "==", "\"freq\"", ":", "return", "frequency2phase", "(", "data", ",", "rate", ")", "else", ":", "raise", "Exception", "(", "\"unknown data_type: \"", "+", "data_type", ")" ]
Take either phase or frequency as input and return phase
[ "Take", "either", "phase", "or", "frequency", "as", "input", "and", "return", "phase" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L1297-L1305
236,943
aewallin/allantools
allantools/allantools.py
trim_data
def trim_data(x):
    """
    Trim leading and trailing NaNs from dataset
    This is done by browsing the array from each end, storing the index
    of the first non-NaN in each case, then returning the appropriate
    slice of the array
    """
    # Find indices for first and last valid data
    first = 0
    while np.isnan(x[first]):
        first += 1
    last = len(x)
    while np.isnan(x[last - 1]):
        last -= 1
    return x[first:last]
python
def trim_data(x):
    """
    Trim leading and trailing NaNs from dataset
    This is done by browsing the array from each end, storing the index
    of the first non-NaN in each case, then returning the appropriate
    slice of the array
    """
    # Find indices for first and last valid data
    first = 0
    while np.isnan(x[first]):
        first += 1
    last = len(x)
    while np.isnan(x[last - 1]):
        last -= 1
    return x[first:last]
[ "def", "trim_data", "(", "x", ")", ":", "# Find indices for first and last valid data", "first", "=", "0", "while", "np", ".", "isnan", "(", "x", "[", "first", "]", ")", ":", "first", "+=", "1", "last", "=", "len", "(", "x", ")", "while", "np", ".", "isnan", "(", "x", "[", "last", "-", "1", "]", ")", ":", "last", "-=", "1", "return", "x", "[", "first", ":", "last", "]" ]
Trim leading and trailing NaNs from dataset. This is done by browsing the array from each end, storing the index of the first non-NaN in each case, then returning the appropriate slice of the array.
[ "Trim", "leading", "and", "trailing", "NaNs", "from", "dataset", "This", "is", "done", "by", "browsing", "the", "array", "from", "each", "end", "and", "store", "the", "index", "of", "the", "first", "non", "-", "NaN", "in", "each", "case", "the", "return", "the", "appropriate", "slice", "of", "the", "array" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L1498-L1511
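For reference, a standalone vectorised sketch of the same leading/trailing trim (not the library call itself); interior NaNs are preserved, exactly as in trim_data() above:

import numpy as np

def trim_nans(x):
    # keep everything between the first and last non-NaN samples
    valid = np.where(~np.isnan(x))[0]
    return x[valid[0]:valid[-1] + 1] if valid.size else x[:0]

print(trim_nans(np.array([np.nan, 1.0, np.nan, 2.0, np.nan])))  # [ 1. nan  2.]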
236,944
aewallin/allantools
allantools/allantools.py
three_cornered_hat_phase
def three_cornered_hat_phase(phasedata_ab, phasedata_bc, phasedata_ca, rate, taus, function): """ Three Cornered Hat Method Given three clocks A, B, C, we seek to find their variances :math:`\\sigma^2_A`, :math:`\\sigma^2_B`, :math:`\\sigma^2_C`. We measure three phase differences, assuming no correlation between the clocks, the measurements have variances: .. math:: \\sigma^2_{AB} = \\sigma^2_{A} + \\sigma^2_{B} \\sigma^2_{BC} = \\sigma^2_{B} + \\sigma^2_{C} \\sigma^2_{CA} = \\sigma^2_{C} + \\sigma^2_{A} Which allows solving for the variance of one clock as: .. math:: \\sigma^2_{A} = {1 \\over 2} ( \\sigma^2_{AB} + \\sigma^2_{CA} - \\sigma^2_{BC} ) and similarly cyclic permutations for :math:`\\sigma^2_B` and :math:`\\sigma^2_C` Parameters ---------- phasedata_ab: np.array phase measurements between clock A and B, in seconds phasedata_bc: np.array phase measurements between clock B and C, in seconds phasedata_ca: np.array phase measurements between clock C and A, in seconds rate: float The sampling rate for phase, in Hz taus: np.array The tau values for deviations, in seconds function: allantools deviation function The type of statistic to compute, e.g. allantools.oadev Returns ------- tau_ab: np.array Tau values corresponding to output deviations dev_a: np.array List of computed values for clock A References ---------- http://www.wriley.com/3-CornHat.htm """ (tau_ab, dev_ab, err_ab, ns_ab) = function(phasedata_ab, data_type='phase', rate=rate, taus=taus) (tau_bc, dev_bc, err_bc, ns_bc) = function(phasedata_bc, data_type='phase', rate=rate, taus=taus) (tau_ca, dev_ca, err_ca, ns_ca) = function(phasedata_ca, data_type='phase', rate=rate, taus=taus) var_ab = dev_ab * dev_ab var_bc = dev_bc * dev_bc var_ca = dev_ca * dev_ca assert len(var_ab) == len(var_bc) == len(var_ca) var_a = 0.5 * (var_ab + var_ca - var_bc) var_a[var_a < 0] = 0 # don't return imaginary deviations (?) dev_a = np.sqrt(var_a) err_a = [d/np.sqrt(nn) for (d, nn) in zip(dev_a, ns_ab)] return tau_ab, dev_a, err_a, ns_ab
python
def three_cornered_hat_phase(phasedata_ab, phasedata_bc, phasedata_ca, rate, taus, function): """ Three Cornered Hat Method Given three clocks A, B, C, we seek to find their variances :math:`\\sigma^2_A`, :math:`\\sigma^2_B`, :math:`\\sigma^2_C`. We measure three phase differences, assuming no correlation between the clocks, the measurements have variances: .. math:: \\sigma^2_{AB} = \\sigma^2_{A} + \\sigma^2_{B} \\sigma^2_{BC} = \\sigma^2_{B} + \\sigma^2_{C} \\sigma^2_{CA} = \\sigma^2_{C} + \\sigma^2_{A} Which allows solving for the variance of one clock as: .. math:: \\sigma^2_{A} = {1 \\over 2} ( \\sigma^2_{AB} + \\sigma^2_{CA} - \\sigma^2_{BC} ) and similarly cyclic permutations for :math:`\\sigma^2_B` and :math:`\\sigma^2_C` Parameters ---------- phasedata_ab: np.array phase measurements between clock A and B, in seconds phasedata_bc: np.array phase measurements between clock B and C, in seconds phasedata_ca: np.array phase measurements between clock C and A, in seconds rate: float The sampling rate for phase, in Hz taus: np.array The tau values for deviations, in seconds function: allantools deviation function The type of statistic to compute, e.g. allantools.oadev Returns ------- tau_ab: np.array Tau values corresponding to output deviations dev_a: np.array List of computed values for clock A References ---------- http://www.wriley.com/3-CornHat.htm """ (tau_ab, dev_ab, err_ab, ns_ab) = function(phasedata_ab, data_type='phase', rate=rate, taus=taus) (tau_bc, dev_bc, err_bc, ns_bc) = function(phasedata_bc, data_type='phase', rate=rate, taus=taus) (tau_ca, dev_ca, err_ca, ns_ca) = function(phasedata_ca, data_type='phase', rate=rate, taus=taus) var_ab = dev_ab * dev_ab var_bc = dev_bc * dev_bc var_ca = dev_ca * dev_ca assert len(var_ab) == len(var_bc) == len(var_ca) var_a = 0.5 * (var_ab + var_ca - var_bc) var_a[var_a < 0] = 0 # don't return imaginary deviations (?) dev_a = np.sqrt(var_a) err_a = [d/np.sqrt(nn) for (d, nn) in zip(dev_a, ns_ab)] return tau_ab, dev_a, err_a, ns_ab
[ "def", "three_cornered_hat_phase", "(", "phasedata_ab", ",", "phasedata_bc", ",", "phasedata_ca", ",", "rate", ",", "taus", ",", "function", ")", ":", "(", "tau_ab", ",", "dev_ab", ",", "err_ab", ",", "ns_ab", ")", "=", "function", "(", "phasedata_ab", ",", "data_type", "=", "'phase'", ",", "rate", "=", "rate", ",", "taus", "=", "taus", ")", "(", "tau_bc", ",", "dev_bc", ",", "err_bc", ",", "ns_bc", ")", "=", "function", "(", "phasedata_bc", ",", "data_type", "=", "'phase'", ",", "rate", "=", "rate", ",", "taus", "=", "taus", ")", "(", "tau_ca", ",", "dev_ca", ",", "err_ca", ",", "ns_ca", ")", "=", "function", "(", "phasedata_ca", ",", "data_type", "=", "'phase'", ",", "rate", "=", "rate", ",", "taus", "=", "taus", ")", "var_ab", "=", "dev_ab", "*", "dev_ab", "var_bc", "=", "dev_bc", "*", "dev_bc", "var_ca", "=", "dev_ca", "*", "dev_ca", "assert", "len", "(", "var_ab", ")", "==", "len", "(", "var_bc", ")", "==", "len", "(", "var_ca", ")", "var_a", "=", "0.5", "*", "(", "var_ab", "+", "var_ca", "-", "var_bc", ")", "var_a", "[", "var_a", "<", "0", "]", "=", "0", "# don't return imaginary deviations (?)", "dev_a", "=", "np", ".", "sqrt", "(", "var_a", ")", "err_a", "=", "[", "d", "/", "np", ".", "sqrt", "(", "nn", ")", "for", "(", "d", ",", "nn", ")", "in", "zip", "(", "dev_a", ",", "ns_ab", ")", "]", "return", "tau_ab", ",", "dev_a", ",", "err_a", ",", "ns_ab" ]
Three Cornered Hat Method Given three clocks A, B, C, we seek to find their variances :math:`\\sigma^2_A`, :math:`\\sigma^2_B`, :math:`\\sigma^2_C`. We measure three phase differences, assuming no correlation between the clocks, the measurements have variances: .. math:: \\sigma^2_{AB} = \\sigma^2_{A} + \\sigma^2_{B} \\sigma^2_{BC} = \\sigma^2_{B} + \\sigma^2_{C} \\sigma^2_{CA} = \\sigma^2_{C} + \\sigma^2_{A} Which allows solving for the variance of one clock as: .. math:: \\sigma^2_{A} = {1 \\over 2} ( \\sigma^2_{AB} + \\sigma^2_{CA} - \\sigma^2_{BC} ) and similarly cyclic permutations for :math:`\\sigma^2_B` and :math:`\\sigma^2_C` Parameters ---------- phasedata_ab: np.array phase measurements between clock A and B, in seconds phasedata_bc: np.array phase measurements between clock B and C, in seconds phasedata_ca: np.array phase measurements between clock C and A, in seconds rate: float The sampling rate for phase, in Hz taus: np.array The tau values for deviations, in seconds function: allantools deviation function The type of statistic to compute, e.g. allantools.oadev Returns ------- tau_ab: np.array Tau values corresponding to output deviations dev_a: np.array List of computed values for clock A References ---------- http://www.wriley.com/3-CornHat.htm
[ "Three", "Cornered", "Hat", "Method" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L1513-L1588
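A usage sketch for three_cornered_hat_phase() above, with made-up phase-difference data; it assumes allantools exports both three_cornered_hat_phase() and oadev() at package level:

import numpy as np
import allantools

rng = np.random.default_rng(1)
# hypothetical phase differences A-B, B-C, C-A, in seconds, sampled at 1 Hz
x_ab = np.cumsum(rng.normal(scale=1e-9, size=10000))
x_bc = np.cumsum(rng.normal(scale=1e-9, size=10000))
x_ca = np.cumsum(rng.normal(scale=1e-9, size=10000))

taus, dev_a, err_a, ns = allantools.three_cornered_hat_phase(
    x_ab, x_bc, x_ca, rate=1.0, taus="octave", function=allantools.oadev)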
236,945
aewallin/allantools
allantools/allantools.py
frequency2phase
def frequency2phase(freqdata, rate): """ integrate fractional frequency data and output phase data Parameters ---------- freqdata: np.array Data array of fractional frequency measurements (nondimensional) rate: float The sampling rate for phase or frequency, in Hz Returns ------- phasedata: np.array Time integral of fractional frequency data, i.e. phase (time) data in units of seconds. For phase in units of radians, see phase2radians() """ dt = 1.0 / float(rate) # Protect against NaN values in input array (issue #60) # Reintroduces data trimming as in commit 503cb82 freqdata = trim_data(freqdata) phasedata = np.cumsum(freqdata) * dt phasedata = np.insert(phasedata, 0, 0) # FIXME: why do we do this? # so that phase starts at zero and len(phase)=len(freq)+1 ?? return phasedata
python
def frequency2phase(freqdata, rate): """ integrate fractional frequency data and output phase data Parameters ---------- freqdata: np.array Data array of fractional frequency measurements (nondimensional) rate: float The sampling rate for phase or frequency, in Hz Returns ------- phasedata: np.array Time integral of fractional frequency data, i.e. phase (time) data in units of seconds. For phase in units of radians, see phase2radians() """ dt = 1.0 / float(rate) # Protect against NaN values in input array (issue #60) # Reintroduces data trimming as in commit 503cb82 freqdata = trim_data(freqdata) phasedata = np.cumsum(freqdata) * dt phasedata = np.insert(phasedata, 0, 0) # FIXME: why do we do this? # so that phase starts at zero and len(phase)=len(freq)+1 ?? return phasedata
[ "def", "frequency2phase", "(", "freqdata", ",", "rate", ")", ":", "dt", "=", "1.0", "/", "float", "(", "rate", ")", "# Protect against NaN values in input array (issue #60)", "# Reintroduces data trimming as in commit 503cb82", "freqdata", "=", "trim_data", "(", "freqdata", ")", "phasedata", "=", "np", ".", "cumsum", "(", "freqdata", ")", "*", "dt", "phasedata", "=", "np", ".", "insert", "(", "phasedata", ",", "0", ",", "0", ")", "# FIXME: why do we do this?", "# so that phase starts at zero and len(phase)=len(freq)+1 ??", "return", "phasedata" ]
integrate fractional frequency data and output phase data Parameters ---------- freqdata: np.array Data array of fractional frequency measurements (nondimensional) rate: float The sampling rate for phase or frequency, in Hz Returns ------- phasedata: np.array Time integral of fractional frequency data, i.e. phase (time) data in units of seconds. For phase in units of radians, see phase2radians()
[ "integrate", "fractional", "frequency", "data", "and", "output", "phase", "data" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L1595-L1619
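A short sketch of the conversion above, assuming frequency2phase() is exported at package level:

import numpy as np
import allantools

y = np.array([1e-9, 2e-9, -1e-9, 0.5e-9])    # fractional frequency, sampled at 1 Hz
x = allantools.frequency2phase(y, rate=1.0)   # phase (time) data, in seconds
# x[0] == 0 and len(x) == len(y) + 1, matching the np.insert() in the source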
236,946
aewallin/allantools
allantools/allantools.py
phase2radians
def phase2radians(phasedata, v0): """ Convert phase in seconds to phase in radians Parameters ---------- phasedata: np.array Data array of phase in seconds v0: float Nominal oscillator frequency in Hz Returns ------- fi: phase data in radians """ fi = [2*np.pi*v0*xx for xx in phasedata] return fi
python
def phase2radians(phasedata, v0): """ Convert phase in seconds to phase in radians Parameters ---------- phasedata: np.array Data array of phase in seconds v0: float Nominal oscillator frequency in Hz Returns ------- fi: phase data in radians """ fi = [2*np.pi*v0*xx for xx in phasedata] return fi
[ "def", "phase2radians", "(", "phasedata", ",", "v0", ")", ":", "fi", "=", "[", "2", "*", "np", ".", "pi", "*", "v0", "*", "xx", "for", "xx", "in", "phasedata", "]", "return", "fi" ]
Convert phase in seconds to phase in radians Parameters ---------- phasedata: np.array Data array of phase in seconds v0: float Nominal oscillator frequency in Hz Returns ------- fi: phase data in radians
[ "Convert", "phase", "in", "seconds", "to", "phase", "in", "radians" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L1621-L1637
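And the matching conversion from phase in seconds to phase in radians; a sketch in which the 10 MHz nominal frequency is just an example value, assuming phase2radians() is exported at package level:

import allantools

x_seconds = [1e-12, 2e-12, -1e-12]                  # phase data, in seconds
fi = allantools.phase2radians(x_seconds, v0=10e6)   # phi = 2*pi*v0*x at a 10 MHz carrier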
236,947
aewallin/allantools
allantools/allantools.py
frequency2fractional
def frequency2fractional(frequency, mean_frequency=-1): """ Convert frequency in Hz to fractional frequency Parameters ---------- frequency: np.array Data array of frequency in Hz mean_frequency: float (optional) The nominal mean frequency, in Hz if omitted, defaults to mean frequency=np.mean(frequency) Returns ------- y: Data array of fractional frequency """ if mean_frequency == -1: mu = np.mean(frequency) else: mu = mean_frequency y = [(x-mu)/mu for x in frequency] return y
python
def frequency2fractional(frequency, mean_frequency=-1): """ Convert frequency in Hz to fractional frequency Parameters ---------- frequency: np.array Data array of frequency in Hz mean_frequency: float (optional) The nominal mean frequency, in Hz if omitted, defaults to mean frequency=np.mean(frequency) Returns ------- y: Data array of fractional frequency """ if mean_frequency == -1: mu = np.mean(frequency) else: mu = mean_frequency y = [(x-mu)/mu for x in frequency] return y
[ "def", "frequency2fractional", "(", "frequency", ",", "mean_frequency", "=", "-", "1", ")", ":", "if", "mean_frequency", "==", "-", "1", ":", "mu", "=", "np", ".", "mean", "(", "frequency", ")", "else", ":", "mu", "=", "mean_frequency", "y", "=", "[", "(", "x", "-", "mu", ")", "/", "mu", "for", "x", "in", "frequency", "]", "return", "y" ]
Convert frequency in Hz to fractional frequency Parameters ---------- frequency: np.array Data array of frequency in Hz mean_frequency: float (optional) The nominal mean frequency, in Hz if omitted, defaults to mean frequency=np.mean(frequency) Returns ------- y: Data array of fractional frequency
[ "Convert", "frequency", "in", "Hz", "to", "fractional", "frequency" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/allantools.py#L1657-L1678
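A sketch of frequency2fractional(); when mean_frequency is omitted, the mean of the data itself is used as the nominal frequency (the readings below are invented):

import allantools

f_hz = [10e6 + 0.1, 10e6 - 0.1, 10e6 + 0.3]                        # absolute frequency readings, in Hz
y = allantools.frequency2fractional(f_hz)                          # nominal frequency = np.mean(f_hz)
y_nom = allantools.frequency2fractional(f_hz, mean_frequency=10e6) # explicit nominal frequency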
236,948
aewallin/allantools
allantools/dataset.py
Dataset.set_input
def set_input(self, data, rate=1.0, data_type="phase", taus=None): """ Optionnal method if you chose not to set inputs on init Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional) rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic """ self.inp["data"] = data self.inp["rate"] = rate self.inp["data_type"] = data_type self.inp["taus"] = taus
python
def set_input(self, data, rate=1.0, data_type="phase", taus=None): """ Optionnal method if you chose not to set inputs on init Parameters ---------- data: np.array Input data. Provide either phase or frequency (fractional, adimensional) rate: float The sampling rate for data, in Hz. Defaults to 1.0 data_type: {'phase', 'freq'} Data type, i.e. phase or frequency. Defaults to "phase". taus: np.array Array of tau values, in seconds, for which to compute statistic. Optionally set taus=["all"|"octave"|"decade"] for automatic """ self.inp["data"] = data self.inp["rate"] = rate self.inp["data_type"] = data_type self.inp["taus"] = taus
[ "def", "set_input", "(", "self", ",", "data", ",", "rate", "=", "1.0", ",", "data_type", "=", "\"phase\"", ",", "taus", "=", "None", ")", ":", "self", ".", "inp", "[", "\"data\"", "]", "=", "data", "self", ".", "inp", "[", "\"rate\"", "]", "=", "rate", "self", ".", "inp", "[", "\"data_type\"", "]", "=", "data_type", "self", ".", "inp", "[", "\"taus\"", "]", "=", "taus" ]
Optional method if you choose not to set inputs on init

Parameters
----------
data: np.array
    Input data. Provide either phase or frequency (fractional,
    adimensional)
rate: float
    The sampling rate for data, in Hz. Defaults to 1.0
data_type: {'phase', 'freq'}
    Data type, i.e. phase or frequency. Defaults to "phase".
taus: np.array
    Array of tau values, in seconds, for which to compute statistic.
    Optionally set taus=["all"|"octave"|"decade"] for automatic
    tau-list generation.
[ "Optionnal", "method", "if", "you", "chose", "not", "to", "set", "inputs", "on", "init" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/dataset.py#L93-L113
236,949
aewallin/allantools
allantools/dataset.py
Dataset.compute
def compute(self, function): """Evaluate the passed function with the supplied data. Stores result in self.out. Parameters ---------- function: str Name of the :mod:`allantools` function to evaluate Returns ------- result: dict The results of the calculation. """ try: func = getattr(allantools, function) except AttributeError: raise AttributeError("function must be defined in allantools") whitelisted = ["theo1", "mtie", "tierms"] if function[-3:] != "dev" and function not in whitelisted: # this should probably raise a custom exception type so # it's easier to distinguish from other bad things raise RuntimeError("function must be one of the 'dev' functions") result = func(self.inp["data"], rate=self.inp["rate"], data_type=self.inp["data_type"], taus=self.inp["taus"]) keys = ["taus", "stat", "stat_err", "stat_n"] result = {key: result[i] for i, key in enumerate(keys)} self.out = result.copy() self.out["stat_id"] = function return result
python
def compute(self, function): """Evaluate the passed function with the supplied data. Stores result in self.out. Parameters ---------- function: str Name of the :mod:`allantools` function to evaluate Returns ------- result: dict The results of the calculation. """ try: func = getattr(allantools, function) except AttributeError: raise AttributeError("function must be defined in allantools") whitelisted = ["theo1", "mtie", "tierms"] if function[-3:] != "dev" and function not in whitelisted: # this should probably raise a custom exception type so # it's easier to distinguish from other bad things raise RuntimeError("function must be one of the 'dev' functions") result = func(self.inp["data"], rate=self.inp["rate"], data_type=self.inp["data_type"], taus=self.inp["taus"]) keys = ["taus", "stat", "stat_err", "stat_n"] result = {key: result[i] for i, key in enumerate(keys)} self.out = result.copy() self.out["stat_id"] = function return result
[ "def", "compute", "(", "self", ",", "function", ")", ":", "try", ":", "func", "=", "getattr", "(", "allantools", ",", "function", ")", "except", "AttributeError", ":", "raise", "AttributeError", "(", "\"function must be defined in allantools\"", ")", "whitelisted", "=", "[", "\"theo1\"", ",", "\"mtie\"", ",", "\"tierms\"", "]", "if", "function", "[", "-", "3", ":", "]", "!=", "\"dev\"", "and", "function", "not", "in", "whitelisted", ":", "# this should probably raise a custom exception type so", "# it's easier to distinguish from other bad things", "raise", "RuntimeError", "(", "\"function must be one of the 'dev' functions\"", ")", "result", "=", "func", "(", "self", ".", "inp", "[", "\"data\"", "]", ",", "rate", "=", "self", ".", "inp", "[", "\"rate\"", "]", ",", "data_type", "=", "self", ".", "inp", "[", "\"data_type\"", "]", ",", "taus", "=", "self", ".", "inp", "[", "\"taus\"", "]", ")", "keys", "=", "[", "\"taus\"", ",", "\"stat\"", ",", "\"stat_err\"", ",", "\"stat_n\"", "]", "result", "=", "{", "key", ":", "result", "[", "i", "]", "for", "i", ",", "key", "in", "enumerate", "(", "keys", ")", "}", "self", ".", "out", "=", "result", ".", "copy", "(", ")", "self", ".", "out", "[", "\"stat_id\"", "]", "=", "function", "return", "result" ]
Evaluate the passed function with the supplied data. Stores result in self.out. Parameters ---------- function: str Name of the :mod:`allantools` function to evaluate Returns ------- result: dict The results of the calculation.
[ "Evaluate", "the", "passed", "function", "with", "the", "supplied", "data", "." ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/allantools/dataset.py#L115-L148
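Putting the two Dataset methods above together; a minimal sketch that assumes allantools.Dataset accepts the same keyword arguments at construction time as set_input():

import numpy as np
import allantools

ds = allantools.Dataset(data=np.cumsum(np.random.default_rng(2).normal(size=4096)),
                        rate=1.0, data_type="phase", taus="decade")
result = ds.compute("oadev")              # dict with keys: taus, stat, stat_err, stat_n
print(result["taus"], result["stat"])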
236,950
aewallin/allantools
examples/noise-color_and_PSD.py
many_psds
def many_psds(k=2, fs=1.0, b0=1.0, N=1024):
    """ compute average of many PSDs """
    psd = []
    for j in range(k):
        print(j)
        x = noise.white(N=2*4096, b0=b0, fs=fs)
        f, tmp = noise.numpy_psd(x, fs)
        if j == 0:
            psd = tmp
        else:
            psd = psd + tmp
    return f, psd/k
python
def many_psds(k=2, fs=1.0, b0=1.0, N=1024):
    """ compute average of many PSDs """
    psd = []
    for j in range(k):
        print(j)
        x = noise.white(N=2*4096, b0=b0, fs=fs)
        f, tmp = noise.numpy_psd(x, fs)
        if j == 0:
            psd = tmp
        else:
            psd = psd + tmp
    return f, psd/k
[ "def", "many_psds", "(", "k", "=", "2", ",", "fs", "=", "1.0", ",", "b0", "=", "1.0", ",", "N", "=", "1024", ")", ":", "psd", "=", "[", "]", "for", "j", "in", "range", "(", "k", ")", ":", "print", "j", "x", "=", "noise", ".", "white", "(", "N", "=", "2", "*", "4096", ",", "b0", "=", "b0", ",", "fs", "=", "fs", ")", "f", ",", "tmp", "=", "noise", ".", "numpy_psd", "(", "x", ",", "fs", ")", "if", "j", "==", "0", ":", "psd", "=", "tmp", "else", ":", "psd", "=", "psd", "+", "tmp", "return", "f", ",", "psd", "/", "k" ]
compute average of many PSDs
[ "compute", "average", "of", "many", "PSDs" ]
b5c695a5af4379fcea4d4ce93a066cb902e7ee0a
https://github.com/aewallin/allantools/blob/b5c695a5af4379fcea4d4ce93a066cb902e7ee0a/examples/noise-color_and_PSD.py#L7-L18
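A single-realisation counterpart of many_psds() above, reusing only the calls that already appear in that snippet (noise.white() and noise.numpy_psd()); averaging k such periodograms is exactly what the loop does:

import allantools.noise as noise

x = noise.white(N=4096, b0=1.0, fs=1.0)   # one realisation of white noise
f, psd = noise.numpy_psd(x, 1.0)          # a single periodogram; many_psds() averages k of these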
236,951
singnet/snet-cli
snet_cli/commands.py
OrganizationCommand.list_my
def list_my(self): """ Find organization that has the current identity as the owner or as the member """ org_list = self.call_contract_command("Registry", "listOrganizations", []) rez_owner = [] rez_member = [] for idx, org_id in enumerate(org_list): (found, org_id, org_name, owner, members, serviceNames, repositoryNames) = self.call_contract_command("Registry", "getOrganizationById", [org_id]) if (not found): raise Exception("Organization was removed during this call. Please retry."); if self.ident.address == owner: rez_owner.append((org_name, bytes32_to_str(org_id))) if self.ident.address in members: rez_member.append((org_name, bytes32_to_str(org_id))) if (rez_owner): self._printout("# Organizations you are the owner of") self._printout("# OrgName OrgId") for n,i in rez_owner: self._printout("%s %s"%(n,i)) if (rez_member): self._printout("# Organizations you are the member of") self._printout("# OrgName OrgId") for n,i in rez_member: self._printout("%s %s"%(n,i))
python
def list_my(self): """ Find organization that has the current identity as the owner or as the member """ org_list = self.call_contract_command("Registry", "listOrganizations", []) rez_owner = [] rez_member = [] for idx, org_id in enumerate(org_list): (found, org_id, org_name, owner, members, serviceNames, repositoryNames) = self.call_contract_command("Registry", "getOrganizationById", [org_id]) if (not found): raise Exception("Organization was removed during this call. Please retry."); if self.ident.address == owner: rez_owner.append((org_name, bytes32_to_str(org_id))) if self.ident.address in members: rez_member.append((org_name, bytes32_to_str(org_id))) if (rez_owner): self._printout("# Organizations you are the owner of") self._printout("# OrgName OrgId") for n,i in rez_owner: self._printout("%s %s"%(n,i)) if (rez_member): self._printout("# Organizations you are the member of") self._printout("# OrgName OrgId") for n,i in rez_member: self._printout("%s %s"%(n,i))
[ "def", "list_my", "(", "self", ")", ":", "org_list", "=", "self", ".", "call_contract_command", "(", "\"Registry\"", ",", "\"listOrganizations\"", ",", "[", "]", ")", "rez_owner", "=", "[", "]", "rez_member", "=", "[", "]", "for", "idx", ",", "org_id", "in", "enumerate", "(", "org_list", ")", ":", "(", "found", ",", "org_id", ",", "org_name", ",", "owner", ",", "members", ",", "serviceNames", ",", "repositoryNames", ")", "=", "self", ".", "call_contract_command", "(", "\"Registry\"", ",", "\"getOrganizationById\"", ",", "[", "org_id", "]", ")", "if", "(", "not", "found", ")", ":", "raise", "Exception", "(", "\"Organization was removed during this call. Please retry.\"", ")", "if", "self", ".", "ident", ".", "address", "==", "owner", ":", "rez_owner", ".", "append", "(", "(", "org_name", ",", "bytes32_to_str", "(", "org_id", ")", ")", ")", "if", "self", ".", "ident", ".", "address", "in", "members", ":", "rez_member", ".", "append", "(", "(", "org_name", ",", "bytes32_to_str", "(", "org_id", ")", ")", ")", "if", "(", "rez_owner", ")", ":", "self", ".", "_printout", "(", "\"# Organizations you are the owner of\"", ")", "self", ".", "_printout", "(", "\"# OrgName OrgId\"", ")", "for", "n", ",", "i", "in", "rez_owner", ":", "self", ".", "_printout", "(", "\"%s %s\"", "%", "(", "n", ",", "i", ")", ")", "if", "(", "rez_member", ")", ":", "self", ".", "_printout", "(", "\"# Organizations you are the member of\"", ")", "self", ".", "_printout", "(", "\"# OrgName OrgId\"", ")", "for", "n", ",", "i", "in", "rez_member", ":", "self", ".", "_printout", "(", "\"%s %s\"", "%", "(", "n", ",", "i", ")", ")" ]
Find organizations that have the current identity as the owner or as a member
[ "Find", "organization", "that", "has", "the", "current", "identity", "as", "the", "owner", "or", "as", "the", "member" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/commands.py#L541-L567
236,952
singnet/snet-cli
snet_cli/mpe_service_metadata.py
MPEServiceMetadata.add_group
def add_group(self, group_name, payment_address): """ Return new group_id in base64 """ if (self.is_group_name_exists(group_name)): raise Exception("the group \"%s\" is already present"%str(group_name)) group_id_base64 = base64.b64encode(secrets.token_bytes(32)) self.m["groups"] += [{"group_name" : group_name , "group_id" : group_id_base64.decode("ascii"), "payment_address" : payment_address}] return group_id_base64
python
def add_group(self, group_name, payment_address): """ Return new group_id in base64 """ if (self.is_group_name_exists(group_name)): raise Exception("the group \"%s\" is already present"%str(group_name)) group_id_base64 = base64.b64encode(secrets.token_bytes(32)) self.m["groups"] += [{"group_name" : group_name , "group_id" : group_id_base64.decode("ascii"), "payment_address" : payment_address}] return group_id_base64
[ "def", "add_group", "(", "self", ",", "group_name", ",", "payment_address", ")", ":", "if", "(", "self", ".", "is_group_name_exists", "(", "group_name", ")", ")", ":", "raise", "Exception", "(", "\"the group \\\"%s\\\" is already present\"", "%", "str", "(", "group_name", ")", ")", "group_id_base64", "=", "base64", ".", "b64encode", "(", "secrets", ".", "token_bytes", "(", "32", ")", ")", "self", ".", "m", "[", "\"groups\"", "]", "+=", "[", "{", "\"group_name\"", ":", "group_name", ",", "\"group_id\"", ":", "group_id_base64", ".", "decode", "(", "\"ascii\"", ")", ",", "\"payment_address\"", ":", "payment_address", "}", "]", "return", "group_id_base64" ]
Return new group_id in base64
[ "Return", "new", "group_id", "in", "base64" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_service_metadata.py#L75-L83
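The group-id generation in add_group() above reduces to two standard-library calls; a standalone sketch:

import base64
import secrets

group_id_base64 = base64.b64encode(secrets.token_bytes(32))  # 32 random bytes, base64-encoded
print(group_id_base64.decode("ascii"))                       # the string stored in metadata["groups"]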
236,953
singnet/snet-cli
snet_cli/mpe_service_metadata.py
MPEServiceMetadata.is_group_name_exists
def is_group_name_exists(self, group_name):
    """ Check whether a group with the given name already exists """
    groups = self.m["groups"]
    for g in groups:
        if (g["group_name"] == group_name):
            return True
    return False
python
def is_group_name_exists(self, group_name):
    """ Check whether a group with the given name already exists """
    groups = self.m["groups"]
    for g in groups:
        if (g["group_name"] == group_name):
            return True
    return False
[ "def", "is_group_name_exists", "(", "self", ",", "group_name", ")", ":", "groups", "=", "self", ".", "m", "[", "\"groups\"", "]", "for", "g", "in", "groups", ":", "if", "(", "g", "[", "\"group_name\"", "]", "==", "group_name", ")", ":", "return", "True", "return", "False" ]
Check whether a group with the given name already exists
[ "check", "if", "group", "with", "given", "name", "is", "already", "exists" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_service_metadata.py#L103-L109
236,954
singnet/snet-cli
snet_cli/mpe_service_metadata.py
MPEServiceMetadata.get_group_name_nonetrick
def get_group_name_nonetrick(self, group_name = None):
    """ In all getter functions, group_name can be None in the case of a single payment group """
    groups = self.m["groups"]
    if (len(groups) == 0):
        raise Exception("Cannot find any groups in metadata")
    if (not group_name):
        if (len(groups) > 1):
            raise Exception("We have more than one payment group in metadata, so group_name should be specified")
        return groups[0]["group_name"]
    return group_name
python
def get_group_name_nonetrick(self, group_name = None):
    """ In all getter functions, group_name can be None in the case of a single payment group """
    groups = self.m["groups"]
    if (len(groups) == 0):
        raise Exception("Cannot find any groups in metadata")
    if (not group_name):
        if (len(groups) > 1):
            raise Exception("We have more than one payment group in metadata, so group_name should be specified")
        return groups[0]["group_name"]
    return group_name
[ "def", "get_group_name_nonetrick", "(", "self", ",", "group_name", "=", "None", ")", ":", "groups", "=", "self", ".", "m", "[", "\"groups\"", "]", "if", "(", "len", "(", "groups", ")", "==", "0", ")", ":", "raise", "Exception", "(", "\"Cannot find any groups in metadata\"", ")", "if", "(", "not", "group_name", ")", ":", "if", "(", "len", "(", "groups", ")", ">", "1", ")", ":", "raise", "Exception", "(", "\"We have more than one payment group in metadata, so group_name should be specified\"", ")", "return", "groups", "[", "0", "]", "[", "\"group_name\"", "]", "return", "group_name" ]
In all getter functions, group_name can be None in the case of a single payment group
[ "In", "all", "getter", "function", "in", "case", "of", "single", "payment", "group", "group_name", "can", "be", "None" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_service_metadata.py#L145-L154
236,955
singnet/snet-cli
snet_cli/utils_ipfs.py
get_from_ipfs_and_checkhash
def get_from_ipfs_and_checkhash(ipfs_client, ipfs_hash_base58, validate=True): """ Get file from ipfs We must check the hash becasue we cannot believe that ipfs_client wasn't been compromise """ if validate: from snet_cli.resources.proto.unixfs_pb2 import Data from snet_cli.resources.proto.merckledag_pb2 import MerkleNode # No nice Python library to parse ipfs blocks, so do it ourselves. block_data = ipfs_client.block_get(ipfs_hash_base58) mn = MerkleNode() mn.ParseFromString(block_data) unixfs_data = Data() unixfs_data.ParseFromString(mn.Data) assert unixfs_data.Type == unixfs_data.DataType.Value('File'), "IPFS hash must be a file" data = unixfs_data.Data # multihash has a badly registered base58 codec, overwrite it... multihash.CodecReg.register('base58', base58.b58encode, base58.b58decode) # create a multihash object from our ipfs hash mh = multihash.decode(ipfs_hash_base58.encode('ascii'), 'base58') # Convenience method lets us directly use a multihash to verify data if not mh.verify(block_data): raise Exception("IPFS hash mismatch with data") else: data = ipfs_client.cat(ipfs_hash_base58) return data
python
def get_from_ipfs_and_checkhash(ipfs_client, ipfs_hash_base58, validate=True): """ Get file from ipfs We must check the hash becasue we cannot believe that ipfs_client wasn't been compromise """ if validate: from snet_cli.resources.proto.unixfs_pb2 import Data from snet_cli.resources.proto.merckledag_pb2 import MerkleNode # No nice Python library to parse ipfs blocks, so do it ourselves. block_data = ipfs_client.block_get(ipfs_hash_base58) mn = MerkleNode() mn.ParseFromString(block_data) unixfs_data = Data() unixfs_data.ParseFromString(mn.Data) assert unixfs_data.Type == unixfs_data.DataType.Value('File'), "IPFS hash must be a file" data = unixfs_data.Data # multihash has a badly registered base58 codec, overwrite it... multihash.CodecReg.register('base58', base58.b58encode, base58.b58decode) # create a multihash object from our ipfs hash mh = multihash.decode(ipfs_hash_base58.encode('ascii'), 'base58') # Convenience method lets us directly use a multihash to verify data if not mh.verify(block_data): raise Exception("IPFS hash mismatch with data") else: data = ipfs_client.cat(ipfs_hash_base58) return data
[ "def", "get_from_ipfs_and_checkhash", "(", "ipfs_client", ",", "ipfs_hash_base58", ",", "validate", "=", "True", ")", ":", "if", "validate", ":", "from", "snet_cli", ".", "resources", ".", "proto", ".", "unixfs_pb2", "import", "Data", "from", "snet_cli", ".", "resources", ".", "proto", ".", "merckledag_pb2", "import", "MerkleNode", "# No nice Python library to parse ipfs blocks, so do it ourselves.", "block_data", "=", "ipfs_client", ".", "block_get", "(", "ipfs_hash_base58", ")", "mn", "=", "MerkleNode", "(", ")", "mn", ".", "ParseFromString", "(", "block_data", ")", "unixfs_data", "=", "Data", "(", ")", "unixfs_data", ".", "ParseFromString", "(", "mn", ".", "Data", ")", "assert", "unixfs_data", ".", "Type", "==", "unixfs_data", ".", "DataType", ".", "Value", "(", "'File'", ")", ",", "\"IPFS hash must be a file\"", "data", "=", "unixfs_data", ".", "Data", "# multihash has a badly registered base58 codec, overwrite it...", "multihash", ".", "CodecReg", ".", "register", "(", "'base58'", ",", "base58", ".", "b58encode", ",", "base58", ".", "b58decode", ")", "# create a multihash object from our ipfs hash", "mh", "=", "multihash", ".", "decode", "(", "ipfs_hash_base58", ".", "encode", "(", "'ascii'", ")", ",", "'base58'", ")", "# Convenience method lets us directly use a multihash to verify data", "if", "not", "mh", ".", "verify", "(", "block_data", ")", ":", "raise", "Exception", "(", "\"IPFS hash mismatch with data\"", ")", "else", ":", "data", "=", "ipfs_client", ".", "cat", "(", "ipfs_hash_base58", ")", "return", "data" ]
Get file from ipfs. We must check the hash because we cannot be sure that the ipfs_client has not been compromised
[ "Get", "file", "from", "ipfs", "We", "must", "check", "the", "hash", "becasue", "we", "cannot", "believe", "that", "ipfs_client", "wasn", "t", "been", "compromise" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/utils_ipfs.py#L35-L63
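The check above relies on the multihash package; for a CIDv0 ('Qm...') hash the same verification can be written directly with hashlib and base58, since such a hash is base58(0x12 0x20 || sha256(block)). A standalone illustration, not the code path snet_cli actually uses:

import hashlib
import base58

def verify_cidv0(ipfs_hash_base58, block_data):
    mh = base58.b58decode(ipfs_hash_base58)
    # multihash prefix: 0x12 = sha2-256, 0x20 = 32-byte digest length
    if mh[:2] != b"\x12\x20":
        raise ValueError("not a sha2-256 multihash")
    return mh[2:] == hashlib.sha256(block_data).digest()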
236,956
singnet/snet-cli
snet_cli/utils_ipfs.py
hash_to_bytesuri
def hash_to_bytesuri(s):
    """
    Convert to the bytes uri format used in the Registry contract
    """
    # TODO: we should pad string with zeros till closest 32 bytes word because of a bug in processReceipt (in snet_cli.contract.process_receipt)
    s = "ipfs://" + s
    return s.encode("ascii").ljust(32 * (len(s)//32 + 1), b"\0")
python
def hash_to_bytesuri(s):
    """
    Convert to the bytes uri format used in the Registry contract
    """
    # TODO: we should pad string with zeros till closest 32 bytes word because of a bug in processReceipt (in snet_cli.contract.process_receipt)
    s = "ipfs://" + s
    return s.encode("ascii").ljust(32 * (len(s)//32 + 1), b"\0")
[ "def", "hash_to_bytesuri", "(", "s", ")", ":", "# TODO: we should pad string with zeros till closest 32 bytes word because of a bug in processReceipt (in snet_cli.contract.process_receipt)", "s", "=", "\"ipfs://\"", "+", "s", "return", "s", ".", "encode", "(", "\"ascii\"", ")", ".", "ljust", "(", "32", "*", "(", "len", "(", "s", ")", "//", "32", "+", "1", ")", ",", "b\"\\0\"", ")" ]
Convert to the bytes uri format used in the Registry contract
[ "Convert", "in", "and", "from", "bytes", "uri", "format", "used", "in", "Registry", "contract" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/utils_ipfs.py#L65-L71
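What the padding in hash_to_bytesuri() produces, shown on a hypothetical hash string (a real base58 IPFS hash is 46 characters, so "ipfs://" plus the hash is 53 bytes and pads up to 64):

s = "ipfs://" + "Qm" + "X" * 44                            # hypothetical 46-character hash
b = s.encode("ascii").ljust(32 * (len(s) // 32 + 1), b"\0")
print(len(s), len(b))                                      # 53 64: zero-padded to the next 32-byte word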
236,957
singnet/snet-cli
snet_cli/mpe_treasurer_command.py
MPETreasurerCommand._get_stub_and_request_classes
def _get_stub_and_request_classes(self, service_name): """ import protobuf and return stub and request class """ # Compile protobuf if needed codegen_dir = Path.home().joinpath(".snet", "mpe_client", "control_service") proto_dir = Path(__file__).absolute().parent.joinpath("resources", "proto") if (not codegen_dir.joinpath("control_service_pb2.py").is_file()): compile_proto(proto_dir, codegen_dir, proto_file = "control_service.proto") stub_class, request_class, _ = import_protobuf_from_dir(codegen_dir, service_name) return stub_class, request_class
python
def _get_stub_and_request_classes(self, service_name): """ import protobuf and return stub and request class """ # Compile protobuf if needed codegen_dir = Path.home().joinpath(".snet", "mpe_client", "control_service") proto_dir = Path(__file__).absolute().parent.joinpath("resources", "proto") if (not codegen_dir.joinpath("control_service_pb2.py").is_file()): compile_proto(proto_dir, codegen_dir, proto_file = "control_service.proto") stub_class, request_class, _ = import_protobuf_from_dir(codegen_dir, service_name) return stub_class, request_class
[ "def", "_get_stub_and_request_classes", "(", "self", ",", "service_name", ")", ":", "# Compile protobuf if needed", "codegen_dir", "=", "Path", ".", "home", "(", ")", ".", "joinpath", "(", "\".snet\"", ",", "\"mpe_client\"", ",", "\"control_service\"", ")", "proto_dir", "=", "Path", "(", "__file__", ")", ".", "absolute", "(", ")", ".", "parent", ".", "joinpath", "(", "\"resources\"", ",", "\"proto\"", ")", "if", "(", "not", "codegen_dir", ".", "joinpath", "(", "\"control_service_pb2.py\"", ")", ".", "is_file", "(", ")", ")", ":", "compile_proto", "(", "proto_dir", ",", "codegen_dir", ",", "proto_file", "=", "\"control_service.proto\"", ")", "stub_class", ",", "request_class", ",", "_", "=", "import_protobuf_from_dir", "(", "codegen_dir", ",", "service_name", ")", "return", "stub_class", ",", "request_class" ]
import protobuf and return stub and request class
[ "import", "protobuf", "and", "return", "stub", "and", "request", "class" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_treasurer_command.py#L31-L40
236,958
singnet/snet-cli
snet_cli/mpe_treasurer_command.py
MPETreasurerCommand._start_claim_channels
def _start_claim_channels(self, grpc_channel, channels_ids): """ Safely run StartClaim for given channels """ unclaimed_payments = self._call_GetListUnclaimed(grpc_channel) unclaimed_payments_dict = {p["channel_id"] : p for p in unclaimed_payments} to_claim = [] for channel_id in channels_ids: if (channel_id not in unclaimed_payments_dict or unclaimed_payments_dict[channel_id]["amount"] == 0): self._printout("There is nothing to claim for channel %i, we skip it"%channel_id) continue blockchain = self._get_channel_state_from_blockchain(channel_id) if (unclaimed_payments_dict[channel_id]["nonce"] != blockchain["nonce"]): self._printout("Old payment for channel %i is still in progress. Please run claim for this channel later."%channel_id) continue to_claim.append((channel_id, blockchain["nonce"])) payments = [self._call_StartClaim(grpc_channel, channel_id, nonce) for channel_id, nonce in to_claim] return payments
python
def _start_claim_channels(self, grpc_channel, channels_ids): """ Safely run StartClaim for given channels """ unclaimed_payments = self._call_GetListUnclaimed(grpc_channel) unclaimed_payments_dict = {p["channel_id"] : p for p in unclaimed_payments} to_claim = [] for channel_id in channels_ids: if (channel_id not in unclaimed_payments_dict or unclaimed_payments_dict[channel_id]["amount"] == 0): self._printout("There is nothing to claim for channel %i, we skip it"%channel_id) continue blockchain = self._get_channel_state_from_blockchain(channel_id) if (unclaimed_payments_dict[channel_id]["nonce"] != blockchain["nonce"]): self._printout("Old payment for channel %i is still in progress. Please run claim for this channel later."%channel_id) continue to_claim.append((channel_id, blockchain["nonce"])) payments = [self._call_StartClaim(grpc_channel, channel_id, nonce) for channel_id, nonce in to_claim] return payments
[ "def", "_start_claim_channels", "(", "self", ",", "grpc_channel", ",", "channels_ids", ")", ":", "unclaimed_payments", "=", "self", ".", "_call_GetListUnclaimed", "(", "grpc_channel", ")", "unclaimed_payments_dict", "=", "{", "p", "[", "\"channel_id\"", "]", ":", "p", "for", "p", "in", "unclaimed_payments", "}", "to_claim", "=", "[", "]", "for", "channel_id", "in", "channels_ids", ":", "if", "(", "channel_id", "not", "in", "unclaimed_payments_dict", "or", "unclaimed_payments_dict", "[", "channel_id", "]", "[", "\"amount\"", "]", "==", "0", ")", ":", "self", ".", "_printout", "(", "\"There is nothing to claim for channel %i, we skip it\"", "%", "channel_id", ")", "continue", "blockchain", "=", "self", ".", "_get_channel_state_from_blockchain", "(", "channel_id", ")", "if", "(", "unclaimed_payments_dict", "[", "channel_id", "]", "[", "\"nonce\"", "]", "!=", "blockchain", "[", "\"nonce\"", "]", ")", ":", "self", ".", "_printout", "(", "\"Old payment for channel %i is still in progress. Please run claim for this channel later.\"", "%", "channel_id", ")", "continue", "to_claim", ".", "append", "(", "(", "channel_id", ",", "blockchain", "[", "\"nonce\"", "]", ")", ")", "payments", "=", "[", "self", ".", "_call_StartClaim", "(", "grpc_channel", ",", "channel_id", ",", "nonce", ")", "for", "channel_id", ",", "nonce", "in", "to_claim", "]", "return", "payments" ]
Safely run StartClaim for given channels
[ "Safely", "run", "StartClaim", "for", "given", "channels" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_treasurer_command.py#L103-L120
236,959
singnet/snet-cli
snet_cli/mpe_treasurer_command.py
MPETreasurerCommand._claim_in_progress_and_claim_channels
def _claim_in_progress_and_claim_channels(self, grpc_channel, channels):
    """ Claim all payments already 'in progress', and then claim the given channels """
    # first we get the list of all 'payments in progress' in case we 'lost' some payments.
    payments = self._call_GetListInProgress(grpc_channel)
    if (len(payments) > 0):
        self._printout("There are %i payments in 'progress' (they haven't been claimed in blockchain). We will claim them."%len(payments))
        self._blockchain_claim(payments)
    payments = self._start_claim_channels(grpc_channel, channels)
    self._blockchain_claim(payments)
python
def _claim_in_progress_and_claim_channels(self, grpc_channel, channels):
    """ Claim all payments already 'in progress', and then claim the given channels """
    # first we get the list of all 'payments in progress' in case we 'lost' some payments.
    payments = self._call_GetListInProgress(grpc_channel)
    if (len(payments) > 0):
        self._printout("There are %i payments in 'progress' (they haven't been claimed in blockchain). We will claim them."%len(payments))
        self._blockchain_claim(payments)
    payments = self._start_claim_channels(grpc_channel, channels)
    self._blockchain_claim(payments)
[ "def", "_claim_in_progress_and_claim_channels", "(", "self", ",", "grpc_channel", ",", "channels", ")", ":", "# first we get the list of all 'payments in progress' in case we 'lost' some payments.", "payments", "=", "self", ".", "_call_GetListInProgress", "(", "grpc_channel", ")", "if", "(", "len", "(", "payments", ")", ">", "0", ")", ":", "self", ".", "_printout", "(", "\"There are %i payments in 'progress' (they haven't been claimed in blockchain). We will claim them.\"", "%", "len", "(", "payments", ")", ")", "self", ".", "_blockchain_claim", "(", "payments", ")", "payments", "=", "self", ".", "_start_claim_channels", "(", "grpc_channel", ",", "channels", ")", "self", ".", "_blockchain_claim", "(", "payments", ")" ]
Claim all payments already 'in progress', and then claim the given channels
[ "Claim", "all", "pending", "payments", "in", "progress", "and", "after", "we", "claim", "given", "channels" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_treasurer_command.py#L122-L130
236,960
singnet/snet-cli
snet_cli/config.py
Config.create_default_config
def create_default_config(self): """ Create default configuration if config file does not exist """ # make config directory with the minimal possible permission self._config_file.parent.mkdir(mode=0o700, exist_ok=True) self["network.kovan"] = {"default_eth_rpc_endpoint": "https://kovan.infura.io", "default_gas_price" : "medium"} self["network.mainnet"] = {"default_eth_rpc_endpoint": "https://mainnet.infura.io", "default_gas_price" : "medium"} self["network.ropsten"] = {"default_eth_rpc_endpoint": "https://ropsten.infura.io", "default_gas_price" : "medium"} self["network.rinkeby"] = {"default_eth_rpc_endpoint": "https://rinkeby.infura.io", "default_gas_price" : "medium"} self["ipfs"] = {"default_ipfs_endpoint": "http://ipfs.singularitynet.io:80"} self["session"] = { "network": "kovan" } self._persist() print("We've created configuration file with default values in: %s\n"%str(self._config_file))
python
def create_default_config(self): """ Create default configuration if config file does not exist """ # make config directory with the minimal possible permission self._config_file.parent.mkdir(mode=0o700, exist_ok=True) self["network.kovan"] = {"default_eth_rpc_endpoint": "https://kovan.infura.io", "default_gas_price" : "medium"} self["network.mainnet"] = {"default_eth_rpc_endpoint": "https://mainnet.infura.io", "default_gas_price" : "medium"} self["network.ropsten"] = {"default_eth_rpc_endpoint": "https://ropsten.infura.io", "default_gas_price" : "medium"} self["network.rinkeby"] = {"default_eth_rpc_endpoint": "https://rinkeby.infura.io", "default_gas_price" : "medium"} self["ipfs"] = {"default_ipfs_endpoint": "http://ipfs.singularitynet.io:80"} self["session"] = { "network": "kovan" } self._persist() print("We've created configuration file with default values in: %s\n"%str(self._config_file))
[ "def", "create_default_config", "(", "self", ")", ":", "# make config directory with the minimal possible permission", "self", ".", "_config_file", ".", "parent", ".", "mkdir", "(", "mode", "=", "0o700", ",", "exist_ok", "=", "True", ")", "self", "[", "\"network.kovan\"", "]", "=", "{", "\"default_eth_rpc_endpoint\"", ":", "\"https://kovan.infura.io\"", ",", "\"default_gas_price\"", ":", "\"medium\"", "}", "self", "[", "\"network.mainnet\"", "]", "=", "{", "\"default_eth_rpc_endpoint\"", ":", "\"https://mainnet.infura.io\"", ",", "\"default_gas_price\"", ":", "\"medium\"", "}", "self", "[", "\"network.ropsten\"", "]", "=", "{", "\"default_eth_rpc_endpoint\"", ":", "\"https://ropsten.infura.io\"", ",", "\"default_gas_price\"", ":", "\"medium\"", "}", "self", "[", "\"network.rinkeby\"", "]", "=", "{", "\"default_eth_rpc_endpoint\"", ":", "\"https://rinkeby.infura.io\"", ",", "\"default_gas_price\"", ":", "\"medium\"", "}", "self", "[", "\"ipfs\"", "]", "=", "{", "\"default_ipfs_endpoint\"", ":", "\"http://ipfs.singularitynet.io:80\"", "}", "self", "[", "\"session\"", "]", "=", "{", "\"network\"", ":", "\"kovan\"", "}", "self", ".", "_persist", "(", ")", "print", "(", "\"We've created configuration file with default values in: %s\\n\"", "%", "str", "(", "self", ".", "_config_file", ")", ")" ]
Create default configuration if config file does not exist
[ "Create", "default", "configuration", "if", "config", "file", "does", "not", "exist" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/config.py#L175-L187
236,961
singnet/snet-cli
snet_cli/utils_proto.py
switch_to_json_payload_encoding
def switch_to_json_payload_encoding(call_fn, response_class): """ Switch payload encoding to JSON for GRPC call """ def json_serializer(*args, **kwargs): return bytes(json_format.MessageToJson(args[0], True, preserving_proto_field_name=True), "utf-8") def json_deserializer(*args, **kwargs): resp = response_class() json_format.Parse(args[0], resp, True) return resp call_fn._request_serializer = json_serializer call_fn._response_deserializer = json_deserializer
python
def switch_to_json_payload_encoding(call_fn, response_class): """ Switch payload encoding to JSON for GRPC call """ def json_serializer(*args, **kwargs): return bytes(json_format.MessageToJson(args[0], True, preserving_proto_field_name=True), "utf-8") def json_deserializer(*args, **kwargs): resp = response_class() json_format.Parse(args[0], resp, True) return resp call_fn._request_serializer = json_serializer call_fn._response_deserializer = json_deserializer
[ "def", "switch_to_json_payload_encoding", "(", "call_fn", ",", "response_class", ")", ":", "def", "json_serializer", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "bytes", "(", "json_format", ".", "MessageToJson", "(", "args", "[", "0", "]", ",", "True", ",", "preserving_proto_field_name", "=", "True", ")", ",", "\"utf-8\"", ")", "def", "json_deserializer", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "resp", "=", "response_class", "(", ")", "json_format", ".", "Parse", "(", "args", "[", "0", "]", ",", "resp", ",", "True", ")", "return", "resp", "call_fn", ".", "_request_serializer", "=", "json_serializer", "call_fn", ".", "_response_deserializer", "=", "json_deserializer" ]
Switch payload encoding to JSON for GRPC call
[ "Switch", "payload", "encoding", "to", "JSON", "for", "GRPC", "call" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/utils_proto.py#L72-L81
236,962
singnet/snet-cli
snet_cli/mpe_account_command.py
MPEAccountCommand.print_agi_and_mpe_balances
def print_agi_and_mpe_balances(self): """ Print balance of ETH, AGI, and MPE wallet """ if (self.args.account): account = self.args.account else: account = self.ident.address eth_wei = self.w3.eth.getBalance(account) agi_cogs = self.call_contract_command("SingularityNetToken", "balanceOf", [account]) mpe_cogs = self.call_contract_command("MultiPartyEscrow", "balances", [account]) # we cannot use _pprint here because it doesn't conserve order yet self._printout(" account: %s"%account) self._printout(" ETH: %s"%self.w3.fromWei(eth_wei, 'ether')) self._printout(" AGI: %s"%cogs2stragi(agi_cogs)) self._printout(" MPE: %s"%cogs2stragi(mpe_cogs))
python
def print_agi_and_mpe_balances(self): """ Print balance of ETH, AGI, and MPE wallet """ if (self.args.account): account = self.args.account else: account = self.ident.address eth_wei = self.w3.eth.getBalance(account) agi_cogs = self.call_contract_command("SingularityNetToken", "balanceOf", [account]) mpe_cogs = self.call_contract_command("MultiPartyEscrow", "balances", [account]) # we cannot use _pprint here because it doesn't conserve order yet self._printout(" account: %s"%account) self._printout(" ETH: %s"%self.w3.fromWei(eth_wei, 'ether')) self._printout(" AGI: %s"%cogs2stragi(agi_cogs)) self._printout(" MPE: %s"%cogs2stragi(mpe_cogs))
[ "def", "print_agi_and_mpe_balances", "(", "self", ")", ":", "if", "(", "self", ".", "args", ".", "account", ")", ":", "account", "=", "self", ".", "args", ".", "account", "else", ":", "account", "=", "self", ".", "ident", ".", "address", "eth_wei", "=", "self", ".", "w3", ".", "eth", ".", "getBalance", "(", "account", ")", "agi_cogs", "=", "self", ".", "call_contract_command", "(", "\"SingularityNetToken\"", ",", "\"balanceOf\"", ",", "[", "account", "]", ")", "mpe_cogs", "=", "self", ".", "call_contract_command", "(", "\"MultiPartyEscrow\"", ",", "\"balances\"", ",", "[", "account", "]", ")", "# we cannot use _pprint here because it doesn't conserve order yet", "self", ".", "_printout", "(", "\" account: %s\"", "%", "account", ")", "self", ".", "_printout", "(", "\" ETH: %s\"", "%", "self", ".", "w3", ".", "fromWei", "(", "eth_wei", ",", "'ether'", ")", ")", "self", ".", "_printout", "(", "\" AGI: %s\"", "%", "cogs2stragi", "(", "agi_cogs", ")", ")", "self", ".", "_printout", "(", "\" MPE: %s\"", "%", "cogs2stragi", "(", "mpe_cogs", ")", ")" ]
Print balance of ETH, AGI, and MPE wallet
[ "Print", "balance", "of", "ETH", "AGI", "and", "MPE", "wallet" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_account_command.py#L10-L24
236,963
singnet/snet-cli
snet_cli/mpe_service_command.py
MPEServiceCommand.publish_proto_in_ipfs
def publish_proto_in_ipfs(self): """ Publish proto files in ipfs and print hash """ ipfs_hash_base58 = utils_ipfs.publish_proto_in_ipfs(self._get_ipfs_client(), self.args.protodir) self._printout(ipfs_hash_base58)
python
def publish_proto_in_ipfs(self): """ Publish proto files in ipfs and print hash """ ipfs_hash_base58 = utils_ipfs.publish_proto_in_ipfs(self._get_ipfs_client(), self.args.protodir) self._printout(ipfs_hash_base58)
[ "def", "publish_proto_in_ipfs", "(", "self", ")", ":", "ipfs_hash_base58", "=", "utils_ipfs", ".", "publish_proto_in_ipfs", "(", "self", ".", "_get_ipfs_client", "(", ")", ",", "self", ".", "args", ".", "protodir", ")", "self", ".", "_printout", "(", "ipfs_hash_base58", ")" ]
Publish proto files in ipfs and print hash
[ "Publish", "proto", "files", "in", "ipfs", "and", "print", "hash" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_service_command.py#L15-L18
236,964
singnet/snet-cli
snet_cli/mpe_service_command.py
MPEServiceCommand.publish_proto_metadata_update
def publish_proto_metadata_update(self): """ Publish protobuf model in ipfs and update existing metadata file """ metadata = load_mpe_service_metadata(self.args.metadata_file) ipfs_hash_base58 = utils_ipfs.publish_proto_in_ipfs(self._get_ipfs_client(), self.args.protodir) metadata.set_simple_field("model_ipfs_hash", ipfs_hash_base58) metadata.save_pretty(self.args.metadata_file)
python
def publish_proto_metadata_update(self): """ Publish protobuf model in ipfs and update existing metadata file """ metadata = load_mpe_service_metadata(self.args.metadata_file) ipfs_hash_base58 = utils_ipfs.publish_proto_in_ipfs(self._get_ipfs_client(), self.args.protodir) metadata.set_simple_field("model_ipfs_hash", ipfs_hash_base58) metadata.save_pretty(self.args.metadata_file)
[ "def", "publish_proto_metadata_update", "(", "self", ")", ":", "metadata", "=", "load_mpe_service_metadata", "(", "self", ".", "args", ".", "metadata_file", ")", "ipfs_hash_base58", "=", "utils_ipfs", ".", "publish_proto_in_ipfs", "(", "self", ".", "_get_ipfs_client", "(", ")", ",", "self", ".", "args", ".", "protodir", ")", "metadata", ".", "set_simple_field", "(", "\"model_ipfs_hash\"", ",", "ipfs_hash_base58", ")", "metadata", ".", "save_pretty", "(", "self", ".", "args", ".", "metadata_file", ")" ]
Publish protobuf model in ipfs and update existing metadata file
[ "Publish", "protobuf", "model", "in", "ipfs", "and", "update", "existing", "metadata", "file" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_service_command.py#L37-L42
236,965
singnet/snet-cli
snet_cli/mpe_channel_command.py
MPEChannelCommand._get_persistent_mpe_dir
def _get_persistent_mpe_dir(self): """ get persistent storage for mpe """ mpe_address = self.get_mpe_address().lower() registry_address = self.get_registry_address().lower() return Path.home().joinpath(".snet", "mpe_client", "%s_%s"%(mpe_address, registry_address))
python
def _get_persistent_mpe_dir(self): """ get persistent storage for mpe """ mpe_address = self.get_mpe_address().lower() registry_address = self.get_registry_address().lower() return Path.home().joinpath(".snet", "mpe_client", "%s_%s"%(mpe_address, registry_address))
[ "def", "_get_persistent_mpe_dir", "(", "self", ")", ":", "mpe_address", "=", "self", ".", "get_mpe_address", "(", ")", ".", "lower", "(", ")", "registry_address", "=", "self", ".", "get_registry_address", "(", ")", ".", "lower", "(", ")", "return", "Path", ".", "home", "(", ")", ".", "joinpath", "(", "\".snet\"", ",", "\"mpe_client\"", ",", "\"%s_%s\"", "%", "(", "mpe_address", ",", "registry_address", ")", ")" ]
get persistent storage for mpe
[ "get", "persistent", "storage", "for", "mpe" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_channel_command.py#L21-L25
236,966
singnet/snet-cli
snet_cli/mpe_channel_command.py
MPEChannelCommand._check_mpe_address_metadata
def _check_mpe_address_metadata(self, metadata): """ we make sure that MultiPartyEscrow address from metadata is correct """ mpe_address = self.get_mpe_address() if (str(mpe_address).lower() != str(metadata["mpe_address"]).lower()): raise Exception("MultiPartyEscrow contract address from metadata %s do not correspond to current MultiPartyEscrow address %s"%(metadata["mpe_address"], mpe_address))
python
def _check_mpe_address_metadata(self, metadata): """ we make sure that MultiPartyEscrow address from metadata is correct """ mpe_address = self.get_mpe_address() if (str(mpe_address).lower() != str(metadata["mpe_address"]).lower()): raise Exception("MultiPartyEscrow contract address from metadata %s do not correspond to current MultiPartyEscrow address %s"%(metadata["mpe_address"], mpe_address))
[ "def", "_check_mpe_address_metadata", "(", "self", ",", "metadata", ")", ":", "mpe_address", "=", "self", ".", "get_mpe_address", "(", ")", "if", "(", "str", "(", "mpe_address", ")", ".", "lower", "(", ")", "!=", "str", "(", "metadata", "[", "\"mpe_address\"", "]", ")", ".", "lower", "(", ")", ")", ":", "raise", "Exception", "(", "\"MultiPartyEscrow contract address from metadata %s do not correspond to current MultiPartyEscrow address %s\"", "%", "(", "metadata", "[", "\"mpe_address\"", "]", ",", "mpe_address", ")", ")" ]
we make sure that MultiPartyEscrow address from metadata is correct
[ "we", "make", "sure", "that", "MultiPartyEscrow", "address", "from", "metadata", "is", "correct" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_channel_command.py#L74-L78
236,967
singnet/snet-cli
snet_cli/mpe_channel_command.py
MPEChannelCommand._init_or_update_registered_service_if_needed
def _init_or_update_registered_service_if_needed(self): ''' similar to _init_or_update_service_if_needed but we get service_registraion from registry, so we can update only registered services ''' if (self.is_service_initialized()): old_reg = self._read_service_info(self.args.org_id, self.args.service_id) # metadataURI will be in old_reg only for service which was initilized from registry (not from metadata) # we do nothing for services which were initilized from metadata if ("metadataURI" not in old_reg): return service_registration = self._get_service_registration() # if metadataURI hasn't been changed we do nothing if (not self.is_metadataURI_has_changed(service_registration)): return else: service_registration = self._get_service_registration() service_metadata = self._get_service_metadata_from_registry() self._init_or_update_service_if_needed(service_metadata, service_registration)
python
def _init_or_update_registered_service_if_needed(self): ''' similar to _init_or_update_service_if_needed but we get service_registraion from registry, so we can update only registered services ''' if (self.is_service_initialized()): old_reg = self._read_service_info(self.args.org_id, self.args.service_id) # metadataURI will be in old_reg only for service which was initilized from registry (not from metadata) # we do nothing for services which were initilized from metadata if ("metadataURI" not in old_reg): return service_registration = self._get_service_registration() # if metadataURI hasn't been changed we do nothing if (not self.is_metadataURI_has_changed(service_registration)): return else: service_registration = self._get_service_registration() service_metadata = self._get_service_metadata_from_registry() self._init_or_update_service_if_needed(service_metadata, service_registration)
[ "def", "_init_or_update_registered_service_if_needed", "(", "self", ")", ":", "if", "(", "self", ".", "is_service_initialized", "(", ")", ")", ":", "old_reg", "=", "self", ".", "_read_service_info", "(", "self", ".", "args", ".", "org_id", ",", "self", ".", "args", ".", "service_id", ")", "# metadataURI will be in old_reg only for service which was initilized from registry (not from metadata)", "# we do nothing for services which were initilized from metadata", "if", "(", "\"metadataURI\"", "not", "in", "old_reg", ")", ":", "return", "service_registration", "=", "self", ".", "_get_service_registration", "(", ")", "# if metadataURI hasn't been changed we do nothing", "if", "(", "not", "self", ".", "is_metadataURI_has_changed", "(", "service_registration", ")", ")", ":", "return", "else", ":", "service_registration", "=", "self", ".", "_get_service_registration", "(", ")", "service_metadata", "=", "self", ".", "_get_service_metadata_from_registry", "(", ")", "self", ".", "_init_or_update_service_if_needed", "(", "service_metadata", ",", "service_registration", ")" ]
similar to _init_or_update_service_if_needed but we get service_registration from the registry, so we can update only registered services
[ "similar", "to", "_init_or_update_service_if_needed", "but", "we", "get", "service_registration", "from", "the", "registry", "so", "we", "can", "update", "only", "registered", "services" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_channel_command.py#L115-L136
236,968
singnet/snet-cli
snet_cli/mpe_channel_command.py
MPEChannelCommand._smart_get_initialized_channel_for_service
def _smart_get_initialized_channel_for_service(self, metadata, filter_by, is_try_initailize = True): ''' - filter_by can be sender or signer ''' channels = self._get_initialized_channels_for_service(self.args.org_id, self.args.service_id) group_id = metadata.get_group_id(self.args.group_name) channels = [c for c in channels if c[filter_by].lower() == self.ident.address.lower() and c["groupId"] == group_id] if (len(channels) == 0 and is_try_initailize): # this will work only in simple case where signer == sender self._initialize_already_opened_channel(metadata, self.ident.address, self.ident.address) return self._smart_get_initialized_channel_for_service(metadata, filter_by, is_try_initailize = False) if (len(channels) == 0): raise Exception("Cannot find initialized channel for service with org_id=%s service_id=%s and signer=%s"%(self.args.org_id, self.args.service_id, self.ident.address)) if (self.args.channel_id is None): if (len(channels) > 1): channel_ids = [channel["channelId"] for channel in channels] raise Exception("We have several initialized channel: %s. You should use --channel-id to select one"%str(channel_ids)) return channels[0] for channel in channels: if (channel["channelId"] == self.args.channel_id): return channel raise Exception("Channel %i has not been initialized or your are not the sender/signer of it"%self.args.channel_id)
python
def _smart_get_initialized_channel_for_service(self, metadata, filter_by, is_try_initailize = True): ''' - filter_by can be sender or signer ''' channels = self._get_initialized_channels_for_service(self.args.org_id, self.args.service_id) group_id = metadata.get_group_id(self.args.group_name) channels = [c for c in channels if c[filter_by].lower() == self.ident.address.lower() and c["groupId"] == group_id] if (len(channels) == 0 and is_try_initailize): # this will work only in simple case where signer == sender self._initialize_already_opened_channel(metadata, self.ident.address, self.ident.address) return self._smart_get_initialized_channel_for_service(metadata, filter_by, is_try_initailize = False) if (len(channels) == 0): raise Exception("Cannot find initialized channel for service with org_id=%s service_id=%s and signer=%s"%(self.args.org_id, self.args.service_id, self.ident.address)) if (self.args.channel_id is None): if (len(channels) > 1): channel_ids = [channel["channelId"] for channel in channels] raise Exception("We have several initialized channel: %s. You should use --channel-id to select one"%str(channel_ids)) return channels[0] for channel in channels: if (channel["channelId"] == self.args.channel_id): return channel raise Exception("Channel %i has not been initialized or your are not the sender/signer of it"%self.args.channel_id)
[ "def", "_smart_get_initialized_channel_for_service", "(", "self", ",", "metadata", ",", "filter_by", ",", "is_try_initailize", "=", "True", ")", ":", "channels", "=", "self", ".", "_get_initialized_channels_for_service", "(", "self", ".", "args", ".", "org_id", ",", "self", ".", "args", ".", "service_id", ")", "group_id", "=", "metadata", ".", "get_group_id", "(", "self", ".", "args", ".", "group_name", ")", "channels", "=", "[", "c", "for", "c", "in", "channels", "if", "c", "[", "filter_by", "]", ".", "lower", "(", ")", "==", "self", ".", "ident", ".", "address", ".", "lower", "(", ")", "and", "c", "[", "\"groupId\"", "]", "==", "group_id", "]", "if", "(", "len", "(", "channels", ")", "==", "0", "and", "is_try_initailize", ")", ":", "# this will work only in simple case where signer == sender", "self", ".", "_initialize_already_opened_channel", "(", "metadata", ",", "self", ".", "ident", ".", "address", ",", "self", ".", "ident", ".", "address", ")", "return", "self", ".", "_smart_get_initialized_channel_for_service", "(", "metadata", ",", "filter_by", ",", "is_try_initailize", "=", "False", ")", "if", "(", "len", "(", "channels", ")", "==", "0", ")", ":", "raise", "Exception", "(", "\"Cannot find initialized channel for service with org_id=%s service_id=%s and signer=%s\"", "%", "(", "self", ".", "args", ".", "org_id", ",", "self", ".", "args", ".", "service_id", ",", "self", ".", "ident", ".", "address", ")", ")", "if", "(", "self", ".", "args", ".", "channel_id", "is", "None", ")", ":", "if", "(", "len", "(", "channels", ")", ">", "1", ")", ":", "channel_ids", "=", "[", "channel", "[", "\"channelId\"", "]", "for", "channel", "in", "channels", "]", "raise", "Exception", "(", "\"We have several initialized channel: %s. You should use --channel-id to select one\"", "%", "str", "(", "channel_ids", ")", ")", "return", "channels", "[", "0", "]", "for", "channel", "in", "channels", ":", "if", "(", "channel", "[", "\"channelId\"", "]", "==", "self", ".", "args", ".", "channel_id", ")", ":", "return", "channel", "raise", "Exception", "(", "\"Channel %i has not been initialized or your are not the sender/signer of it\"", "%", "self", ".", "args", ".", "channel_id", ")" ]
- filter_by can be sender or signer
[ "-", "filter_by", "can", "be", "sender", "or", "signer" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_channel_command.py#L297-L320
236,969
singnet/snet-cli
snet_cli/mpe_channel_command.py
MPEChannelCommand._get_all_filtered_channels
def _get_all_filtered_channels(self, topics_without_signature): """ get all filtered chanels from blockchain logs """ mpe_address = self.get_mpe_address() event_signature = self.ident.w3.sha3(text="ChannelOpen(uint256,uint256,address,address,address,bytes32,uint256,uint256)").hex() topics = [event_signature] + topics_without_signature logs = self.ident.w3.eth.getLogs({"fromBlock" : self.args.from_block, "address" : mpe_address, "topics" : topics}) abi = get_contract_def("MultiPartyEscrow") event_abi = abi_get_element_by_name(abi, "ChannelOpen") channels_ids = [get_event_data(event_abi, l)["args"]["channelId"] for l in logs] return channels_ids
python
def _get_all_filtered_channels(self, topics_without_signature): """ get all filtered chanels from blockchain logs """ mpe_address = self.get_mpe_address() event_signature = self.ident.w3.sha3(text="ChannelOpen(uint256,uint256,address,address,address,bytes32,uint256,uint256)").hex() topics = [event_signature] + topics_without_signature logs = self.ident.w3.eth.getLogs({"fromBlock" : self.args.from_block, "address" : mpe_address, "topics" : topics}) abi = get_contract_def("MultiPartyEscrow") event_abi = abi_get_element_by_name(abi, "ChannelOpen") channels_ids = [get_event_data(event_abi, l)["args"]["channelId"] for l in logs] return channels_ids
[ "def", "_get_all_filtered_channels", "(", "self", ",", "topics_without_signature", ")", ":", "mpe_address", "=", "self", ".", "get_mpe_address", "(", ")", "event_signature", "=", "self", ".", "ident", ".", "w3", ".", "sha3", "(", "text", "=", "\"ChannelOpen(uint256,uint256,address,address,address,bytes32,uint256,uint256)\"", ")", ".", "hex", "(", ")", "topics", "=", "[", "event_signature", "]", "+", "topics_without_signature", "logs", "=", "self", ".", "ident", ".", "w3", ".", "eth", ".", "getLogs", "(", "{", "\"fromBlock\"", ":", "self", ".", "args", ".", "from_block", ",", "\"address\"", ":", "mpe_address", ",", "\"topics\"", ":", "topics", "}", ")", "abi", "=", "get_contract_def", "(", "\"MultiPartyEscrow\"", ")", "event_abi", "=", "abi_get_element_by_name", "(", "abi", ",", "\"ChannelOpen\"", ")", "channels_ids", "=", "[", "get_event_data", "(", "event_abi", ",", "l", ")", "[", "\"args\"", "]", "[", "\"channelId\"", "]", "for", "l", "in", "logs", "]", "return", "channels_ids" ]
get all filtered channels from blockchain logs
[ "get", "all", "filtered", "channels", "from", "blockchain", "logs" ]
1b5ac98cb9a64211c861ead9fcfe6208f2749032
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/mpe_channel_command.py#L415-L424
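The helper above filters ChannelOpen logs by prepending the event signature hash to caller-supplied topics. As a hedged sketch of how a caller might build those extra topics: the usual web3 convention for an indexed address topic is the address left-padded to 32 bytes; which indexed position corresponds to the channel sender is an assumption here, not something stated in the record.

def address_to_topic(address):
    # 20-byte hex address left-padded to a 32-byte topic, lowercase
    return "0x" + address.lower().replace("0x", "").rjust(64, "0")

# Hypothetical call: keep only ChannelOpen events whose first indexed argument
# equals sender_address, scanning from self.args.from_block onwards.
# channel_ids = self._get_all_filtered_channels([address_to_topic(sender_address)])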
236,970
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.list_repo
def list_repo(self): """ Returns info about all Repos. """ req = proto.ListRepoRequest() res = self.stub.ListRepo(req, metadata=self.metadata) if hasattr(res, 'repo_info'): return res.repo_info return []
python
def list_repo(self): """ Returns info about all Repos. """ req = proto.ListRepoRequest() res = self.stub.ListRepo(req, metadata=self.metadata) if hasattr(res, 'repo_info'): return res.repo_info return []
[ "def", "list_repo", "(", "self", ")", ":", "req", "=", "proto", ".", "ListRepoRequest", "(", ")", "res", "=", "self", ".", "stub", ".", "ListRepo", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")", "if", "hasattr", "(", "res", ",", "'repo_info'", ")", ":", "return", "res", ".", "repo_info", "return", "[", "]" ]
Returns info about all Repos.
[ "Returns", "info", "about", "all", "Repos", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L71-L79
236,971
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.delete_repo
def delete_repo(self, repo_name=None, force=False, all=False): """ Deletes a repo and reclaims the storage space it was using. Params: * repo_name: The name of the repo. * force: If set to true, the repo will be removed regardless of errors. This argument should be used with care. * all: Delete all repos. """ if not all: if repo_name: req = proto.DeleteRepoRequest(repo=proto.Repo(name=repo_name), force=force) self.stub.DeleteRepo(req, metadata=self.metadata) else: raise ValueError("Either a repo_name or all=True needs to be provided") else: if not repo_name: req = proto.DeleteRepoRequest(force=force, all=all) self.stub.DeleteRepo(req, metadata=self.metadata) else: raise ValueError("Cannot specify a repo_name if all=True")
python
def delete_repo(self, repo_name=None, force=False, all=False): """ Deletes a repo and reclaims the storage space it was using. Params: * repo_name: The name of the repo. * force: If set to true, the repo will be removed regardless of errors. This argument should be used with care. * all: Delete all repos. """ if not all: if repo_name: req = proto.DeleteRepoRequest(repo=proto.Repo(name=repo_name), force=force) self.stub.DeleteRepo(req, metadata=self.metadata) else: raise ValueError("Either a repo_name or all=True needs to be provided") else: if not repo_name: req = proto.DeleteRepoRequest(force=force, all=all) self.stub.DeleteRepo(req, metadata=self.metadata) else: raise ValueError("Cannot specify a repo_name if all=True")
[ "def", "delete_repo", "(", "self", ",", "repo_name", "=", "None", ",", "force", "=", "False", ",", "all", "=", "False", ")", ":", "if", "not", "all", ":", "if", "repo_name", ":", "req", "=", "proto", ".", "DeleteRepoRequest", "(", "repo", "=", "proto", ".", "Repo", "(", "name", "=", "repo_name", ")", ",", "force", "=", "force", ")", "self", ".", "stub", ".", "DeleteRepo", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")", "else", ":", "raise", "ValueError", "(", "\"Either a repo_name or all=True needs to be provided\"", ")", "else", ":", "if", "not", "repo_name", ":", "req", "=", "proto", ".", "DeleteRepoRequest", "(", "force", "=", "force", ",", "all", "=", "all", ")", "self", ".", "stub", ".", "DeleteRepo", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")", "else", ":", "raise", "ValueError", "(", "\"Cannot specify a repo_name if all=True\"", ")" ]
Deletes a repo and reclaims the storage space it was using. Params: * repo_name: The name of the repo. * force: If set to true, the repo will be removed regardless of errors. This argument should be used with care. * all: Delete all repos.
[ "Deletes", "a", "repo", "and", "reclaims", "the", "storage", "space", "it", "was", "using", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L81-L102
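A short usage sketch of the two delete modes described above. The import path is inferred from the record's module path (src/python_pachyderm/pfs_client.py), and a reachable pachd with default connection settings is assumed.

from python_pachyderm.pfs_client import PfsClient  # import path inferred from the record

client = PfsClient()                       # default host/port assumed
client.delete_repo("test-repo")            # delete a single repo
client.delete_repo("stuck-repo", force=True)
client.delete_repo(all=True)               # delete everything; repo_name must be omitted
# client.delete_repo("x", all=True)        # would raise ValueError per the code above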
236,972
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.start_commit
def start_commit(self, repo_name, branch=None, parent=None, description=None): """ Begins the process of committing data to a Repo. Once started you can write to the Commit with PutFile and when all the data has been written you must finish the Commit with FinishCommit. NOTE, data is not persisted until FinishCommit is called. A Commit object is returned. Params: * repo_name: The name of the repo. * branch: A more convenient way to build linear chains of commits. When a commit is started with a non-empty branch the value of branch becomes an alias for the created Commit. This enables a more intuitive access pattern. When the commit is started on a branch the previous head of the branch is used as the parent of the commit. * parent: Specifies the parent Commit, upon creation the new Commit will appear identical to the parent Commit, data can safely be added to the new commit without affecting the contents of the parent Commit. You may pass "" as parentCommit in which case the new Commit will have no parent and will initially appear empty. * description: (optional) explanation of the commit for clarity. """ req = proto.StartCommitRequest(parent=proto.Commit(repo=proto.Repo(name=repo_name), id=parent), branch=branch, description=description) res = self.stub.StartCommit(req, metadata=self.metadata) return res
python
def start_commit(self, repo_name, branch=None, parent=None, description=None): """ Begins the process of committing data to a Repo. Once started you can write to the Commit with PutFile and when all the data has been written you must finish the Commit with FinishCommit. NOTE, data is not persisted until FinishCommit is called. A Commit object is returned. Params: * repo_name: The name of the repo. * branch: A more convenient way to build linear chains of commits. When a commit is started with a non-empty branch the value of branch becomes an alias for the created Commit. This enables a more intuitive access pattern. When the commit is started on a branch the previous head of the branch is used as the parent of the commit. * parent: Specifies the parent Commit, upon creation the new Commit will appear identical to the parent Commit, data can safely be added to the new commit without affecting the contents of the parent Commit. You may pass "" as parentCommit in which case the new Commit will have no parent and will initially appear empty. * description: (optional) explanation of the commit for clarity. """ req = proto.StartCommitRequest(parent=proto.Commit(repo=proto.Repo(name=repo_name), id=parent), branch=branch, description=description) res = self.stub.StartCommit(req, metadata=self.metadata) return res
[ "def", "start_commit", "(", "self", ",", "repo_name", ",", "branch", "=", "None", ",", "parent", "=", "None", ",", "description", "=", "None", ")", ":", "req", "=", "proto", ".", "StartCommitRequest", "(", "parent", "=", "proto", ".", "Commit", "(", "repo", "=", "proto", ".", "Repo", "(", "name", "=", "repo_name", ")", ",", "id", "=", "parent", ")", ",", "branch", "=", "branch", ",", "description", "=", "description", ")", "res", "=", "self", ".", "stub", ".", "StartCommit", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")", "return", "res" ]
Begins the process of committing data to a Repo. Once started you can write to the Commit with PutFile and when all the data has been written you must finish the Commit with FinishCommit. NOTE, data is not persisted until FinishCommit is called. A Commit object is returned. Params: * repo_name: The name of the repo. * branch: A more convenient way to build linear chains of commits. When a commit is started with a non-empty branch the value of branch becomes an alias for the created Commit. This enables a more intuitive access pattern. When the commit is started on a branch the previous head of the branch is used as the parent of the commit. * parent: Specifies the parent Commit, upon creation the new Commit will appear identical to the parent Commit, data can safely be added to the new commit without affecting the contents of the parent Commit. You may pass "" as parentCommit in which case the new Commit will have no parent and will initially appear empty. * description: (optional) explanation of the commit for clarity.
[ "Begins", "the", "process", "of", "committing", "data", "to", "a", "Repo", ".", "Once", "started", "you", "can", "write", "to", "the", "Commit", "with", "PutFile", "and", "when", "all", "the", "data", "has", "been", "written", "you", "must", "finish", "the", "Commit", "with", "FinishCommit", ".", "NOTE", "data", "is", "not", "persisted", "until", "FinishCommit", "is", "called", ".", "A", "Commit", "object", "is", "returned", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L104-L129
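A hedged sketch of the start/finish cycle this docstring describes, using finish_commit from the next record. The "images" repo, the branch, and the commented write call are illustrative assumptions.

client = PfsClient()                                    # as in the earlier sketch
commit = client.start_commit("images", branch="master")
# ... file writes (e.g. a put_file_* call) would go here; nothing is persisted yet ...
client.finish_commit(commit)                            # data becomes immutable here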
236,973
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.finish_commit
def finish_commit(self, commit): """ Ends the process of committing data to a Repo and persists the Commit. Once a Commit is finished the data becomes immutable and future attempts to write to it with PutFile will error. Params: * commit: A tuple, string, or Commit object representing the commit. """ req = proto.FinishCommitRequest(commit=commit_from(commit)) res = self.stub.FinishCommit(req, metadata=self.metadata) return res
python
def finish_commit(self, commit): """ Ends the process of committing data to a Repo and persists the Commit. Once a Commit is finished the data becomes immutable and future attempts to write to it with PutFile will error. Params: * commit: A tuple, string, or Commit object representing the commit. """ req = proto.FinishCommitRequest(commit=commit_from(commit)) res = self.stub.FinishCommit(req, metadata=self.metadata) return res
[ "def", "finish_commit", "(", "self", ",", "commit", ")", ":", "req", "=", "proto", ".", "FinishCommitRequest", "(", "commit", "=", "commit_from", "(", "commit", ")", ")", "res", "=", "self", ".", "stub", ".", "FinishCommit", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")", "return", "res" ]
Ends the process of committing data to a Repo and persists the Commit. Once a Commit is finished the data becomes immutable and future attempts to write to it with PutFile will error. Params: * commit: A tuple, string, or Commit object representing the commit.
[ "Ends", "the", "process", "of", "committing", "data", "to", "a", "Repo", "and", "persists", "the", "Commit", ".", "Once", "a", "Commit", "is", "finished", "the", "data", "becomes", "immutable", "and", "future", "attempts", "to", "write", "to", "it", "with", "PutFile", "will", "error", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L131-L142
236,974
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.commit
def commit(self, repo_name, branch=None, parent=None, description=None): """A context manager for doing stuff inside a commit.""" commit = self.start_commit(repo_name, branch, parent, description) try: yield commit except Exception as e: print("An exception occurred during an open commit. " "Trying to finish it (Currently a commit can't be cancelled)") raise e finally: self.finish_commit(commit)
python
def commit(self, repo_name, branch=None, parent=None, description=None): """A context manager for doing stuff inside a commit.""" commit = self.start_commit(repo_name, branch, parent, description) try: yield commit except Exception as e: print("An exception occurred during an open commit. " "Trying to finish it (Currently a commit can't be cancelled)") raise e finally: self.finish_commit(commit)
[ "def", "commit", "(", "self", ",", "repo_name", ",", "branch", "=", "None", ",", "parent", "=", "None", ",", "description", "=", "None", ")", ":", "commit", "=", "self", ".", "start_commit", "(", "repo_name", ",", "branch", ",", "parent", ",", "description", ")", "try", ":", "yield", "commit", "except", "Exception", "as", "e", ":", "print", "(", "\"An exception occurred during an open commit. \"", "\"Trying to finish it (Currently a commit can't be cancelled)\"", ")", "raise", "e", "finally", ":", "self", ".", "finish_commit", "(", "commit", ")" ]
A context manager for doing stuff inside a commit.
[ "A", "context", "manager", "for", "doing", "stuff", "inside", "a", "commit", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L145-L155
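The context manager above wraps that same start/finish pair; a hedged usage sketch follows, continuing with the client instance from the earlier sketches. The repo name and the put_file_bytes call are assumptions for illustration.

with client.commit("images", branch="master") as c:
    client.put_file_bytes(c, "/file.dat", b"some bytes")  # hypothetical write inside the commit
# finish_commit() runs in the finally block even if the body raises,
# since a commit currently cannot be cancelled.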
236,975
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.inspect_commit
def inspect_commit(self, commit): """ Returns info about a specific Commit. Params: * commit: A tuple, string, or Commit object representing the commit. """ req = proto.InspectCommitRequest(commit=commit_from(commit)) return self.stub.InspectCommit(req, metadata=self.metadata)
python
def inspect_commit(self, commit): """ Returns info about a specific Commit. Params: * commit: A tuple, string, or Commit object representing the commit. """ req = proto.InspectCommitRequest(commit=commit_from(commit)) return self.stub.InspectCommit(req, metadata=self.metadata)
[ "def", "inspect_commit", "(", "self", ",", "commit", ")", ":", "req", "=", "proto", ".", "InspectCommitRequest", "(", "commit", "=", "commit_from", "(", "commit", ")", ")", "return", "self", ".", "stub", ".", "InspectCommit", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")" ]
Returns info about a specific Commit. Params: * commit: A tuple, string, or Commit object representing the commit.
[ "Returns", "info", "about", "a", "specific", "Commit", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L157-L165
236,976
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.list_commit
def list_commit(self, repo_name, to_commit=None, from_commit=None, number=0): """ Gets a list of CommitInfo objects. Params: * repo_name: If only `repo_name` is given, all commits in the repo are returned. * to_commit: Optional. Only the ancestors of `to`, including `to` itself, are considered. * from_commit: Optional. Only the descendants of `from`, including `from` itself, are considered. * number: Optional. Determines how many commits are returned. If `number` is 0, all commits that match the aforementioned criteria are returned. """ req = proto.ListCommitRequest(repo=proto.Repo(name=repo_name), number=number) if to_commit is not None: req.to.CopyFrom(commit_from(to_commit)) if from_commit is not None: getattr(req, 'from').CopyFrom(commit_from(from_commit)) res = self.stub.ListCommit(req, metadata=self.metadata) if hasattr(res, 'commit_info'): return res.commit_info return []
python
def list_commit(self, repo_name, to_commit=None, from_commit=None, number=0): """ Gets a list of CommitInfo objects. Params: * repo_name: If only `repo_name` is given, all commits in the repo are returned. * to_commit: Optional. Only the ancestors of `to`, including `to` itself, are considered. * from_commit: Optional. Only the descendants of `from`, including `from` itself, are considered. * number: Optional. Determines how many commits are returned. If `number` is 0, all commits that match the aforementioned criteria are returned. """ req = proto.ListCommitRequest(repo=proto.Repo(name=repo_name), number=number) if to_commit is not None: req.to.CopyFrom(commit_from(to_commit)) if from_commit is not None: getattr(req, 'from').CopyFrom(commit_from(from_commit)) res = self.stub.ListCommit(req, metadata=self.metadata) if hasattr(res, 'commit_info'): return res.commit_info return []
[ "def", "list_commit", "(", "self", ",", "repo_name", ",", "to_commit", "=", "None", ",", "from_commit", "=", "None", ",", "number", "=", "0", ")", ":", "req", "=", "proto", ".", "ListCommitRequest", "(", "repo", "=", "proto", ".", "Repo", "(", "name", "=", "repo_name", ")", ",", "number", "=", "number", ")", "if", "to_commit", "is", "not", "None", ":", "req", ".", "to", ".", "CopyFrom", "(", "commit_from", "(", "to_commit", ")", ")", "if", "from_commit", "is", "not", "None", ":", "getattr", "(", "req", ",", "'from'", ")", ".", "CopyFrom", "(", "commit_from", "(", "from_commit", ")", ")", "res", "=", "self", ".", "stub", ".", "ListCommit", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")", "if", "hasattr", "(", "res", ",", "'commit_info'", ")", ":", "return", "res", ".", "commit_info", "return", "[", "]" ]
Gets a list of CommitInfo objects. Params: * repo_name: If only `repo_name` is given, all commits in the repo are returned. * to_commit: Optional. Only the ancestors of `to`, including `to` itself, are considered. * from_commit: Optional. Only the descendants of `from`, including `from` itself, are considered. * number: Optional. Determines how many commits are returned. If `number` is 0, all commits that match the aforementioned criteria are returned.
[ "Gets", "a", "list", "of", "CommitInfo", "objects", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L177-L200
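A hedged sketch of the listing options described above, reusing the client from the earlier sketches; the repo name, commit id, and printed field are illustrative assumptions.

everything = client.list_commit("images")             # all commits in the repo
last_five = client.list_commit("images", number=5)    # at most five commits
window = client.list_commit("images",
                            to_commit=("images", "master"),
                            from_commit=("images", "some-older-commit-id"))
for info in window:
    print(info.commit.id)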
236,977
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.delete_commit
def delete_commit(self, commit): """ Deletes a commit. Params: * commit: A tuple, string, or Commit object representing the commit. """ req = proto.DeleteCommitRequest(commit=commit_from(commit)) self.stub.DeleteCommit(req, metadata=self.metadata)
python
def delete_commit(self, commit): """ Deletes a commit. Params: * commit: A tuple, string, or Commit object representing the commit. """ req = proto.DeleteCommitRequest(commit=commit_from(commit)) self.stub.DeleteCommit(req, metadata=self.metadata)
[ "def", "delete_commit", "(", "self", ",", "commit", ")", ":", "req", "=", "proto", ".", "DeleteCommitRequest", "(", "commit", "=", "commit_from", "(", "commit", ")", ")", "self", ".", "stub", ".", "DeleteCommit", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")" ]
Deletes a commit. Params: * commit: A tuple, string, or Commit object representing the commit.
[ "Deletes", "a", "commit", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L202-L210
236,978
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.flush_commit
def flush_commit(self, commits, repos=tuple()): """ Blocks until all of the commits which have a set of commits as provenance have finished. For commits to be considered they must have all of the specified commits as provenance. This in effect waits for all of the jobs that are triggered by a set of commits to complete. It returns an error if any of the commits it's waiting on are cancelled due to one of the jobs encountering an error during runtime. Note that it's never necessary to call FlushCommit to run jobs, they'll run no matter what, FlushCommit just allows you to wait for them to complete and see their output once they do. This returns an iterator of CommitInfo objects. Params: * commits: A commit or a list of commits to wait on. * repos: Optional. Only the commits up to and including those repos. will be considered, otherwise all repos are considered. """ req = proto.FlushCommitRequest(commit=[commit_from(c) for c in commits], to_repo=[proto.Repo(name=r) for r in repos]) res = self.stub.FlushCommit(req, metadata=self.metadata) return res
python
def flush_commit(self, commits, repos=tuple()): """ Blocks until all of the commits which have a set of commits as provenance have finished. For commits to be considered they must have all of the specified commits as provenance. This in effect waits for all of the jobs that are triggered by a set of commits to complete. It returns an error if any of the commits it's waiting on are cancelled due to one of the jobs encountering an error during runtime. Note that it's never necessary to call FlushCommit to run jobs, they'll run no matter what, FlushCommit just allows you to wait for them to complete and see their output once they do. This returns an iterator of CommitInfo objects. Params: * commits: A commit or a list of commits to wait on. * repos: Optional. Only the commits up to and including those repos. will be considered, otherwise all repos are considered. """ req = proto.FlushCommitRequest(commit=[commit_from(c) for c in commits], to_repo=[proto.Repo(name=r) for r in repos]) res = self.stub.FlushCommit(req, metadata=self.metadata) return res
[ "def", "flush_commit", "(", "self", ",", "commits", ",", "repos", "=", "tuple", "(", ")", ")", ":", "req", "=", "proto", ".", "FlushCommitRequest", "(", "commit", "=", "[", "commit_from", "(", "c", ")", "for", "c", "in", "commits", "]", ",", "to_repo", "=", "[", "proto", ".", "Repo", "(", "name", "=", "r", ")", "for", "r", "in", "repos", "]", ")", "res", "=", "self", ".", "stub", ".", "FlushCommit", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")", "return", "res" ]
Blocks until all of the commits which have a set of commits as provenance have finished. For commits to be considered they must have all of the specified commits as provenance. This in effect waits for all of the jobs that are triggered by a set of commits to complete. It returns an error if any of the commits it's waiting on are cancelled due to one of the jobs encountering an error during runtime. Note that it's never necessary to call FlushCommit to run jobs, they'll run no matter what, FlushCommit just allows you to wait for them to complete and see their output once they do. This returns an iterator of CommitInfo objects. Params: * commits: A commit or a list of commits to wait on. * repos: Optional. Only the commits up to and including those repos. will be considered, otherwise all repos are considered.
[ "Blocks", "until", "all", "of", "the", "commits", "which", "have", "a", "set", "of", "commits", "as", "provenance", "have", "finished", ".", "For", "commits", "to", "be", "considered", "they", "must", "have", "all", "of", "the", "specified", "commits", "as", "provenance", ".", "This", "in", "effect", "waits", "for", "all", "of", "the", "jobs", "that", "are", "triggered", "by", "a", "set", "of", "commits", "to", "complete", ".", "It", "returns", "an", "error", "if", "any", "of", "the", "commits", "it", "s", "waiting", "on", "are", "cancelled", "due", "to", "one", "of", "the", "jobs", "encountering", "an", "error", "during", "runtime", ".", "Note", "that", "it", "s", "never", "necessary", "to", "call", "FlushCommit", "to", "run", "jobs", "they", "ll", "run", "no", "matter", "what", "FlushCommit", "just", "allows", "you", "to", "wait", "for", "them", "to", "complete", "and", "see", "their", "output", "once", "they", "do", ".", "This", "returns", "an", "iterator", "of", "CommitInfo", "objects", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L212-L233
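A hedged sketch of waiting on downstream jobs with flush_commit, reusing the client from the earlier sketches; the repo name and the assumption that a pipeline consumes "images" are illustrative.

commit = client.start_commit("images", branch="master")
# ... write input data here ...
client.finish_commit(commit)

# Block until every output commit with our input commit in its provenance is finished.
for info in client.flush_commit([("images", commit.id)]):
    print("finished:", info.commit.repo.name, info.commit.id)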
236,979
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.subscribe_commit
def subscribe_commit(self, repo_name, branch, from_commit_id=None): """ SubscribeCommit is like ListCommit but it keeps listening for commits as they come in. This returns an iterator Commit objects. Params: * repo_name: Name of the repo. * branch: Branch to subscribe to. * from_commit_id: Optional. Only commits created since this commit are returned. """ repo = proto.Repo(name=repo_name) req = proto.SubscribeCommitRequest(repo=repo, branch=branch) if from_commit_id is not None: getattr(req, 'from').CopyFrom(proto.Commit(repo=repo, id=from_commit_id)) res = self.stub.SubscribeCommit(req, metadata=self.metadata) return res
python
def subscribe_commit(self, repo_name, branch, from_commit_id=None): """ SubscribeCommit is like ListCommit but it keeps listening for commits as they come in. This returns an iterator Commit objects. Params: * repo_name: Name of the repo. * branch: Branch to subscribe to. * from_commit_id: Optional. Only commits created since this commit are returned. """ repo = proto.Repo(name=repo_name) req = proto.SubscribeCommitRequest(repo=repo, branch=branch) if from_commit_id is not None: getattr(req, 'from').CopyFrom(proto.Commit(repo=repo, id=from_commit_id)) res = self.stub.SubscribeCommit(req, metadata=self.metadata) return res
[ "def", "subscribe_commit", "(", "self", ",", "repo_name", ",", "branch", ",", "from_commit_id", "=", "None", ")", ":", "repo", "=", "proto", ".", "Repo", "(", "name", "=", "repo_name", ")", "req", "=", "proto", ".", "SubscribeCommitRequest", "(", "repo", "=", "repo", ",", "branch", "=", "branch", ")", "if", "from_commit_id", "is", "not", "None", ":", "getattr", "(", "req", ",", "'from'", ")", ".", "CopyFrom", "(", "proto", ".", "Commit", "(", "repo", "=", "repo", ",", "id", "=", "from_commit_id", ")", ")", "res", "=", "self", ".", "stub", ".", "SubscribeCommit", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")", "return", "res" ]
SubscribeCommit is like ListCommit but it keeps listening for commits as they come in. This returns an iterator of Commit objects. Params: * repo_name: Name of the repo. * branch: Branch to subscribe to. * from_commit_id: Optional. Only commits created since this commit are returned.
[ "SubscribeCommit", "is", "like", "ListCommit", "but", "it", "keeps", "listening", "for", "commits", "as", "they", "come", "in", ".", "This", "returns", "an", "iterator", "of", "Commit", "objects", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L235-L251
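A hedged sketch of the streaming variant above, reusing the client from the earlier sketches; it blocks and yields one message per new commit on the branch (repo and branch names are assumptions).

for item in client.subscribe_commit("images", "master"):
    # each yielded message corresponds to one newly arrived commit on master
    print(item)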
236,980
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.list_branch
def list_branch(self, repo_name): """ Lists the active Branch objects on a Repo. Params: * repo_name: The name of the repo. """ req = proto.ListBranchRequest(repo=proto.Repo(name=repo_name)) res = self.stub.ListBranch(req, metadata=self.metadata) if hasattr(res, 'branch_info'): return res.branch_info return []
python
def list_branch(self, repo_name): """ Lists the active Branch objects on a Repo. Params: * repo_name: The name of the repo. """ req = proto.ListBranchRequest(repo=proto.Repo(name=repo_name)) res = self.stub.ListBranch(req, metadata=self.metadata) if hasattr(res, 'branch_info'): return res.branch_info return []
[ "def", "list_branch", "(", "self", ",", "repo_name", ")", ":", "req", "=", "proto", ".", "ListBranchRequest", "(", "repo", "=", "proto", ".", "Repo", "(", "name", "=", "repo_name", ")", ")", "res", "=", "self", ".", "stub", ".", "ListBranch", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")", "if", "hasattr", "(", "res", ",", "'branch_info'", ")", ":", "return", "res", ".", "branch_info", "return", "[", "]" ]
Lists the active Branch objects on a Repo. Params: * repo_name: The name of the repo.
[ "Lists", "the", "active", "Branch", "objects", "on", "a", "Repo", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L253-L264
236,981
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.set_branch
def set_branch(self, commit, branch_name): """ Sets a commit and its ancestors as a branch. Params: * commit: A tuple, string, or Commit object representing the commit. * branch_name: The name for the branch to set. """ res = proto.SetBranchRequest(commit=commit_from(commit), branch=branch_name) self.stub.SetBranch(res, metadata=self.metadata)
python
def set_branch(self, commit, branch_name): """ Sets a commit and its ancestors as a branch. Params: * commit: A tuple, string, or Commit object representing the commit. * branch_name: The name for the branch to set. """ res = proto.SetBranchRequest(commit=commit_from(commit), branch=branch_name) self.stub.SetBranch(res, metadata=self.metadata)
[ "def", "set_branch", "(", "self", ",", "commit", ",", "branch_name", ")", ":", "res", "=", "proto", ".", "SetBranchRequest", "(", "commit", "=", "commit_from", "(", "commit", ")", ",", "branch", "=", "branch_name", ")", "self", ".", "stub", ".", "SetBranch", "(", "res", ",", "metadata", "=", "self", ".", "metadata", ")" ]
Sets a commit and its ancestors as a branch. Params: * commit: A tuple, string, or Commit object representing the commit. * branch_name: The name for the branch to set.
[ "Sets", "a", "commit", "and", "its", "ancestors", "as", "a", "branch", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L266-L275
236,982
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.delete_branch
def delete_branch(self, repo_name, branch_name): """ Deletes a branch, but leaves the commits themselves intact. In other words, those commits can still be accessed via commit IDs and other branches they happen to be on. Params: * repo_name: The name of the repo. * branch_name: The name of the branch to delete. """ res = proto.DeleteBranchRequest(repo=Repo(name=repo_name), branch=branch_name) self.stub.DeleteBranch(res, metadata=self.metadata)
python
def delete_branch(self, repo_name, branch_name): """ Deletes a branch, but leaves the commits themselves intact. In other words, those commits can still be accessed via commit IDs and other branches they happen to be on. Params: * repo_name: The name of the repo. * branch_name: The name of the branch to delete. """ res = proto.DeleteBranchRequest(repo=Repo(name=repo_name), branch=branch_name) self.stub.DeleteBranch(res, metadata=self.metadata)
[ "def", "delete_branch", "(", "self", ",", "repo_name", ",", "branch_name", ")", ":", "res", "=", "proto", ".", "DeleteBranchRequest", "(", "repo", "=", "Repo", "(", "name", "=", "repo_name", ")", ",", "branch", "=", "branch_name", ")", "self", ".", "stub", ".", "DeleteBranch", "(", "res", ",", "metadata", "=", "self", ".", "metadata", ")" ]
Deletes a branch, but leaves the commits themselves intact. In other words, those commits can still be accessed via commit IDs and other branches they happen to be on. Params: * repo_name: The name of the repo. * branch_name: The name of the branch to delete.
[ "Deletes", "a", "branch", "but", "leaves", "the", "commits", "themselves", "intact", ".", "In", "other", "words", "those", "commits", "can", "still", "be", "accessed", "via", "commit", "IDs", "and", "other", "branches", "they", "happen", "to", "be", "on", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L277-L288
236,983
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.put_file_url
def put_file_url(self, commit, path, url, recursive=False): """ Puts a file using the content found at a URL. The URL is sent to the server which performs the request. Params: * commit: A tuple, string, or Commit object representing the commit. * path: The path to the file. * url: The url of the file to put. * recursive: allow for recursive scraping of some types URLs for example on s3:// urls. """ req = iter([ proto.PutFileRequest( file=proto.File(commit=commit_from(commit), path=path), url=url, recursive=recursive ) ]) self.stub.PutFile(req, metadata=self.metadata)
python
def put_file_url(self, commit, path, url, recursive=False): """ Puts a file using the content found at a URL. The URL is sent to the server which performs the request. Params: * commit: A tuple, string, or Commit object representing the commit. * path: The path to the file. * url: The url of the file to put. * recursive: allow for recursive scraping of some types URLs for example on s3:// urls. """ req = iter([ proto.PutFileRequest( file=proto.File(commit=commit_from(commit), path=path), url=url, recursive=recursive ) ]) self.stub.PutFile(req, metadata=self.metadata)
[ "def", "put_file_url", "(", "self", ",", "commit", ",", "path", ",", "url", ",", "recursive", "=", "False", ")", ":", "req", "=", "iter", "(", "[", "proto", ".", "PutFileRequest", "(", "file", "=", "proto", ".", "File", "(", "commit", "=", "commit_from", "(", "commit", ")", ",", "path", "=", "path", ")", ",", "url", "=", "url", ",", "recursive", "=", "recursive", ")", "]", ")", "self", ".", "stub", ".", "PutFile", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")" ]
Puts a file using the content found at a URL. The URL is sent to the server which performs the request. Params: * commit: A tuple, string, or Commit object representing the commit. * path: The path to the file. * url: The url of the file to put. * recursive: allow for recursive scraping of some types of URLs, for example s3:// urls.
[ "Puts", "a", "file", "using", "the", "content", "found", "at", "a", "URL", ".", "The", "URL", "is", "sent", "to", "the", "server", "which", "performs", "the", "request", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L363-L382
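A hedged sketch combining the commit context manager with put_file_url, reusing the client from the earlier sketches; the repo, paths, and URLs are illustrative assumptions.

with client.commit("images", branch="master") as c:
    # pachd itself fetches the URL; the bytes never pass through this client
    client.put_file_url(c, "/kitten.png", "https://example.com/kitten.png")
    # recursive=True is meant for container-style URLs such as s3:// prefixes:
    # client.put_file_url(c, "/dump", "s3://my-bucket/prefix", recursive=True)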
236,984
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.get_file
def get_file(self, commit, path, offset_bytes=0, size_bytes=0, extract_value=True): """ Returns an iterator of the contents contents of a file at a specific Commit. Params: * commit: A tuple, string, or Commit object representing the commit. * path: The path of the file. * offset_bytes: Optional. specifies a number of bytes that should be skipped in the beginning of the file. * size_bytes: Optional. limits the total amount of data returned, note you will get fewer bytes than size if you pass a value larger than the size of the file. If size is set to 0 then all of the data will be returned. * extract_value: If True, then an ExtractValueIterator will be return, which will iterate over the bytes of the file. If False, then the protobuf response iterator will return. """ req = proto.GetFileRequest( file=proto.File(commit=commit_from(commit), path=path), offset_bytes=offset_bytes, size_bytes=size_bytes ) res = self.stub.GetFile(req, metadata=self.metadata) if extract_value: return ExtractValueIterator(res) return res
python
def get_file(self, commit, path, offset_bytes=0, size_bytes=0, extract_value=True): """ Returns an iterator of the contents contents of a file at a specific Commit. Params: * commit: A tuple, string, or Commit object representing the commit. * path: The path of the file. * offset_bytes: Optional. specifies a number of bytes that should be skipped in the beginning of the file. * size_bytes: Optional. limits the total amount of data returned, note you will get fewer bytes than size if you pass a value larger than the size of the file. If size is set to 0 then all of the data will be returned. * extract_value: If True, then an ExtractValueIterator will be return, which will iterate over the bytes of the file. If False, then the protobuf response iterator will return. """ req = proto.GetFileRequest( file=proto.File(commit=commit_from(commit), path=path), offset_bytes=offset_bytes, size_bytes=size_bytes ) res = self.stub.GetFile(req, metadata=self.metadata) if extract_value: return ExtractValueIterator(res) return res
[ "def", "get_file", "(", "self", ",", "commit", ",", "path", ",", "offset_bytes", "=", "0", ",", "size_bytes", "=", "0", ",", "extract_value", "=", "True", ")", ":", "req", "=", "proto", ".", "GetFileRequest", "(", "file", "=", "proto", ".", "File", "(", "commit", "=", "commit_from", "(", "commit", ")", ",", "path", "=", "path", ")", ",", "offset_bytes", "=", "offset_bytes", ",", "size_bytes", "=", "size_bytes", ")", "res", "=", "self", ".", "stub", ".", "GetFile", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")", "if", "extract_value", ":", "return", "ExtractValueIterator", "(", "res", ")", "return", "res" ]
Returns an iterator of the contents of a file at a specific Commit. Params: * commit: A tuple, string, or Commit object representing the commit. * path: The path of the file. * offset_bytes: Optional. Specifies a number of bytes that should be skipped at the beginning of the file. * size_bytes: Optional. Limits the total amount of data returned; note you will get fewer bytes than size if you pass a value larger than the size of the file. If size is set to 0 then all of the data will be returned. * extract_value: If True, an ExtractValueIterator will be returned, which will iterate over the bytes of the file. If False, the protobuf response iterator will be returned.
[ "Returns", "an", "iterator", "of", "the", "contents", "of", "a", "file", "at", "a", "specific", "Commit", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L384-L409
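A hedged sketch of reading data back with get_file, reusing the client from the earlier sketches; the commit tuple and path are assumptions. Joining the iterator materialises the whole file, while size_bytes keeps the transfer bounded.

data = b"".join(client.get_file(("images", "master"), "/kitten.png"))

# Only the first kilobyte, skipping nothing:
head = b"".join(client.get_file(("images", "master"), "/kitten.png",
                                offset_bytes=0, size_bytes=1024))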
236,985
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.get_files
def get_files(self, commit, paths, recursive=False): """ Returns the contents of a list of files at a specific Commit as a dictionary of file paths to data. Params: * commit: A tuple, string, or Commit object representing the commit. * paths: A list of paths to retrieve. * recursive: If True, will go into each directory in the list recursively. """ filtered_file_infos = [] for path in paths: fi = self.inspect_file(commit, path) if fi.file_type == proto.FILE: filtered_file_infos.append(fi) else: filtered_file_infos += self.list_file(commit, path, recursive=recursive) filtered_paths = [fi.file.path for fi in filtered_file_infos if fi.file_type == proto.FILE] return {path: b''.join(self.get_file(commit, path)) for path in filtered_paths}
python
def get_files(self, commit, paths, recursive=False): """ Returns the contents of a list of files at a specific Commit as a dictionary of file paths to data. Params: * commit: A tuple, string, or Commit object representing the commit. * paths: A list of paths to retrieve. * recursive: If True, will go into each directory in the list recursively. """ filtered_file_infos = [] for path in paths: fi = self.inspect_file(commit, path) if fi.file_type == proto.FILE: filtered_file_infos.append(fi) else: filtered_file_infos += self.list_file(commit, path, recursive=recursive) filtered_paths = [fi.file.path for fi in filtered_file_infos if fi.file_type == proto.FILE] return {path: b''.join(self.get_file(commit, path)) for path in filtered_paths}
[ "def", "get_files", "(", "self", ",", "commit", ",", "paths", ",", "recursive", "=", "False", ")", ":", "filtered_file_infos", "=", "[", "]", "for", "path", "in", "paths", ":", "fi", "=", "self", ".", "inspect_file", "(", "commit", ",", "path", ")", "if", "fi", ".", "file_type", "==", "proto", ".", "FILE", ":", "filtered_file_infos", ".", "append", "(", "fi", ")", "else", ":", "filtered_file_infos", "+=", "self", ".", "list_file", "(", "commit", ",", "path", ",", "recursive", "=", "recursive", ")", "filtered_paths", "=", "[", "fi", ".", "file", ".", "path", "for", "fi", "in", "filtered_file_infos", "if", "fi", ".", "file_type", "==", "proto", ".", "FILE", "]", "return", "{", "path", ":", "b''", ".", "join", "(", "self", ".", "get_file", "(", "commit", ",", "path", ")", ")", "for", "path", "in", "filtered_paths", "}" ]
Returns the contents of a list of files at a specific Commit as a dictionary of file paths to data. Params: * commit: A tuple, string, or Commit object representing the commit. * paths: A list of paths to retrieve. * recursive: If True, will go into each directory in the list recursively.
[ "Returns", "the", "contents", "of", "a", "list", "of", "files", "at", "a", "specific", "Commit", "as", "a", "dictionary", "of", "file", "paths", "to", "data", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L411-L432
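A hedged usage sketch for get_files, assuming the same hypothetical cluster and repo as above; the return value maps each file path to its full contents as bytes.
from python_pachyderm import PfsClient

client = PfsClient()  # default host/port; hypothetical cluster
# Directories in the path list are expanded when recursive=True.
files = client.get_files(('test', 'master'), ['/data.txt', '/logs'], recursive=True)
for path, content in files.items():
    print(path, len(content), 'bytes')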
236,986
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.inspect_file
def inspect_file(self, commit, path): """ Returns info about a specific file. Params: * commit: A tuple, string, or Commit object representing the commit. * path: Path to file. """ req = proto.InspectFileRequest(file=proto.File(commit=commit_from(commit), path=path)) res = self.stub.InspectFile(req, metadata=self.metadata) return res
python
def inspect_file(self, commit, path): """ Returns info about a specific file. Params: * commit: A tuple, string, or Commit object representing the commit. * path: Path to file. """ req = proto.InspectFileRequest(file=proto.File(commit=commit_from(commit), path=path)) res = self.stub.InspectFile(req, metadata=self.metadata) return res
[ "def", "inspect_file", "(", "self", ",", "commit", ",", "path", ")", ":", "req", "=", "proto", ".", "InspectFileRequest", "(", "file", "=", "proto", ".", "File", "(", "commit", "=", "commit_from", "(", "commit", ")", ",", "path", "=", "path", ")", ")", "res", "=", "self", ".", "stub", ".", "InspectFile", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")", "return", "res" ]
Returns info about a specific file. Params: * commit: A tuple, string, or Commit object representing the commit. * path: Path to file.
[ "Returns", "info", "about", "a", "specific", "file", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L434-L444
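A short sketch for inspect_file under the same assumptions; the result is the raw FileInfo protobuf, so the field names used below are illustrative of the PFS proto rather than guaranteed.
from python_pachyderm import PfsClient

client = PfsClient()  # hypothetical cluster
info = client.inspect_file(('test', 'master'), '/data.txt')
# FileInfo carries metadata such as the size and whether the path is a file or a directory.
print(info.size_bytes, info.file_type)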
236,987
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.list_file
def list_file(self, commit, path, recursive=False): """ Lists the files in a directory. Params: * commit: A tuple, string, or Commit object representing the commit. * path: The path to the directory. * recursive: If True, continue listing the files for sub-directories. """ req = proto.ListFileRequest( file=proto.File(commit=commit_from(commit), path=path) ) res = self.stub.ListFile(req, metadata=self.metadata) file_infos = res.file_info if recursive: dirs = [f for f in file_infos if f.file_type == proto.DIR] files = [f for f in file_infos if f.file_type == proto.FILE] return sum([self.list_file(commit, d.file.path, recursive) for d in dirs], files) return list(file_infos)
python
def list_file(self, commit, path, recursive=False): """ Lists the files in a directory. Params: * commit: A tuple, string, or Commit object representing the commit. * path: The path to the directory. * recursive: If True, continue listing the files for sub-directories. """ req = proto.ListFileRequest( file=proto.File(commit=commit_from(commit), path=path) ) res = self.stub.ListFile(req, metadata=self.metadata) file_infos = res.file_info if recursive: dirs = [f for f in file_infos if f.file_type == proto.DIR] files = [f for f in file_infos if f.file_type == proto.FILE] return sum([self.list_file(commit, d.file.path, recursive) for d in dirs], files) return list(file_infos)
[ "def", "list_file", "(", "self", ",", "commit", ",", "path", ",", "recursive", "=", "False", ")", ":", "req", "=", "proto", ".", "ListFileRequest", "(", "file", "=", "proto", ".", "File", "(", "commit", "=", "commit_from", "(", "commit", ")", ",", "path", "=", "path", ")", ")", "res", "=", "self", ".", "stub", ".", "ListFile", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")", "file_infos", "=", "res", ".", "file_info", "if", "recursive", ":", "dirs", "=", "[", "f", "for", "f", "in", "file_infos", "if", "f", ".", "file_type", "==", "proto", ".", "DIR", "]", "files", "=", "[", "f", "for", "f", "in", "file_infos", "if", "f", ".", "file_type", "==", "proto", ".", "FILE", "]", "return", "sum", "(", "[", "self", ".", "list_file", "(", "commit", ",", "d", ".", "file", ".", "path", ",", "recursive", ")", "for", "d", "in", "dirs", "]", ",", "files", ")", "return", "list", "(", "file_infos", ")" ]
Lists the files in a directory. Params: * commit: A tuple, string, or Commit object representing the commit. * path: The path to the directory. * recursive: If True, continue listing the files for sub-directories.
[ "Lists", "the", "files", "in", "a", "directory", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L446-L466
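A sketch of list_file with recursive=True, which flattens sub-directories into a single list of FileInfo protobufs (same hypothetical repo as above).
from python_pachyderm import PfsClient

client = PfsClient()  # hypothetical cluster
for fi in client.list_file(('test', 'master'), '/', recursive=True):
    # fi.file.path is the repo-relative path; fi.file_type distinguishes files from directories.
    print(fi.file.path, fi.file_type)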
236,988
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
PfsClient.delete_file
def delete_file(self, commit, path): """ Deletes a file from a Commit. DeleteFile leaves a tombstone in the Commit, assuming the file isn't written to later attempting to get the file from the finished commit will result in not found error. The file will of course remain intact in the Commit's parent. Params: * commit: A tuple, string, or Commit object representing the commit. * path: The path to the file. """ req = proto.DeleteFileRequest(file=proto.File(commit=commit_from(commit), path=path)) self.stub.DeleteFile(req, metadata=self.metadata)
python
def delete_file(self, commit, path): """ Deletes a file from a Commit. DeleteFile leaves a tombstone in the Commit, assuming the file isn't written to later attempting to get the file from the finished commit will result in not found error. The file will of course remain intact in the Commit's parent. Params: * commit: A tuple, string, or Commit object representing the commit. * path: The path to the file. """ req = proto.DeleteFileRequest(file=proto.File(commit=commit_from(commit), path=path)) self.stub.DeleteFile(req, metadata=self.metadata)
[ "def", "delete_file", "(", "self", ",", "commit", ",", "path", ")", ":", "req", "=", "proto", ".", "DeleteFileRequest", "(", "file", "=", "proto", ".", "File", "(", "commit", "=", "commit_from", "(", "commit", ")", ",", "path", "=", "path", ")", ")", "self", ".", "stub", ".", "DeleteFile", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")" ]
Deletes a file from a Commit. DeleteFile leaves a tombstone in the Commit; assuming the file isn't written to later, attempting to get the file from the finished commit will result in a not-found error. The file will of course remain intact in the Commit's parent. Params: * commit: A tuple, string, or Commit object representing the commit. * path: The path to the file.
[ "Deletes", "a", "file", "from", "a", "Commit", ".", "DeleteFile", "leaves", "a", "tombstone", "in", "the", "Commit", "assuming", "the", "file", "isn", "t", "written", "to", "later", "attempting", "to", "get", "the", "file", "from", "the", "finished", "commit", "will", "result", "in", "not", "found", "error", ".", "The", "file", "will", "of", "course", "remain", "intact", "in", "the", "Commit", "s", "parent", "." ]
1c58cf91d30e03716a4f45213989e890f7b8a78c
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L475-L487
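A hedged sketch for delete_file; it assumes open_commit identifies a commit that has been started but not yet finished, since deletions only take effect in an open commit.
from python_pachyderm import PfsClient

client = PfsClient()  # hypothetical cluster
open_commit = ('test', 'master')  # assumed to identify an open commit
client.delete_file(open_commit, '/data.txt')
# The file is tombstoned in this commit but remains intact in the parent commit.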
236,989
IdentityPython/SATOSA
src/satosa/frontends/saml2.py
SAMLFrontend.handle_authn_request
def handle_authn_request(self, context, binding_in): """ This method is bound to the starting endpoint of the authentication. :type context: satosa.context.Context :type binding_in: str :rtype: satosa.response.Response :param context: The current context :param binding_in: The binding type (http post, http redirect, ...) :return: response """ return self._handle_authn_request(context, binding_in, self.idp)
python
def handle_authn_request(self, context, binding_in): """ This method is bound to the starting endpoint of the authentication. :type context: satosa.context.Context :type binding_in: str :rtype: satosa.response.Response :param context: The current context :param binding_in: The binding type (http post, http redirect, ...) :return: response """ return self._handle_authn_request(context, binding_in, self.idp)
[ "def", "handle_authn_request", "(", "self", ",", "context", ",", "binding_in", ")", ":", "return", "self", ".", "_handle_authn_request", "(", "context", ",", "binding_in", ",", "self", ".", "idp", ")" ]
This method is bound to the starting endpoint of the authentication. :type context: satosa.context.Context :type binding_in: str :rtype: satosa.response.Response :param context: The current context :param binding_in: The binding type (http post, http redirect, ...) :return: response
[ "This", "method", "is", "bound", "to", "the", "starting", "endpoint", "of", "the", "authentication", "." ]
49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb
https://github.com/IdentityPython/SATOSA/blob/49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb/src/satosa/frontends/saml2.py#L90-L102
236,990
IdentityPython/SATOSA
src/satosa/frontends/saml2.py
SAMLFrontend._create_state_data
def _create_state_data(self, context, resp_args, relay_state): """ Returns a dict containing the state needed in the response flow. :type context: satosa.context.Context :type resp_args: dict[str, str | saml2.samlp.NameIDPolicy] :type relay_state: str :rtype: dict[str, dict[str, str] | str] :param context: The current context :param resp_args: Response arguments :param relay_state: Request relay state :return: A state as a dict """ if "name_id_policy" in resp_args and resp_args["name_id_policy"] is not None: resp_args["name_id_policy"] = resp_args["name_id_policy"].to_string().decode("utf-8") return {"resp_args": resp_args, "relay_state": relay_state}
python
def _create_state_data(self, context, resp_args, relay_state): """ Returns a dict containing the state needed in the response flow. :type context: satosa.context.Context :type resp_args: dict[str, str | saml2.samlp.NameIDPolicy] :type relay_state: str :rtype: dict[str, dict[str, str] | str] :param context: The current context :param resp_args: Response arguments :param relay_state: Request relay state :return: A state as a dict """ if "name_id_policy" in resp_args and resp_args["name_id_policy"] is not None: resp_args["name_id_policy"] = resp_args["name_id_policy"].to_string().decode("utf-8") return {"resp_args": resp_args, "relay_state": relay_state}
[ "def", "_create_state_data", "(", "self", ",", "context", ",", "resp_args", ",", "relay_state", ")", ":", "if", "\"name_id_policy\"", "in", "resp_args", "and", "resp_args", "[", "\"name_id_policy\"", "]", "is", "not", "None", ":", "resp_args", "[", "\"name_id_policy\"", "]", "=", "resp_args", "[", "\"name_id_policy\"", "]", ".", "to_string", "(", ")", ".", "decode", "(", "\"utf-8\"", ")", "return", "{", "\"resp_args\"", ":", "resp_args", ",", "\"relay_state\"", ":", "relay_state", "}" ]
Returns a dict containing the state needed in the response flow. :type context: satosa.context.Context :type resp_args: dict[str, str | saml2.samlp.NameIDPolicy] :type relay_state: str :rtype: dict[str, dict[str, str] | str] :param context: The current context :param resp_args: Response arguments :param relay_state: Request relay state :return: A state as a dict
[ "Returns", "a", "dict", "containing", "the", "state", "needed", "in", "the", "response", "flow", "." ]
49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb
https://github.com/IdentityPython/SATOSA/blob/49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb/src/satosa/frontends/saml2.py#L125-L141
236,991
IdentityPython/SATOSA
src/satosa/frontends/saml2.py
SAMLFrontend._handle_authn_request
def _handle_authn_request(self, context, binding_in, idp): """ See doc for handle_authn_request method. :type context: satosa.context.Context :type binding_in: str :type idp: saml.server.Server :rtype: satosa.response.Response :param context: The current context :param binding_in: The pysaml binding type :param idp: The saml frontend idp server :return: response """ req_info = idp.parse_authn_request(context.request["SAMLRequest"], binding_in) authn_req = req_info.message satosa_logging(logger, logging.DEBUG, "%s" % authn_req, context.state) try: resp_args = idp.response_args(authn_req) except SAMLError as e: satosa_logging(logger, logging.ERROR, "Could not find necessary info about entity: %s" % e, context.state) return ServiceError("Incorrect request from requester: %s" % e) requester = resp_args["sp_entity_id"] context.state[self.name] = self._create_state_data(context, idp.response_args(authn_req), context.request.get("RelayState")) subject = authn_req.subject name_id_value = subject.name_id.text if subject else None nameid_formats = { "from_policy": authn_req.name_id_policy and authn_req.name_id_policy.format, "from_response": subject and subject.name_id and subject.name_id.format, "from_metadata": ( idp.metadata[requester] .get("spsso_descriptor", [{}])[0] .get("name_id_format", [{}])[0] .get("text") ), "default": NAMEID_FORMAT_TRANSIENT, } name_id_format = ( nameid_formats["from_policy"] or ( nameid_formats["from_response"] != NAMEID_FORMAT_UNSPECIFIED and nameid_formats["from_response"] ) or nameid_formats["from_metadata"] or nameid_formats["from_response"] or nameid_formats["default"] ) requester_name = self._get_sp_display_name(idp, requester) internal_req = InternalData( subject_id=name_id_value, subject_type=name_id_format, requester=requester, requester_name=requester_name, ) idp_policy = idp.config.getattr("policy", "idp") if idp_policy: internal_req.attributes = self._get_approved_attributes( idp, idp_policy, requester, context.state ) return self.auth_req_callback_func(context, internal_req)
python
def _handle_authn_request(self, context, binding_in, idp): """ See doc for handle_authn_request method. :type context: satosa.context.Context :type binding_in: str :type idp: saml.server.Server :rtype: satosa.response.Response :param context: The current context :param binding_in: The pysaml binding type :param idp: The saml frontend idp server :return: response """ req_info = idp.parse_authn_request(context.request["SAMLRequest"], binding_in) authn_req = req_info.message satosa_logging(logger, logging.DEBUG, "%s" % authn_req, context.state) try: resp_args = idp.response_args(authn_req) except SAMLError as e: satosa_logging(logger, logging.ERROR, "Could not find necessary info about entity: %s" % e, context.state) return ServiceError("Incorrect request from requester: %s" % e) requester = resp_args["sp_entity_id"] context.state[self.name] = self._create_state_data(context, idp.response_args(authn_req), context.request.get("RelayState")) subject = authn_req.subject name_id_value = subject.name_id.text if subject else None nameid_formats = { "from_policy": authn_req.name_id_policy and authn_req.name_id_policy.format, "from_response": subject and subject.name_id and subject.name_id.format, "from_metadata": ( idp.metadata[requester] .get("spsso_descriptor", [{}])[0] .get("name_id_format", [{}])[0] .get("text") ), "default": NAMEID_FORMAT_TRANSIENT, } name_id_format = ( nameid_formats["from_policy"] or ( nameid_formats["from_response"] != NAMEID_FORMAT_UNSPECIFIED and nameid_formats["from_response"] ) or nameid_formats["from_metadata"] or nameid_formats["from_response"] or nameid_formats["default"] ) requester_name = self._get_sp_display_name(idp, requester) internal_req = InternalData( subject_id=name_id_value, subject_type=name_id_format, requester=requester, requester_name=requester_name, ) idp_policy = idp.config.getattr("policy", "idp") if idp_policy: internal_req.attributes = self._get_approved_attributes( idp, idp_policy, requester, context.state ) return self.auth_req_callback_func(context, internal_req)
[ "def", "_handle_authn_request", "(", "self", ",", "context", ",", "binding_in", ",", "idp", ")", ":", "req_info", "=", "idp", ".", "parse_authn_request", "(", "context", ".", "request", "[", "\"SAMLRequest\"", "]", ",", "binding_in", ")", "authn_req", "=", "req_info", ".", "message", "satosa_logging", "(", "logger", ",", "logging", ".", "DEBUG", ",", "\"%s\"", "%", "authn_req", ",", "context", ".", "state", ")", "try", ":", "resp_args", "=", "idp", ".", "response_args", "(", "authn_req", ")", "except", "SAMLError", "as", "e", ":", "satosa_logging", "(", "logger", ",", "logging", ".", "ERROR", ",", "\"Could not find necessary info about entity: %s\"", "%", "e", ",", "context", ".", "state", ")", "return", "ServiceError", "(", "\"Incorrect request from requester: %s\"", "%", "e", ")", "requester", "=", "resp_args", "[", "\"sp_entity_id\"", "]", "context", ".", "state", "[", "self", ".", "name", "]", "=", "self", ".", "_create_state_data", "(", "context", ",", "idp", ".", "response_args", "(", "authn_req", ")", ",", "context", ".", "request", ".", "get", "(", "\"RelayState\"", ")", ")", "subject", "=", "authn_req", ".", "subject", "name_id_value", "=", "subject", ".", "name_id", ".", "text", "if", "subject", "else", "None", "nameid_formats", "=", "{", "\"from_policy\"", ":", "authn_req", ".", "name_id_policy", "and", "authn_req", ".", "name_id_policy", ".", "format", ",", "\"from_response\"", ":", "subject", "and", "subject", ".", "name_id", "and", "subject", ".", "name_id", ".", "format", ",", "\"from_metadata\"", ":", "(", "idp", ".", "metadata", "[", "requester", "]", ".", "get", "(", "\"spsso_descriptor\"", ",", "[", "{", "}", "]", ")", "[", "0", "]", ".", "get", "(", "\"name_id_format\"", ",", "[", "{", "}", "]", ")", "[", "0", "]", ".", "get", "(", "\"text\"", ")", ")", ",", "\"default\"", ":", "NAMEID_FORMAT_TRANSIENT", ",", "}", "name_id_format", "=", "(", "nameid_formats", "[", "\"from_policy\"", "]", "or", "(", "nameid_formats", "[", "\"from_response\"", "]", "!=", "NAMEID_FORMAT_UNSPECIFIED", "and", "nameid_formats", "[", "\"from_response\"", "]", ")", "or", "nameid_formats", "[", "\"from_metadata\"", "]", "or", "nameid_formats", "[", "\"from_response\"", "]", "or", "nameid_formats", "[", "\"default\"", "]", ")", "requester_name", "=", "self", ".", "_get_sp_display_name", "(", "idp", ",", "requester", ")", "internal_req", "=", "InternalData", "(", "subject_id", "=", "name_id_value", ",", "subject_type", "=", "name_id_format", ",", "requester", "=", "requester", ",", "requester_name", "=", "requester_name", ",", ")", "idp_policy", "=", "idp", ".", "config", ".", "getattr", "(", "\"policy\"", ",", "\"idp\"", ")", "if", "idp_policy", ":", "internal_req", ".", "attributes", "=", "self", ".", "_get_approved_attributes", "(", "idp", ",", "idp_policy", ",", "requester", ",", "context", ".", "state", ")", "return", "self", ".", "auth_req_callback_func", "(", "context", ",", "internal_req", ")" ]
See doc for handle_authn_request method. :type context: satosa.context.Context :type binding_in: str :type idp: saml.server.Server :rtype: satosa.response.Response :param context: The current context :param binding_in: The pysaml binding type :param idp: The saml frontend idp server :return: response
[ "See", "doc", "for", "handle_authn_request", "method", "." ]
49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb
https://github.com/IdentityPython/SATOSA/blob/49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb/src/satosa/frontends/saml2.py#L177-L245
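The NameID format selection in _handle_authn_request is a precedence chain; the standalone sketch below reproduces that fallback order with plain values (the URNs are standard SAML constants, everything else is hypothetical).
NAMEID_FORMAT_TRANSIENT = "urn:oasis:names:tc:SAML:2.0:nameid-format:transient"
NAMEID_FORMAT_UNSPECIFIED = "urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified"

nameid_formats = {
    "from_policy": None,                         # AuthnRequest carried no NameIDPolicy format
    "from_response": NAMEID_FORMAT_UNSPECIFIED,  # Subject NameID format from the request
    "from_metadata": None,                       # SP metadata declared no NameIDFormat
    "default": NAMEID_FORMAT_TRANSIENT,
}

# Policy wins, then a non-"unspecified" response format, then metadata,
# then whatever the response said, then the transient default.
name_id_format = (
    nameid_formats["from_policy"]
    or (nameid_formats["from_response"] != NAMEID_FORMAT_UNSPECIFIED
        and nameid_formats["from_response"])
    or nameid_formats["from_metadata"]
    or nameid_formats["from_response"]
    or nameid_formats["default"]
)
print(name_id_format)  # falls through to the response's unspecified format, since nothing better exists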
236,992
IdentityPython/SATOSA
src/satosa/frontends/saml2.py
SAMLFrontend._get_approved_attributes
def _get_approved_attributes(self, idp, idp_policy, sp_entity_id, state): """ Returns a list of approved attributes :type idp: saml.server.Server :type idp_policy: saml2.assertion.Policy :type sp_entity_id: str :type state: satosa.state.State :rtype: list[str] :param idp: The saml frontend idp server :param idp_policy: The idp policy :param sp_entity_id: The requesting sp entity id :param state: The current state :return: A list containing approved attributes """ name_format = idp_policy.get_name_form(sp_entity_id) attrconvs = idp.config.attribute_converters idp_policy.acs = attrconvs attribute_filter = [] for aconv in attrconvs: if aconv.name_format == name_format: all_attributes = {v: None for v in aconv._fro.values()} attribute_filter = list(idp_policy.restrict(all_attributes, sp_entity_id, idp.metadata).keys()) break attribute_filter = self.converter.to_internal_filter(self.attribute_profile, attribute_filter) satosa_logging(logger, logging.DEBUG, "Filter: %s" % attribute_filter, state) return attribute_filter
python
def _get_approved_attributes(self, idp, idp_policy, sp_entity_id, state): """ Returns a list of approved attributes :type idp: saml.server.Server :type idp_policy: saml2.assertion.Policy :type sp_entity_id: str :type state: satosa.state.State :rtype: list[str] :param idp: The saml frontend idp server :param idp_policy: The idp policy :param sp_entity_id: The requesting sp entity id :param state: The current state :return: A list containing approved attributes """ name_format = idp_policy.get_name_form(sp_entity_id) attrconvs = idp.config.attribute_converters idp_policy.acs = attrconvs attribute_filter = [] for aconv in attrconvs: if aconv.name_format == name_format: all_attributes = {v: None for v in aconv._fro.values()} attribute_filter = list(idp_policy.restrict(all_attributes, sp_entity_id, idp.metadata).keys()) break attribute_filter = self.converter.to_internal_filter(self.attribute_profile, attribute_filter) satosa_logging(logger, logging.DEBUG, "Filter: %s" % attribute_filter, state) return attribute_filter
[ "def", "_get_approved_attributes", "(", "self", ",", "idp", ",", "idp_policy", ",", "sp_entity_id", ",", "state", ")", ":", "name_format", "=", "idp_policy", ".", "get_name_form", "(", "sp_entity_id", ")", "attrconvs", "=", "idp", ".", "config", ".", "attribute_converters", "idp_policy", ".", "acs", "=", "attrconvs", "attribute_filter", "=", "[", "]", "for", "aconv", "in", "attrconvs", ":", "if", "aconv", ".", "name_format", "==", "name_format", ":", "all_attributes", "=", "{", "v", ":", "None", "for", "v", "in", "aconv", ".", "_fro", ".", "values", "(", ")", "}", "attribute_filter", "=", "list", "(", "idp_policy", ".", "restrict", "(", "all_attributes", ",", "sp_entity_id", ",", "idp", ".", "metadata", ")", ".", "keys", "(", ")", ")", "break", "attribute_filter", "=", "self", ".", "converter", ".", "to_internal_filter", "(", "self", ".", "attribute_profile", ",", "attribute_filter", ")", "satosa_logging", "(", "logger", ",", "logging", ".", "DEBUG", ",", "\"Filter: %s\"", "%", "attribute_filter", ",", "state", ")", "return", "attribute_filter" ]
Returns a list of approved attributes :type idp: saml.server.Server :type idp_policy: saml2.assertion.Policy :type sp_entity_id: str :type state: satosa.state.State :rtype: list[str] :param idp: The saml frontend idp server :param idp_policy: The idp policy :param sp_entity_id: The requesting sp entity id :param state: The current state :return: A list containing approved attributes
[ "Returns", "a", "list", "of", "approved", "attributes" ]
49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb
https://github.com/IdentityPython/SATOSA/blob/49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb/src/satosa/frontends/saml2.py#L247-L274
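The attribute filtering in _get_approved_attributes boils down to: pick the converter matching the negotiated name format, take every attribute it knows, and keep only the names the IdP policy allows for this SP. A standalone illustration with hypothetical attribute names and formats:
converters = {  # name_format -> attributes the converter can map (hypothetical)
    "urn:oasis:names:tc:SAML:2.0:attrname-format:uri": {"givenName", "sn", "mail"},
    "urn:oasis:names:tc:SAML:2.0:attrname-format:basic": {"uid", "mail"},
}
sp_policy_allows = {"mail", "sn"}  # stand-in for what idp_policy.restrict(...) would keep for this SP
name_format = "urn:oasis:names:tc:SAML:2.0:attrname-format:uri"

attribute_filter = []
for fmt, known in converters.items():
    if fmt == name_format:
        attribute_filter = sorted(a for a in known if a in sp_policy_allows)
        break
print(attribute_filter)  # ['mail', 'sn']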
236,993
IdentityPython/SATOSA
src/satosa/frontends/saml2.py
SAMLFrontend._build_idp_config_endpoints
def _build_idp_config_endpoints(self, config, providers): """ Builds the final frontend module config :type config: dict[str, Any] :type providers: list[str] :rtype: dict[str, Any] :param config: The module config :param providers: A list of backend names :return: The final config """ # Add an endpoint to each provider idp_endpoints = [] for endp_category in self.endpoints: for func, endpoint in self.endpoints[endp_category].items(): for provider in providers: _endpoint = "{base}/{provider}/{endpoint}".format( base=self.base_url, provider=provider, endpoint=endpoint) idp_endpoints.append((_endpoint, func)) config["service"]["idp"]["endpoints"][endp_category] = idp_endpoints return config
python
def _build_idp_config_endpoints(self, config, providers): """ Builds the final frontend module config :type config: dict[str, Any] :type providers: list[str] :rtype: dict[str, Any] :param config: The module config :param providers: A list of backend names :return: The final config """ # Add an endpoint to each provider idp_endpoints = [] for endp_category in self.endpoints: for func, endpoint in self.endpoints[endp_category].items(): for provider in providers: _endpoint = "{base}/{provider}/{endpoint}".format( base=self.base_url, provider=provider, endpoint=endpoint) idp_endpoints.append((_endpoint, func)) config["service"]["idp"]["endpoints"][endp_category] = idp_endpoints return config
[ "def", "_build_idp_config_endpoints", "(", "self", ",", "config", ",", "providers", ")", ":", "# Add an endpoint to each provider", "idp_endpoints", "=", "[", "]", "for", "endp_category", "in", "self", ".", "endpoints", ":", "for", "func", ",", "endpoint", "in", "self", ".", "endpoints", "[", "endp_category", "]", ".", "items", "(", ")", ":", "for", "provider", "in", "providers", ":", "_endpoint", "=", "\"{base}/{provider}/{endpoint}\"", ".", "format", "(", "base", "=", "self", ".", "base_url", ",", "provider", "=", "provider", ",", "endpoint", "=", "endpoint", ")", "idp_endpoints", ".", "append", "(", "(", "_endpoint", ",", "func", ")", ")", "config", "[", "\"service\"", "]", "[", "\"idp\"", "]", "[", "\"endpoints\"", "]", "[", "endp_category", "]", "=", "idp_endpoints", "return", "config" ]
Builds the final frontend module config :type config: dict[str, Any] :type providers: list[str] :rtype: dict[str, Any] :param config: The module config :param providers: A list of backend names :return: The final config
[ "Builds", "the", "final", "frontend", "module", "config" ]
49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb
https://github.com/IdentityPython/SATOSA/blob/49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb/src/satosa/frontends/saml2.py#L522-L544
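The endpoint construction in _build_idp_config_endpoints crosses every configured (handler, path) pair with every backend provider. A standalone sketch with hypothetical values:
base_url = "https://proxy.example.com"                # hypothetical proxy base
providers = ["saml2_backend", "oidc_backend"]         # hypothetical backend names
endpoints = {"single_sign_on_service": {"redirect-binding": "sso/redirect",
                                        "post-binding": "sso/post"}}

idp_endpoints = []
for category, mapping in endpoints.items():
    for func, path in mapping.items():
        for provider in providers:
            # One endpoint URL per provider: {base}/{provider}/{path}
            idp_endpoints.append(("{}/{}/{}".format(base_url, provider, path), func))
print(idp_endpoints)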
236,994
IdentityPython/SATOSA
src/satosa/frontends/saml2.py
SAMLMirrorFrontend._load_endpoints_to_config
def _load_endpoints_to_config(self, provider, target_entity_id, config=None): """ Loads approved endpoints to the config. :type url_base: str :type provider: str :type target_entity_id: str :rtype: dict[str, Any] :param url_base: The proxy base url :param provider: target backend name :param target_entity_id: frontend target entity id :return: IDP config with endpoints """ idp_conf = copy.deepcopy(config or self.idp_config) for service, endpoint in self.endpoints.items(): idp_endpoints = [] for binding, path in endpoint.items(): url = "{base}/{provider}/{target_id}/{path}".format( base=self.base_url, provider=provider, target_id=target_entity_id, path=path) idp_endpoints.append((url, binding)) idp_conf["service"]["idp"]["endpoints"][service] = idp_endpoints return idp_conf
python
def _load_endpoints_to_config(self, provider, target_entity_id, config=None): """ Loads approved endpoints to the config. :type url_base: str :type provider: str :type target_entity_id: str :rtype: dict[str, Any] :param url_base: The proxy base url :param provider: target backend name :param target_entity_id: frontend target entity id :return: IDP config with endpoints """ idp_conf = copy.deepcopy(config or self.idp_config) for service, endpoint in self.endpoints.items(): idp_endpoints = [] for binding, path in endpoint.items(): url = "{base}/{provider}/{target_id}/{path}".format( base=self.base_url, provider=provider, target_id=target_entity_id, path=path) idp_endpoints.append((url, binding)) idp_conf["service"]["idp"]["endpoints"][service] = idp_endpoints return idp_conf
[ "def", "_load_endpoints_to_config", "(", "self", ",", "provider", ",", "target_entity_id", ",", "config", "=", "None", ")", ":", "idp_conf", "=", "copy", ".", "deepcopy", "(", "config", "or", "self", ".", "idp_config", ")", "for", "service", ",", "endpoint", "in", "self", ".", "endpoints", ".", "items", "(", ")", ":", "idp_endpoints", "=", "[", "]", "for", "binding", ",", "path", "in", "endpoint", ".", "items", "(", ")", ":", "url", "=", "\"{base}/{provider}/{target_id}/{path}\"", ".", "format", "(", "base", "=", "self", ".", "base_url", ",", "provider", "=", "provider", ",", "target_id", "=", "target_entity_id", ",", "path", "=", "path", ")", "idp_endpoints", ".", "append", "(", "(", "url", ",", "binding", ")", ")", "idp_conf", "[", "\"service\"", "]", "[", "\"idp\"", "]", "[", "\"endpoints\"", "]", "[", "service", "]", "=", "idp_endpoints", "return", "idp_conf" ]
Loads approved endpoints to the config. :type provider: str :type target_entity_id: str :type config: dict[str, Any] :rtype: dict[str, Any] :param provider: target backend name :param target_entity_id: frontend target entity id :param config: base IdP config to copy; defaults to self.idp_config :return: IDP config with endpoints
[ "Loads", "approved", "endpoints", "to", "the", "config", "." ]
49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb
https://github.com/IdentityPython/SATOSA/blob/49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb/src/satosa/frontends/saml2.py#L564-L587
236,995
IdentityPython/SATOSA
src/satosa/frontends/saml2.py
SAMLMirrorFrontend._load_idp_dynamic_entity_id
def _load_idp_dynamic_entity_id(self, state): """ Loads an idp server with the entity id saved in state :type state: satosa.state.State :rtype: saml.server.Server :param state: The current state :return: An idp server """ # Change the idp entity id dynamically idp_config_file = copy.deepcopy(self.idp_config) idp_config_file["entityid"] = "{}/{}".format(self.idp_config["entityid"], state[self.name]["target_entity_id"]) idp_config = IdPConfig().load(idp_config_file, metadata_construction=False) return Server(config=idp_config)
python
def _load_idp_dynamic_entity_id(self, state): """ Loads an idp server with the entity id saved in state :type state: satosa.state.State :rtype: saml.server.Server :param state: The current state :return: An idp server """ # Change the idp entity id dynamically idp_config_file = copy.deepcopy(self.idp_config) idp_config_file["entityid"] = "{}/{}".format(self.idp_config["entityid"], state[self.name]["target_entity_id"]) idp_config = IdPConfig().load(idp_config_file, metadata_construction=False) return Server(config=idp_config)
[ "def", "_load_idp_dynamic_entity_id", "(", "self", ",", "state", ")", ":", "# Change the idp entity id dynamically", "idp_config_file", "=", "copy", ".", "deepcopy", "(", "self", ".", "idp_config", ")", "idp_config_file", "[", "\"entityid\"", "]", "=", "\"{}/{}\"", ".", "format", "(", "self", ".", "idp_config", "[", "\"entityid\"", "]", ",", "state", "[", "self", ".", "name", "]", "[", "\"target_entity_id\"", "]", ")", "idp_config", "=", "IdPConfig", "(", ")", ".", "load", "(", "idp_config_file", ",", "metadata_construction", "=", "False", ")", "return", "Server", "(", "config", "=", "idp_config", ")" ]
Loads an idp server with the entity id saved in state :type state: satosa.state.State :rtype: saml.server.Server :param state: The current state :return: An idp server
[ "Loads", "an", "idp", "server", "with", "the", "entity", "id", "saved", "in", "state" ]
49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb
https://github.com/IdentityPython/SATOSA/blob/49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb/src/satosa/frontends/saml2.py#L605-L619
236,996
IdentityPython/SATOSA
src/satosa/frontends/saml2.py
SAMLVirtualCoFrontend._get_co_name_from_path
def _get_co_name_from_path(self, context): """ The CO name is URL encoded and obtained from the request path for a request coming into one of the standard binding endpoints. For example the HTTP-Redirect binding request path will have the format {base}/{backend}/{co_name}/sso/redirect :type context: satosa.context.Context :rtype: str :param context: """ url_encoded_co_name = context.path.split("/")[1] co_name = unquote_plus(url_encoded_co_name) return co_name
python
def _get_co_name_from_path(self, context): """ The CO name is URL encoded and obtained from the request path for a request coming into one of the standard binding endpoints. For example the HTTP-Redirect binding request path will have the format {base}/{backend}/{co_name}/sso/redirect :type context: satosa.context.Context :rtype: str :param context: """ url_encoded_co_name = context.path.split("/")[1] co_name = unquote_plus(url_encoded_co_name) return co_name
[ "def", "_get_co_name_from_path", "(", "self", ",", "context", ")", ":", "url_encoded_co_name", "=", "context", ".", "path", ".", "split", "(", "\"/\"", ")", "[", "1", "]", "co_name", "=", "unquote_plus", "(", "url_encoded_co_name", ")", "return", "co_name" ]
The CO name is URL encoded and obtained from the request path for a request coming into one of the standard binding endpoints. For example the HTTP-Redirect binding request path will have the format {base}/{backend}/{co_name}/sso/redirect :type context: satosa.context.Context :rtype: str :param context:
[ "The", "CO", "name", "is", "URL", "encoded", "and", "obtained", "from", "the", "request", "path", "for", "a", "request", "coming", "into", "one", "of", "the", "standard", "binding", "endpoints", ".", "For", "example", "the", "HTTP", "-", "Redirect", "binding", "request", "path", "will", "have", "the", "format" ]
49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb
https://github.com/IdentityPython/SATOSA/blob/49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb/src/satosa/frontends/saml2.py#L750-L768
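A standalone sketch of the path parsing in _get_co_name_from_path, using a hypothetical request path; unquote_plus undoes the URL encoding applied when the endpoint was registered.
from urllib.parse import unquote_plus

path = "saml2_backend/MY+RESEARCH+CO/sso/redirect"  # hypothetical context.path (no leading slash)
co_name = unquote_plus(path.split("/")[1])
print(co_name)  # MY RESEARCH CO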
236,997
IdentityPython/SATOSA
src/satosa/frontends/saml2.py
SAMLVirtualCoFrontend._get_co_name
def _get_co_name(self, context): """ Obtain the CO name previously saved in the request state, or if not set use the request path obtained from the current context to determine the target CO. :type context: The current context :rtype: string :param context: The current context :return: CO name """ try: co_name = context.state[self.name][self.KEY_CO_NAME] logger.debug("Found CO {} from state".format(co_name)) except KeyError: co_name = self._get_co_name_from_path(context) logger.debug("Found CO {} from request path".format(co_name)) return co_name
python
def _get_co_name(self, context): """ Obtain the CO name previously saved in the request state, or if not set use the request path obtained from the current context to determine the target CO. :type context: The current context :rtype: string :param context: The current context :return: CO name """ try: co_name = context.state[self.name][self.KEY_CO_NAME] logger.debug("Found CO {} from state".format(co_name)) except KeyError: co_name = self._get_co_name_from_path(context) logger.debug("Found CO {} from request path".format(co_name)) return co_name
[ "def", "_get_co_name", "(", "self", ",", "context", ")", ":", "try", ":", "co_name", "=", "context", ".", "state", "[", "self", ".", "name", "]", "[", "self", ".", "KEY_CO_NAME", "]", "logger", ".", "debug", "(", "\"Found CO {} from state\"", ".", "format", "(", "co_name", ")", ")", "except", "KeyError", ":", "co_name", "=", "self", ".", "_get_co_name_from_path", "(", "context", ")", "logger", ".", "debug", "(", "\"Found CO {} from request path\"", ".", "format", "(", "co_name", ")", ")", "return", "co_name" ]
Obtain the CO name previously saved in the request state, or if not set, use the request path obtained from the current context to determine the target CO. :type context: satosa.context.Context :rtype: str :param context: The current context :return: CO name
[ "Obtain", "the", "CO", "name", "previously", "saved", "in", "the", "request", "state", "or", "if", "not", "set", "use", "the", "request", "path", "obtained", "from", "the", "current", "context", "to", "determine", "the", "target", "CO", "." ]
49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb
https://github.com/IdentityPython/SATOSA/blob/49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb/src/satosa/frontends/saml2.py#L770-L789
236,998
IdentityPython/SATOSA
src/satosa/frontends/saml2.py
SAMLVirtualCoFrontend._add_endpoints_to_config
def _add_endpoints_to_config(self, config, co_name, backend_name): """ Use the request path from the context to determine the target backend, then construct mappings from bindings to endpoints for the virtual IdP for the CO. The endpoint URLs have the form {base}/{backend}/{co_name}/{path} :type config: satosa.satosa_config.SATOSAConfig :type co_name: str :type backend_name: str :rtype: satosa.satosa_config.SATOSAConfig :param config: satosa proxy config :param co_name: CO name :param backend_name: The target backend name :return: config with mappings for CO IdP """ for service, endpoint in self.endpoints.items(): idp_endpoints = [] for binding, path in endpoint.items(): url = "{base}/{backend}/{co_name}/{path}".format( base=self.base_url, backend=backend_name, co_name=quote_plus(co_name), path=path) mapping = (url, binding) idp_endpoints.append(mapping) # Overwrite the IdP config with the CO specific mappings between # SAML binding and URL endpoints. config["service"]["idp"]["endpoints"][service] = idp_endpoints return config
python
def _add_endpoints_to_config(self, config, co_name, backend_name): """ Use the request path from the context to determine the target backend, then construct mappings from bindings to endpoints for the virtual IdP for the CO. The endpoint URLs have the form {base}/{backend}/{co_name}/{path} :type config: satosa.satosa_config.SATOSAConfig :type co_name: str :type backend_name: str :rtype: satosa.satosa_config.SATOSAConfig :param config: satosa proxy config :param co_name: CO name :param backend_name: The target backend name :return: config with mappings for CO IdP """ for service, endpoint in self.endpoints.items(): idp_endpoints = [] for binding, path in endpoint.items(): url = "{base}/{backend}/{co_name}/{path}".format( base=self.base_url, backend=backend_name, co_name=quote_plus(co_name), path=path) mapping = (url, binding) idp_endpoints.append(mapping) # Overwrite the IdP config with the CO specific mappings between # SAML binding and URL endpoints. config["service"]["idp"]["endpoints"][service] = idp_endpoints return config
[ "def", "_add_endpoints_to_config", "(", "self", ",", "config", ",", "co_name", ",", "backend_name", ")", ":", "for", "service", ",", "endpoint", "in", "self", ".", "endpoints", ".", "items", "(", ")", ":", "idp_endpoints", "=", "[", "]", "for", "binding", ",", "path", "in", "endpoint", ".", "items", "(", ")", ":", "url", "=", "\"{base}/{backend}/{co_name}/{path}\"", ".", "format", "(", "base", "=", "self", ".", "base_url", ",", "backend", "=", "backend_name", ",", "co_name", "=", "quote_plus", "(", "co_name", ")", ",", "path", "=", "path", ")", "mapping", "=", "(", "url", ",", "binding", ")", "idp_endpoints", ".", "append", "(", "mapping", ")", "# Overwrite the IdP config with the CO specific mappings between", "# SAML binding and URL endpoints.", "config", "[", "\"service\"", "]", "[", "\"idp\"", "]", "[", "\"endpoints\"", "]", "[", "service", "]", "=", "idp_endpoints", "return", "config" ]
Use the request path from the context to determine the target backend, then construct mappings from bindings to endpoints for the virtual IdP for the CO. The endpoint URLs have the form {base}/{backend}/{co_name}/{path} :type config: satosa.satosa_config.SATOSAConfig :type co_name: str :type backend_name: str :rtype: satosa.satosa_config.SATOSAConfig :param config: satosa proxy config :param co_name: CO name :param backend_name: The target backend name :return: config with mappings for CO IdP
[ "Use", "the", "request", "path", "from", "the", "context", "to", "determine", "the", "target", "backend", "then", "construct", "mappings", "from", "bindings", "to", "endpoints", "for", "the", "virtual", "IdP", "for", "the", "CO", "." ]
49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb
https://github.com/IdentityPython/SATOSA/blob/49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb/src/satosa/frontends/saml2.py#L791-L828
236,999
IdentityPython/SATOSA
src/satosa/frontends/saml2.py
SAMLVirtualCoFrontend._add_entity_id
def _add_entity_id(self, config, co_name): """ Use the CO name to construct the entity ID for the virtual IdP for the CO. The entity ID has the form {base_entity_id}/{co_name} :type config: satosa.satosa_config.SATOSAConfig :type co_name: str :rtype: satosa.satosa_config.SATOSAConfig :param config: satosa proxy config :param co_name: CO name :return: config with updated entity ID """ base_entity_id = config['entityid'] co_entity_id = "{}/{}".format(base_entity_id, quote_plus(co_name)) config['entityid'] = co_entity_id return config
python
def _add_entity_id(self, config, co_name): """ Use the CO name to construct the entity ID for the virtual IdP for the CO. The entity ID has the form {base_entity_id}/{co_name} :type config: satosa.satosa_config.SATOSAConfig :type co_name: str :rtype: satosa.satosa_config.SATOSAConfig :param config: satosa proxy config :param co_name: CO name :return: config with updated entity ID """ base_entity_id = config['entityid'] co_entity_id = "{}/{}".format(base_entity_id, quote_plus(co_name)) config['entityid'] = co_entity_id return config
[ "def", "_add_entity_id", "(", "self", ",", "config", ",", "co_name", ")", ":", "base_entity_id", "=", "config", "[", "'entityid'", "]", "co_entity_id", "=", "\"{}/{}\"", ".", "format", "(", "base_entity_id", ",", "quote_plus", "(", "co_name", ")", ")", "config", "[", "'entityid'", "]", "=", "co_entity_id", "return", "config" ]
Use the CO name to construct the entity ID for the virtual IdP for the CO. The entity ID has the form {base_entity_id}/{co_name} :type config: satosa.satosa_config.SATOSAConfig :type co_name: str :rtype: satosa.satosa_config.SATOSAConfig :param config: satosa proxy config :param co_name: CO name :return: config with updated entity ID
[ "Use", "the", "CO", "name", "to", "construct", "the", "entity", "ID", "for", "the", "virtual", "IdP", "for", "the", "CO", "." ]
49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb
https://github.com/IdentityPython/SATOSA/blob/49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb/src/satosa/frontends/saml2.py#L830-L852
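A standalone sketch of the entity ID construction in _add_entity_id, with a hypothetical base entity ID and CO name; quote_plus keeps the CO name URL safe inside the derived entity ID.
from urllib.parse import quote_plus

base_entity_id = "https://proxy.example.com/Saml2IDP"  # hypothetical
co_name = "MY RESEARCH CO"
co_entity_id = "{}/{}".format(base_entity_id, quote_plus(co_name))
print(co_entity_id)  # https://proxy.example.com/Saml2IDP/MY+RESEARCH+CO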