_id: stringlengths (2 to 7)
title: stringlengths (1 to 88)
partition: stringclasses (3 values)
text: stringlengths (75 to 19.8k)
language: stringclasses (1 value)
meta_information: dict
q10900
_construct_new_key
train
def _construct_new_key(name, units=None): """Construct an MDF safe key from the name and units""" to_replace = ["/", "\\", "*", "^", "#", " ", "\n", "\t", ",", ".", ")", "(", "'", "`", "-"] to_remove = ["$", "{", "}"] cat = name if units: cat = "_".join([name, units]) for c in to_replace: cat = cat.replace(c, "_") for c in to_remove: cat = cat.replace(c, "") cat = re.sub('_+','_', cat) return cat
python
{ "resource": "" }
q10901
_extract_key_value
train
def _extract_key_value(obj): """Extract the value from the object and make a descriptive key""" key = None; value = None # Parse a Value object, which includes Properties if isinstance(obj, Value): key = _construct_new_key(obj.name, obj.units) value = [] if obj.scalars: value = [(val.value if isinstance(val, Scalar) else val) for val in obj.scalars] elif obj.vectors and len(obj.vectors) == 1: value = [(val.value if isinstance(val, Scalar) else val) for val in obj.vectors[0]] if len(value) == 1: value = value[0] elif len(value) == 0: value = None # If there is a process step, pull out its name as the value # TODO: resolve duplicates if isinstance(obj, ProcessStep): key = "Processing" value = obj.name return key, value
python
{ "resource": "" }
q10902
process_input_buffer
train
def process_input_buffer(): """Send the content of the input buffer to all remote processes, this must be called in the main thread""" from polysh.control_commands_helpers import handle_control_command data = the_stdin_thread.input_buffer.get() remote_dispatcher.log(b'> ' + data) if data.startswith(b':'): try: handle_control_command(data[1:-1].decode()) except UnicodeDecodeError as e: console_output(b'Could not decode command.') return if data.startswith(b'!'): try: retcode = subprocess.call(data[1:], shell=True) except OSError as e: if e.errno == errno.EINTR: console_output(b'Child was interrupted\n') retcode = 0 else: raise if retcode > 128 and retcode <= 192: retcode = 128 - retcode if retcode > 0: console_output('Child returned {:d}\n'.format(retcode).encode()) elif retcode < 0: console_output('Child was terminated by signal {:d}\n'.format( -retcode).encode()) return for r in dispatchers.all_instances(): try: r.dispatch_command(data) except asyncore.ExitNow as e: raise e except Exception as msg: console_output('{} for {}, disconnecting\n'.format( str(msg), r.display_name).encode()) r.disconnect() else: if r.enabled and r.state is remote_dispatcher.STATE_IDLE: r.change_state(remote_dispatcher.STATE_RUNNING)
python
{ "resource": "" }
q10903
write_main_socket
train
def write_main_socket(c): """Synchronous write to the main socket, wait for ACK""" the_stdin_thread.socket_write.send(c) while True: try: the_stdin_thread.socket_write.recv(1) except socket.error as e: if e.errno != errno.EINTR: raise else: break
python
{ "resource": "" }
q10904
get_stdin_pid
train
def get_stdin_pid(cached_result=None): """Try to get the PID of the stdin thread, otherwise get the whole process ID""" if cached_result is None: try: tasks = os.listdir('/proc/self/task') except OSError as e: if e.errno != errno.ENOENT: raise cached_result = os.getpid() else: tasks.remove(str(os.getpid())) assert len(tasks) == 1 cached_result = int(tasks[0]) return cached_result
python
{ "resource": "" }
q10905
InputBuffer.add
train
def add(self, data): """Add data to the buffer""" assert isinstance(data, bytes) with self.lock: self.buf += data
python
{ "resource": "" }
q10906
InputBuffer.get
train
def get(self): """Get the content of the buffer""" data = b'' with self.lock: data, self.buf = self.buf, b'' return data
python
{ "resource": "" }
q10907
SocketNotificationReader.handle_read
train
def handle_read(self): """Handle all the available character commands in the socket""" while True: try: c = self.recv(1) except socket.error as e: if e.errno == errno.EWOULDBLOCK: return else: raise else: self._do(c) self.socket.setblocking(True) self.send(b'A') self.socket.setblocking(False)
python
{ "resource": "" }
q10908
knuth_sum
train
def knuth_sum(a, b): """Error-free transformation of the sum of two floating point numbers according to D.E. Knuth. The Art of Computer Programming: Seminumerical Algorithms, volume 2. Addison Wesley, Reading, Massachusetts, second edition, 1981. The underlying problem is that the exact sum a+b of two floating point numbers a and b is not necessarily a floating point number; for example if you add a very large and a very small number. It is however known that the difference between the best floating point approximation of a+b and the exact a+b is again a floating point number. This routine returns the sum and the error. Algorithm 3.1 in <https://doi.org/10.1137/030601818>. """ x = a + b z = x - a y = (a - (x - z)) + (b - z) return x, y
python
{ "resource": "" }
q10909
distill
train
def distill(p, K): """Algorithm 4.3. Error-free vector transformation for summation. The vector p is transformed without changing the sum, and p_n is replaced by float(sum(p)). Kahan [21] calls this a 'distillation algorithm.' """ q = p.reshape(p.shape[0], -1) for _ in range(K): _accupy.distill(q) return q.reshape(p.shape)
python
{ "resource": "" }
q10910
toggle_shells
train
def toggle_shells(command, enable): """Enable or disable the specified shells. If the command would have no effect, it changes all other shells to the inverse enable value.""" selection = list(selected_shells(command)) if command and command != '*' and selection: for i in selection: if i.state != remote_dispatcher.STATE_DEAD and i.enabled != enable: break else: toggle_shells('*', not enable) for i in selection: if i.state != remote_dispatcher.STATE_DEAD: i.set_enabled(enable)
python
{ "resource": "" }
q10911
selected_shells
train
def selected_shells(command): """Iterator over the shells with names matching the patterns. An empty pattern matches all the shells""" if not command or command == '*': for i in dispatchers.all_instances(): yield i return selected = set() instance_found = False for pattern in command.split(): found = False for expanded_pattern in expand_syntax(pattern): for i in dispatchers.all_instances(): instance_found = True if fnmatch(i.display_name, expanded_pattern): found = True if i not in selected: selected.add(i) yield i if instance_found and not found: console_output('{} not found\n'.format(pattern).encode())
python
{ "resource": "" }
q10912
complete_shells
train
def complete_shells(line, text, predicate=lambda i: True): """Return the shell names to include in the completion""" res = [i.display_name + ' ' for i in dispatchers.all_instances() if i.display_name.startswith(text) and predicate(i) and ' ' + i.display_name + ' ' not in line] return res
python
{ "resource": "" }
q10913
kill_all
train
def kill_all(): """When polysh quits, we kill all the remote shells we started""" for i in dispatchers.all_instances(): try: os.kill(-i.pid, signal.SIGKILL) except OSError: # The process was already dead, no problem pass
python
{ "resource": "" }
q10914
BufferedDispatcher._handle_read_chunk
train
def _handle_read_chunk(self): """Some data can be read""" new_data = b'' buffer_length = len(self.read_buffer) try: while buffer_length < self.MAX_BUFFER_SIZE: try: piece = self.recv(4096) except OSError as e: if e.errno == errno.EAGAIN: # End of the available data break elif e.errno == errno.EIO and new_data: # Hopefully we could read an error message before the # actual termination break else: raise if not piece: # A closed connection is indicated by signaling a read # condition, and having recv() return 0. break new_data += piece buffer_length += len(piece) finally: new_data = new_data.replace(b'\r', b'\n') self.read_buffer += new_data return new_data
python
{ "resource": "" }
q10915
BufferedDispatcher.dispatch_write
train
def dispatch_write(self, buf): """Augment the buffer with stuff to write when possible""" self.write_buffer += buf if len(self.write_buffer) > self.MAX_BUFFER_SIZE: console_output('Buffer too big ({:d}) for {}\n'.format( len(self.write_buffer), str(self)).encode()) raise asyncore.ExitNow(1) return True
python
{ "resource": "" }
q10916
safe_write
train
def safe_write(buf): """We can get a SIGWINCH when printing, which will cause write to raise an EINTR. That's not a reason to stop printing.""" assert isinstance(buf, bytes) while True: try: os.write(1, buf) break except IOError as e: if e.errno != errno.EINTR: raise
python
{ "resource": "" }
q10917
console_output
train
def console_output(msg, logging_msg=None): """Use instead of print, to clear the status information before printing""" assert isinstance(msg, bytes) assert isinstance(logging_msg, bytes) or logging_msg is None from polysh import remote_dispatcher remote_dispatcher.log(logging_msg or msg) if remote_dispatcher.options.interactive: from polysh.stdin import the_stdin_thread the_stdin_thread.no_raw_input() global last_status_length if last_status_length: safe_write('\r{}\r'.format( last_status_length * ' ').encode()) last_status_length = 0 safe_write(msg)
python
{ "resource": "" }
q10918
expand_syntax
train
def expand_syntax(string): """Iterator over all the strings in the expansion of the argument""" match = syntax_pattern.search(string) if match: prefix = string[:match.start()] suffix = string[match.end():] intervals = match.group(1).split(',') for interval in intervals: interval_match = interval_pattern.match(interval) if interval_match: start = interval_match.group(1) end = (interval_match.group(2) or start).strip('-') for i in _iter_numbers(start, end): for expanded in expand_syntax(prefix + i + suffix): yield expanded else: yield string
python
{ "resource": "" }
q10919
all_instances
train
def all_instances(): """Iterator over all the remote_dispatcher instances""" return sorted([i for i in asyncore.socket_map.values() if isinstance(i, remote_dispatcher.RemoteDispatcher)], key=lambda i: i.display_name or '')
python
{ "resource": "" }
q10920
count_awaited_processes
train
def count_awaited_processes(): """Return a tuple with the number of awaited processes and the total number""" awaited = 0 total = 0 for i in all_instances(): if i.enabled: total += 1 if i.state is not remote_dispatcher.STATE_IDLE: awaited += 1 return awaited, total
python
{ "resource": "" }
q10921
all_terminated
train
def all_terminated(): """For each remote shell determine whether it has terminated""" instances_found = False for i in all_instances(): instances_found = True if i.state not in (remote_dispatcher.STATE_TERMINATED, remote_dispatcher.STATE_DEAD): return False return instances_found
python
{ "resource": "" }
q10922
update_terminal_size
train
def update_terminal_size(): """Propagate the terminal size to the remote shells accounting for the place taken by the longest name""" w, h = terminal_size() w = max(w - display_names.max_display_name_length - 2, min(w, 10)) # python bug http://python.org/sf/1112949 on amd64 # from ajaxterm.py bug = struct.unpack('i', struct.pack('I', termios.TIOCSWINSZ))[0] packed_size = struct.pack('HHHH', h, w, 0, 0) term_size = w, h for i in all_instances(): if i.enabled and i.term_size != term_size: i.term_size = term_size fcntl.ioctl(i.fd, bug, packed_size)
python
{ "resource": "" }
q10923
format_info
train
def format_info(info_list): """Turn a 2-dimension list of bytes into a 1-dimension list of bytes with correct spacing""" max_lengths = [] if info_list: nr_columns = len(info_list[0]) else: nr_columns = 0 for i in range(nr_columns): max_lengths.append(max([len(info[i]) for info in info_list])) flattened_info_list = [] for info_id in range(len(info_list)): info = info_list[info_id] for str_id in range(len(info) - 1): # Don't justify the last column (i.e. the last printed line) # as it can get much longer in some shells than in others orig_str = info[str_id] indent = max_lengths[str_id] - len(orig_str) info[str_id] = orig_str + indent * b' ' flattened_info_list.append(b' '.join(info) + b'\n') return flattened_info_list
python
{ "resource": "" }
q10924
generate_ill_conditioned_dot_product
train
def generate_ill_conditioned_dot_product(n, c, dps=100): """n ... length of vector c ... target condition number """ # Algorithm 6.1 from # # ACCURATE SUM AND DOT PRODUCT, # TAKESHI OGITA, SIEGFRIED M. RUMP, AND SHIN'ICHI OISHI. assert n >= 6 n2 = round(n / 2) x = numpy.zeros(n) y = numpy.zeros(n) b = math.log2(c) # vector of exponents between 0 and b/2: e = numpy.rint(numpy.random.rand(n2) * b / 2).astype(int) # make sure exponents b/2 and 0 actually occur in e # vectors x,y e[0] = round(b / 2) + 1 e[-1] = 0 # generate first half of vectors x, y rx, ry = numpy.random.rand(2, n2) x[:n2] = (2 * rx - 1) * 2 ** e y[:n2] = (2 * ry - 1) * 2 ** e def dot_exact(x, y): mp.dps = dps # convert to list first, see # <https://github.com/fredrik-johansson/mpmath/pull/385> return mp.fdot(x.tolist(), y.tolist()) # for i=n2+1:n and v=1:i, # generate x_i, y_i such that (*) x(v)’*y(v) ~ 2^e(i-n2) # generate exponents for second half e = numpy.rint(numpy.linspace(b / 2, 0, n - n2)).astype(int) rx, ry = numpy.random.rand(2, n2) for i in range(n2, n): # x_i random with generated exponent x[i] = (2 * rx[i - n2] - 1) * 2 ** e[i - n2] # y_i according to (*) y[i] = ( (2 * ry[i - n2] - 1) * 2 ** e[i - n2] - dot_exact(x[: i + 1], y[: i + 1]) ) / x[i] x, y = numpy.random.permutation((x, y)) # the true dot product rounded to nearest floating point d = dot_exact(x, y) # the actual condition number C = 2 * dot_exact(abs(x), abs(y)) / abs(d) return x, y, d, C
python
{ "resource": "" }
q10925
RemoteDispatcher.launch_ssh
train
def launch_ssh(self, name, port): """Launch the ssh command in the child process""" if options.user: name = '%s@%s' % (options.user, name) evaluated = options.ssh % {'host': name, 'port': port} if evaluated == options.ssh: evaluated = '%s %s' % (evaluated, name) os.execlp('/bin/sh', 'sh', '-c', evaluated)
python
{ "resource": "" }
q10926
RemoteDispatcher.change_state
train
def change_state(self, state): """Change the state of the remote process, logging the change""" if state is not self.state: if self.debug: self.print_debug(b'state => ' + STATE_NAMES[state].encode()) if self.state is STATE_NOT_STARTED: self.read_in_state_not_started = b'' self.state = state
python
{ "resource": "" }
q10927
RemoteDispatcher.disconnect
train
def disconnect(self): """We are no longer interested in this remote process""" try: os.kill(-self.pid, signal.SIGKILL) except OSError: # The process was already dead, no problem pass self.read_buffer = b'' self.write_buffer = b'' self.set_enabled(False) if self.read_in_state_not_started: self.print_lines(self.read_in_state_not_started) self.read_in_state_not_started = b'' if options.abort_error and self.state is STATE_NOT_STARTED: raise asyncore.ExitNow(1) self.change_state(STATE_DEAD)
python
{ "resource": "" }
q10928
RemoteDispatcher.configure_tty
train
def configure_tty(self): """We don't want \n to be replaced with \r\n, and we disable the echo""" attr = termios.tcgetattr(self.fd) attr[1] &= ~termios.ONLCR # oflag attr[3] &= ~termios.ECHO # lflag termios.tcsetattr(self.fd, termios.TCSANOW, attr) # unsetopt zle prevents Zsh from resetting the tty return b'unsetopt zle 2> /dev/null;stty -echo -onlcr -ctlecho;'
python
{ "resource": "" }
q10929
RemoteDispatcher.set_prompt
train
def set_prompt(self): """The prompt is important because we detect the readiness of a process by waiting for its prompt.""" # No right prompt command_line = b'PS2=;RPS1=;RPROMPT=;' command_line += b'PROMPT_COMMAND=;' command_line += b'TERM=ansi;' command_line += b'unset HISTFILE;' prompt1, prompt2 = callbacks.add(b'prompt', self.seen_prompt_cb, True) command_line += b'PS1="' + prompt1 + b'""' + prompt2 + b'\n"\n' return command_line
python
{ "resource": "" }
q10930
RemoteDispatcher.handle_read_fast_case
train
def handle_read_fast_case(self, data): """If we are in a fast case we'll avoid the long processing of each line""" if self.state is not STATE_RUNNING or callbacks.any_in(data): # Slow case :-( return False last_nl = data.rfind(b'\n') if last_nl == -1: # No '\n' in data => slow case return False self.read_buffer = data[last_nl + 1:] self.print_lines(data[:last_nl]) return True
python
{ "resource": "" }
q10931
RemoteDispatcher.handle_read
train
def handle_read(self): """We got some output from a remote shell, this is one of the state machine""" if self.state == STATE_DEAD: return global nr_handle_read nr_handle_read += 1 new_data = self._handle_read_chunk() if self.debug: self.print_debug(b'==> ' + new_data) if self.handle_read_fast_case(self.read_buffer): return lf_pos = new_data.find(b'\n') if lf_pos >= 0: # Optimization: we knew there were no '\n' in the previous read # buffer, so we searched only in the new_data and we offset the # found index by the length of the previous buffer lf_pos += len(self.read_buffer) - len(new_data) elif self.state is STATE_NOT_STARTED and \ options.password is not None and \ b'password:' in self.read_buffer.lower(): self.dispatch_write('{}\n'.format(options.password).encode()) self.read_buffer = b'' return while lf_pos >= 0: # For each line in the buffer line = self.read_buffer[:lf_pos + 1] if callbacks.process(line): pass elif self.state in (STATE_IDLE, STATE_RUNNING): self.print_lines(line) elif self.state is STATE_NOT_STARTED: self.read_in_state_not_started += line if b'The authenticity of host' in line: msg = line.strip(b'\n') + b' Closing connection.' self.disconnect() elif b'REMOTE HOST IDENTIFICATION HAS CHANGED' in line: msg = b'Remote host identification has changed.' else: msg = None if msg: self.print_lines(msg + b' Consider manually connecting or ' b'using ssh-keyscan.') # Go to the next line in the buffer self.read_buffer = self.read_buffer[lf_pos + 1:] if self.handle_read_fast_case(self.read_buffer): return lf_pos = self.read_buffer.find(b'\n') if self.state is STATE_NOT_STARTED and not self.init_string_sent: self.dispatch_write(self.init_string) self.init_string_sent = True
python
{ "resource": "" }
q10932
RemoteDispatcher.print_unfinished_line
train
def print_unfinished_line(self): """The unfinished line stayed long enough in the buffer to be printed""" if self.state is STATE_RUNNING: if not callbacks.process(self.read_buffer): self.print_lines(self.read_buffer) self.read_buffer = b''
python
{ "resource": "" }
q10933
RemoteDispatcher.handle_write
train
def handle_write(self): """Let's write as much as we can""" num_sent = self.send(self.write_buffer) if self.debug: if self.state is not STATE_NOT_STARTED or options.password is None: self.print_debug(b'<== ' + self.write_buffer[:num_sent]) self.write_buffer = self.write_buffer[num_sent:]
python
{ "resource": "" }
q10934
RemoteDispatcher.print_debug
train
def print_debug(self, msg): """Log some debugging information to the console""" assert isinstance(msg, bytes) state = STATE_NAMES[self.state].encode() console_output(b'[dbg] ' + self.display_name.encode() + b'[' + state + b']: ' + msg + b'\n')
python
{ "resource": "" }
q10935
RemoteDispatcher.get_info
train
def get_info(self): """Return a list with all information available about this process""" return [self.display_name.encode(), self.enabled and b'enabled' or b'disabled', STATE_NAMES[self.state].encode() + b':', self.last_printed_line.strip()]
python
{ "resource": "" }
q10936
RemoteDispatcher.dispatch_write
train
def dispatch_write(self, buf): """There is new stuff to write when possible""" if self.state != STATE_DEAD and self.enabled: super().dispatch_write(buf) return True return False
python
{ "resource": "" }
q10937
RemoteDispatcher.change_name
train
def change_name(self, new_name): """Change the name of the shell, possibly updating the maximum name length""" if not new_name: name = self.hostname else: name = new_name.decode() self.display_name = display_names.change( self.display_name, name)
python
{ "resource": "" }
q10938
RemoteDispatcher.rename
train
def rename(self, name): """Send to the remote shell, its new name to be shell expanded""" if name: # defug callback add? rename1, rename2 = callbacks.add( b'rename', self.change_name, False) self.dispatch_command(b'/bin/echo "' + rename1 + b'""' + rename2 + b'"' + name + b'\n') else: self.change_name(self.hostname.encode())
python
{ "resource": "" }
q10939
complete
train
def complete(text, state): """On tab press, return the next possible completion""" global completion_results if state == 0: line = readline.get_line_buffer() if line.startswith(':'): # Control command completion completion_results = complete_control_command(line, text) else: if line.startswith('!') and text and line.startswith(text): dropped_exclam = True text = text[1:] else: dropped_exclam = False completion_results = [] # Complete local paths completion_results += complete_local_path(text) # Complete from history l = len(text) completion_results += [w + ' ' for w in history_words if len(w) > l and w.startswith(text)] if readline.get_begidx() == 0: # Completing first word from $PATH completion_results += [w + ' ' for w in user_commands_in_path if len(w) > l and w.startswith(text)] completion_results = remove_dupes(completion_results) if dropped_exclam: completion_results = ['!' + r for r in completion_results] if state < len(completion_results): return completion_results[state] completion_results = None return None
python
{ "resource": "" }
q10940
Popen
train
def Popen(*args, **kwargs): """ Executes a command using subprocess.Popen and redirects output to AETROS and stdout. Parses stdout as well for stdout API calls. Use read_line argument to read stdout of command's stdout line by line. Use returned process stdin to communicate with the command. :return: subprocess.Popen """ read_line = None if 'read_line' in kwargs: read_line = kwargs['read_line'] del kwargs['read_line'] p = subprocess.Popen(*args, **kwargs) wait_stdout = None wait_stderr = None if p.stdout: wait_stdout = sys.stdout.attach(p.stdout, read_line=read_line) if p.stderr: wait_stderr = sys.stderr.attach(p.stderr) original_wait = p.wait def wait(): original_wait() if wait_stdout: wait_stdout() if wait_stderr: wait_stderr() p.wait = wait return p
python
{ "resource": "" }
q10941
JobBackend.on_sigint
train
def on_sigint(self, sig, frame): """ We got a SIGINT signal. """ if self.stop_requested or self.stop_requested_force: # signal has already been sent or we force a shutdown. # handles the keystroke 2x CTRL+C to force an exit. self.stop_requested_force = True self.logger.warning('Force stopped: ' + str(sig)) # just kill the process, we don't care about the results self.on_force_exit() os._exit(1) # with force_exit we really close the process, killing it in unknown state # self.fail('Force stopped', force_exit=True) # return if self.is_master_process(): self.logger.warning('Received signal '+str(sig)+'. Send again to force stop. Stopping ...') else: self.logger.debug("Got child signal " + str(sig)) self.stop_requested = True # the default SIGINT handler in python is not always installed, so we can't rely on the # KeyboardInterrupt exception to be thrown. # thread.interrupt_main would call sigint again. # the shutdown listener will do the rest like committing remaining memory files into Git and closing connections. sys.exit(0 if self.in_early_stop else 1)
python
{ "resource": "" }
q10942
JobBackend.external_aborted
train
def external_aborted(self, params): """ Immediately abort the job by server. This runs in the Client:read() thread. """ self.ended = True self.running = False # When the server sends an abort signal, we really have to close immediately, # since for example the job has been already deleted. # without touching the git and client any further os._exit(1)
python
{ "resource": "" }
q10943
JobBackend.external_stop
train
def external_stop(self, force): """ Stop signal by server. """ # only the master processes handles the regular stop signal from the server, sending a SIGINT to # all its child (means to us, non-master process) if not self.is_master_process(): if force: # make sure even the subprocess dies really on force os._exit(1) return self.logger.warning("Received stop signal by server.") if not self.stop_requested_force: self.stop_requested_force = force raise_sigint()
python
{ "resource": "" }
q10944
JobBackend.fail
train
def fail(self, message=None, force_exit=False): """ Marks the job as failed, saves the given error message and force exits the process when force_exit=True. """ global last_exit_code if not last_exit_code: last_exit_code = 1 with self.git.batch_commit('FAILED'): self.set_status('FAILED', add_section=False) self.git.commit_json_file('FAIL_MESSAGE', 'aetros/job/crash/error', str(message) if message else '') if isinstance(sys.stderr, GeneralLogger): self.git.commit_json_file('FAIL_MESSAGE_LAST_LOG', 'aetros/job/crash/last_message', sys.stderr.last_messages) self.logger.debug('Crash report stored in commit ' + self.git.get_head_commit()) self.stop(JOB_STATUS.PROGRESS_STATUS_FAILED, force_exit=force_exit)
python
{ "resource": "" }
q10945
JobBackend.write_log
train
def write_log(self, message): """ Proxy method for GeneralLogger. """ if self.stream_log and not self.ended: # points to the Git stream write self.stream_log.write(message) return True
python
{ "resource": "" }
q10946
JobBackend.set_status
train
def set_status(self, status, add_section=True): """ Set an arbitrary status, visible in the big wheel of the job view. """ status = str(status) if add_section: self.section(status) self.job_add_status('status', status)
python
{ "resource": "" }
q10947
JobBackend.create
train
def create(self, create_info=None, hyperparameter=None, server='local', insights=False): """ Creates a new job in git and pushes it. :param create_info: from the api.create_job_info(id). Contains the config and job info (type, server) :param hyperparameter: simple nested dict with key->value, which overwrites stuff from aetros.yml :param server: if None, the the job will be assigned to a server. :param insights: whether you want to activate insights (for simple models) """ if not create_info: create_info = { 'server': server, 'config': { 'insights': insights, 'command': ' '.join(sys.argv) } } config = find_config(self.config_path, logger=self.logger) if not config['model']: raise Exception('AETROS config file (aetros.yml) not found.') # first transform simple format in the full definition with parameter types # (string, number, group, choice_group, etc) full_hyperparameters = lose_parameters_to_full(config['parameters']) # now extract hyperparameters from full definition, and overwrite stuff using # incoming_hyperparameter if available hyperparameter = extract_parameters(full_hyperparameters, hyperparameter) create_info['config']['parameters'] = hyperparameter self.job = create_info if 'server' not in self.job and server: # setting this disables server assignment self.job['server'] = server self.job['optimization'] = None self.job['type'] = 'custom' if 'parameters' not in self.job['config']: self.job['config']['parameters'] = {} if 'insights' not in self.job['config']: self.job['config']['insights'] = insights self.job['created'] = time.time() self.git.create_job_id(self.job) self.logger.debug("Job created with Git ref " + self.git.ref_head) return self.job_id
python
{ "resource": "" }
q10948
JobBackend.get_parameter
train
def get_parameter(self, path, default=None, return_group=False): """ Reads hyperparameter from job configuration. If nothing found use given default. :param path: str :param default: * :param return_group: If true and path is a choice_group, we return the dict instead of the group name. :return: * """ value = read_parameter_by_path(self.job['config']['parameters'], path, return_group) if value is None: return default return value
python
{ "resource": "" }
q10949
JobBackend.load
train
def load(self, job_id): """ Loads job into index and work-tree, restart its ref and sets as current. :param job_id: int """ self.git.read_job(job_id, checkout=self.is_master_process()) self.load_job_from_ref()
python
{ "resource": "" }
q10950
JobBackend.load_job_from_ref
train
def load_job_from_ref(self): """ Loads the job.json into self.job """ if not self.job_id: raise Exception('Job not loaded yet. Use load(id) first.') if not os.path.exists(self.git.work_tree + '/aetros/job.json'): raise Exception('Could not load aetros/job.json from git repository. Make sure you have created the job correctly.') with open(self.git.work_tree + '/aetros/job.json') as f: self.job = simplejson.loads(f.read(), object_pairs_hook=collections.OrderedDict) if not self.job: raise Exception('Could not parse aetros/job.json from git repository. Make sure you have created the job correctly.') self.logger.debug('job: ' + str(self.job))
python
{ "resource": "" }
q10951
JobBackend.file_list
train
def file_list(self): """ Lists all files in the working directory. """ blacklist = ['.git', 'aetros'] working_tree = self.git.work_tree def recursive(path='.'): if os.path.basename(path) in blacklist: return [] # blacklisted entries contribute no files (keep the list return type) if os.path.isdir(path): files = [] for file in os.listdir(path): if path and path != '.': file = path + '/' + file added_files = recursive(file) files += added_files return files else: if path.endswith('.pyc'): return [] if is_ignored(path, self.config['ignore']): return [] return [os.path.relpath(path, working_tree)] return recursive(working_tree)
python
{ "resource": "" }
q10952
ftpretty.get
train
def get(self, remote, local=None): """ Gets the file from FTP server local can be: a file: opened for writing, left open a string: path to output file None: contents are returned """ if isinstance(local, file_type): # open file, leave open local_file = local elif local is None: # return string local_file = buffer_type() else: # path to file, open, write/close return None local_file = open(local, 'wb') self.conn.retrbinary("RETR %s" % remote, local_file.write) if isinstance(local, file_type): pass elif local is None: contents = local_file.getvalue() local_file.close() return contents else: local_file.close() return None
python
{ "resource": "" }
q10953
ftpretty.upload_tree
train
def upload_tree(self, src, dst, ignore=None): """Recursively upload a directory tree. Although similar to shutil.copytree we don't follow symlinks. """ names = os.listdir(src) if ignore is not None: ignored_names = ignore(src, names) else: ignored_names = set() try: self.conn.mkd(dst) except error_perm: pass errors = [] for name in names: if name in ignored_names: continue src_name = os.path.join(src, name) dst_name = os.path.join(dst, name) try: if os.path.islink(src_name): pass elif os.path.isdir(src_name): self.upload_tree(src_name, dst_name, ignore) else: # Will raise a SpecialFileError for unsupported file types self.put(src_name, dst_name) except Exception as why: errors.append((src_name, dst_name, str(why))) return dst
python
{ "resource": "" }
q10954
ftpretty.list
train
def list(self, remote='.', extra=False, remove_relative_paths=False): """ Return directory list """ if extra: self.tmp_output = [] self.conn.dir(remote, self._collector) directory_list = split_file_info(self.tmp_output) else: directory_list = self.conn.nlst(remote) if remove_relative_paths: return list(filter(self.is_not_relative_path, directory_list)) return directory_list
python
{ "resource": "" }
q10955
ftpretty.descend
train
def descend(self, remote, force=False): """ Descend, possibly creating directories as needed """ remote_dirs = remote.split('/') for directory in remote_dirs: try: self.conn.cwd(directory) except Exception: if force: self.conn.mkd(directory) self.conn.cwd(directory) return self.conn.pwd()
python
{ "resource": "" }
q10956
ftpretty.delete
train
def delete(self, remote): """ Delete a file from server """ try: self.conn.delete(remote) except Exception: return False else: return True
python
{ "resource": "" }
q10957
ftpretty.cd
train
def cd(self, remote): """ Change working directory on server """ try: self.conn.cwd(remote) except Exception: return False else: return self.pwd()
python
{ "resource": "" }
q10958
start
train
def start(logger, full_id, fetch=True, env=None, volumes=None, cpus=None, memory=None, gpu_devices=None, offline=False): """ Starts the job with all logging of a job_id """ owner, name, id = unpack_full_job_id(full_id) if isinstance(sys.stdout, GeneralLogger): # we don't want to have stuff written to stdout before in job's log sys.stdout.clear_buffer() job_backend = JobBackend(model_name=owner + '/' + name) if fetch: job_backend.fetch(id) job_backend.restart(id) job_backend.start(collect_system=False, offline=offline) job_backend.set_status('PREPARE', add_section=False) job = job_backend.get_job_model() if not cpus: cpus = job.get_cpu() if not memory: memory = job.get_memory() if not gpu_devices and job.get_gpu(): # if requested 2 GPUs and we have 3 GPUs with id [0,1,2], gpus should be [0,1] gpu_devices = [] for i in range(0, job.get_gpu()): gpu_devices.append(i) start_command(logger, job_backend, env, volumes, cpus=cpus, memory=memory, gpu_devices=gpu_devices, offline=offline)
python
{ "resource": "" }
q10959
fromimage
train
def fromimage(im, flatten=False, mode=None): """ Return a copy of a PIL image as a numpy array. Parameters ---------- im : PIL image Input image. flatten : bool If true, convert the output to grey-scale. mode : str, optional Mode to convert image to, e.g. ``'RGB'``. See the Notes of the `imread` docstring for more details. Returns ------- fromimage : ndarray The different colour bands/channels are stored in the third dimension, such that a grey-image is MxN, an RGB-image MxNx3 and an RGBA-image MxNx4. """ if not Image.isImageType(im): raise TypeError("Input is not a PIL image.") if mode is not None: if mode != im.mode: im = im.convert(mode) elif im.mode == 'P': # Mode 'P' means there is an indexed "palette". If we leave the mode # as 'P', then when we do `a = array(im)` below, `a` will be a 2-D # containing the indices into the palette, and not a 3-D array # containing the RGB or RGBA values. if 'transparency' in im.info: im = im.convert('RGBA') else: im = im.convert('RGB') if flatten: im = im.convert('F') elif im.mode == '1': # Workaround for crash in PIL. When im is 1-bit, the call array(im) # can cause a seg. fault, or generate garbage. See # https://github.com/scipy/scipy/issues/2138 and # https://github.com/python-pillow/Pillow/issues/350. # # This converts im from a 1-bit image to an 8-bit image. im = im.convert('L') a = array(im) return a
python
{ "resource": "" }
q10960
imresize
train
def imresize(arr, size, interp='bilinear', mode=None): """ Resize an image. Parameters ---------- arr : ndarray The array of image to be resized. size : int, float or tuple * int - Percentage of current size. * float - Fraction of current size. * tuple - Size of the output image. interp : str, optional Interpolation to use for re-sizing ('nearest', 'lanczos', 'bilinear', 'bicubic' or 'cubic'). mode : str, optional The PIL image mode ('P', 'L', etc.) to convert `arr` before resizing. Returns ------- imresize : ndarray The resized array of image. See Also -------- toimage : Implicitly used to convert `arr` according to `mode`. scipy.ndimage.zoom : More generic implementation that does not use PIL. """ im = toimage(arr, mode=mode) ts = type(size) if issubdtype(ts, int): percent = size / 100.0 size = tuple((array(im.size)*percent).astype(int)) elif issubdtype(type(size), float): size = tuple((array(im.size)*size).astype(int)) else: size = (size[1], size[0]) func = {'nearest': 0, 'lanczos': 1, 'bilinear': 2, 'bicubic': 3, 'cubic': 3} imnew = im.resize(size, resample=func[interp]) return fromimage(imnew)
python
{ "resource": "" }
q10961
BackendClient._end_channel
train
def _end_channel(self, channel): """ Soft end of ssh channel. End the writing thread as soon as the message queue is empty. """ self.stop_on_empty_queue[channel] = True # by joining the thread we wait until its loop finishes. # it won't loop forever since we've set self.stop_on_empty_queue=True write_thread = self.thread_write_instances[channel] thread_join_non_blocking(write_thread)
python
{ "resource": "" }
q10962
BackendClient.wait_sending_last_messages
train
def wait_sending_last_messages(self): """ Requests all channels to close and waits for them. """ if self.active and self.online is not False: self.logger.debug("client sends last %s messages ..." % ([str(i) + ':' + str(len(x)) for i, x in six.iteritems(self.queues)],)) for channel, messages in six.iteritems(self.queues): for idx, message in enumerate(messages): self.logger.debug("[%s] %d: %s" % (channel, idx, str(message)[0:120])) # send all missing messages # by joining we wait until its loop finishes. # it won't loop forever since we've set self.stop_on_empty_queue=True for channel in six.iterkeys(self.ssh_channel): if channel != '': self._end_channel(channel) # last is control channel self._end_channel('')
python
{ "resource": "" }
q10963
BackendClient.wait_until_queue_empty
train
def wait_until_queue_empty(self, channels, report=True, clear_end=True): """ Waits until all queues of channels are empty. """ state = {'message': ''} self.logger.debug("wait_until_queue_empty: report=%s %s" % (str(report), str([channel+':'+str(len(self.queues[channel])) for channel in channels]), )) queues = [] for channel in channels: queues += self.queues[channel][:] def print_progress(): if report: self.logger.debug("all_empty=%s" % (str(all_empty),)) sys.__stderr__.write('\b' * len(state['message'])) sys.__stderr__.write("\033[K") state['message'] = "%.2f kB/s // %.2fkB of %.2fkB // %.2f%%" \ % (self.bytes_speed / 1024, self.bytes_sent / 1024, self.bytes_total / 1024, (self.bytes_sent / self.bytes_total * 100) if self.bytes_total else 0) sys.__stderr__.write(state['message']) sys.__stderr__.flush() while True: all_empty = all(m['_sent'] for m in queues) print_progress() if all_empty: break time.sleep(0.2) print_progress() if report and clear_end: sys.__stderr__.write('\b' * len(state['message'])) sys.__stderr__.write("\033[K") sys.__stderr__.flush()
python
{ "resource": "" }
q10964
BackendClient.send_message
train
def send_message(self, message, channel): """ Internal. Sends the actual message from a queue entry. """ if not self.is_connected(channel): return False message['_sending'] = True if '_data' in message: data = message['_data'] else: data = msgpack.packb(message, default=invalid_json_values) self.bytes_total += len(data) message['_bytes_sent'] = 0 message['_id'] = -1 if is_debug2(): sys.__stderr__.write("[%s] send message: %s\n" % (channel, str(msgpack.unpackb(data))[0:180])) try: while data: start = time.time() bytes_sent = self.ssh_channel[channel].send(data) data = data[bytes_sent:] message['_bytes_sent'] += bytes_sent self.bytes_sent += bytes_sent end = time.time() self.write_speeds.append(bytes_sent / (end-start)) speeds_len = len(self.write_speeds) if speeds_len: self.bytes_speed = sum(self.write_speeds) / speeds_len if speeds_len > 10: self.write_speeds = self.write_speeds[5:] message['_sent'] = True return True except (KeyboardInterrupt, SystemExit): if message['_sent']: return message['_bytes_sent'] return False except Exception as error: self.connection_error(channel, error) return False
python
{ "resource": "" }
q10965
BackendClient.wait_for_at_least_one_message
train
def wait_for_at_least_one_message(self, channel): """ Reads until we receive at least one message we can unpack. Return all found messages. """ unpacker = msgpack.Unpacker(encoding='utf-8') while True: try: start = time.time() chunk = self.ssh_channel[channel].recv(1024) end = time.time() self.read_speeds.append( len(chunk) / (end-start) ) if len(self.read_speeds) > 20: self.read_speeds = self.read_speeds[10:] if chunk == b'': # happens only when connection broke. If nothing is to be received, it hangs instead. self.connection_error(channel, 'Connection broken w') return False except Exception as error: self.connection_error(channel, error) raise unpacker.feed(chunk) messages = [m for m in unpacker] if messages: return messages
python
{ "resource": "" }
q10966
raise_sigint
train
def raise_sigint(): """ Raising the SIGINT signal in the current process and all sub-processes. os.kill() only issues a signal in the current process (without subprocesses). CTRL+C on the console sends the signal to the process group (which we need). """ if hasattr(signal, 'CTRL_C_EVENT'): # windows. Need CTRL_C_EVENT to raise the signal in the whole process group os.kill(os.getpid(), signal.CTRL_C_EVENT) else: # unix. pgid = os.getpgid(os.getpid()) if pgid == 1: os.kill(os.getpid(), signal.SIGINT) else: os.killpg(os.getpgid(os.getpid()), signal.SIGINT)
python
{ "resource": "" }
q10967
Git.read_job
train
def read_job(self, job_id, checkout=False): """ Reads head and reads the tree into index, and checkout the work-tree when checkout=True. This does not fetch the job from the actual server. It needs to be in the local git already. """ self.job_id = job_id commit = self.get_head_commit() self.logger.debug('Job ref points to ' + commit) self.command_exec(['read-tree', self.ref_head]) if checkout: self.logger.debug('Working directory in ' + self.work_tree) # make sure we have checked out all files we have added until now. Important for simple models, # so we have the actual model.py and dataset scripts. if os.path.exists(self.work_tree): shutil.rmtree(self.work_tree) os.makedirs(self.work_tree) # make the working tree reflect exactly the tree of ref_head. # since we removed the dir before, we have exactly the tree of the reference # '--', '.' is important to not update HEAD self.command_exec(['--work-tree', self.work_tree, 'checkout', self.ref_head, '--', '.'])
python
{ "resource": "" }
q10968
Git.start_push_sync
train
def start_push_sync(self): """ Starts the detection of unsynced Git data. """ self.active_thread = True self.active_push = True self.thread_push_instance = Thread(target=self.thread_push) self.thread_push_instance.daemon = True self.thread_push_instance.start()
python
{ "resource": "" }
q10969
Git.batch_commit
train
def batch_commit(self, message): """ Instead of committing a lot of small commits you can batch it together using this controller. Example: with git.batch_commit('BATCHED'): git.commit_file('my commit 1', 'path/to/file', 'content from file') git.commit_json_file('[1, 2, 3]', 'path/to/file2', 'json array') Within the `with` block you can group the method calls of `commit_file` and `commit_json_file`, and every other method calling these two methods. :type message: str :return: with controller to be used with Python's `with git.batch_commit():` """ class controlled_execution: def __init__(self, git, message): self.git = git self.message = message def __enter__(self): self.git.git_batch_commit = True if self.git.job_id: # make sure we're always on the tip tree self.git.read_tree(self.git.ref_head) def __exit__(self, type, value, traceback): self.git.git_batch_commit = False # if nothing committed, we return early if not self.git.git_batch_commit_messages: return commit_message = self.message if self.git.git_batch_commit_messages: commit_message = commit_message + "\n\n" + "\n".join(self.git.git_batch_commit_messages) self.git.git_batch_commit_messages = [] self.git.commit_index(commit_message) return controlled_execution(self, message)
python
{ "resource": "" }
q10970
Git.add_file_path_in_work_tree
train
def add_file_path_in_work_tree(self, path, work_tree, verbose=True): """ Add a new file as blob in the storage and add its tree entry into the index. """ args = ['--work-tree', work_tree, 'add', '-f'] if verbose: args.append('--verbose') args.append(path) self.command_exec(args, show_output=verbose)
python
{ "resource": "" }
q10971
Git.commit_file
train
def commit_file(self, message, path, content): """ Add a new file as blob in the storage, add its tree entry into the index and commit the index. :param message: str :param path: str :param content: str :return: """ if self.git_batch_commit: self.add_file(path, content) self.git_batch_commit_messages.append(message) else: with self.lock_write(): if self.job_id: self.read_tree(self.ref_head) self.add_file(path, content) return self.commit_index(message)
python
{ "resource": "" }
q10972
Git.contents
train
def contents(self, path): """ Reads the given path of current ref_head and returns its content as utf-8 """ try: out, code, err = self.command_exec(['cat-file', '-p', self.ref_head+':'+path]) if not code: return out.decode('utf-8') except Exception: pass return None
python
{ "resource": "" }
q10973
get_ordered_devices
train
def get_ordered_devices(): """ Default CUDA_DEVICE_ORDER is not compatible with nvidia-docker. Nvidia-Docker is using CUDA_DEVICE_ORDER=PCI_BUS_ID. https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolation """ libcudart = get_libcudart() devices = {} for i in range(0, get_installed_devices()): gpu = get_device_properties(i) pciBusId = ctypes.create_string_buffer(64) libcudart.cudaDeviceGetPCIBusId(ctypes.byref(pciBusId), 64, i) full_id = pciBusId.value.decode('utf-8') gpu['fullId'] = full_id devices[full_id] = gpu ordered = [] i = 0 for key in sorted(devices): devices[key]['id'] = i ordered.append(devices[key]) i += 1 del libcudart return ordered
python
{ "resource": "" }
q10974
reduce_list_size
train
def reduce_list_size(li): """Return two lists - the last N items of li whose total size is less than MAX_SIZE - the rest of the original list li """ # sys.getsizeof is nearly useless. All our data is stringable so rather # use that as a measure of size. size = len(repr(li)) keep = li toss = [] n = len(li) decrement_by = max(n // 10, 10) # integer step so the slices below keep int indices under Python 3 while (size >= MAX_SIZE) and (n > 0): n -= decrement_by toss = li[:-n] keep = li[-n:] size = len(repr(keep)) return keep, toss
python
{ "resource": "" }
q10975
AnchorSmith.least_role
train
def least_role() -> Role: """ Return the TRUSTEE indy-sdk role for an anchor acting in an AnchorSmith capacity. :return: TRUSTEE role """ LOGGER.debug('AnchorSmith.least_role >>>') rv = Role.TRUSTEE.token() LOGGER.debug('AnchorSmith.least_role <<< %s', rv) return rv
python
{ "resource": "" }
q10976
usage
train
def usage() -> None: """ Print usage advice. """ print() print('Usage: setnym.py <config-ini>') print() print('where <config-ini> represents the path to the configuration file.') print() print('The operation submits a nym to a trustee anchor to send to the ledger,') print('if the ledger does not have it already as configured.') print() print('The configuration file has sections and entries as follows:') print(' * section [Node Pool]:') print(' - name: the name of the node pool to which the operation applies') print(' - genesis.txn.path: the path to the genesis transaction file') print(' for the node pool (may omit if node pool already exists)') print(' * section [Trustee Anchor]:') print(" - name: the trustee anchor's (wallet) name") print(" - wallet.type: (default blank) the trustee anchor's wallet type") print(" - wallet.access: (default blank) the trustee anchor's") print(' wallet access credential (password) value') print(' * section [VON Anchor]:') print(' - role: the role to request in the send-nym transaction; specify:') print(' - (default) empty value for user with no additional write privileges') print(' - TRUST_ANCHOR for VON anchor with write privileges for indy artifacts') print(' - TRUSTEE for VON anchor sending further cryptonyms to the ledger') print(" - name: the VON anchor's (wallet) name") print(" - seed: the VON anchor's seed (optional, for wallet creation only)") print(" - did: the VON anchor's DID (optional, for wallet creation only)") print(' - wallet.create: whether create the wallet if it does not yet exist') print(' (value True/False, 1/0, or Yes/No)') print(" - wallet.type: (default blank) the VON anchor's wallet type") print(" - wallet.access: (default blank) the VON anchor's") print(' wallet access credential (password) value.') print()
python
{ "resource": "" }
q10977
_set_wallets
train
async def _set_wallets(an_data: dict) -> dict: """ Set wallets as configured for setnym operation. :param an_data: dict mapping profiles to anchor data :return: dict mapping anchor names to wallet objects """ w_mgr = WalletManager() rv = {} for profile in an_data: w_cfg = {'id': an_data[profile].name} if an_data[profile].wallet_type: w_cfg['storage_type'] = an_data[profile].wallet_type if an_data[profile].seed: w_cfg['seed'] = an_data[profile].seed if an_data[profile].did: w_cfg['did'] = an_data[profile].did if an_data[profile].wallet_create: try: await w_mgr.create(w_cfg, access=an_data[profile].wallet_access) except ExtantWallet: pass rv[profile] = w_mgr.get(w_cfg, access=an_data[profile].wallet_access) return rv
python
{ "resource": "" }
q10978
schema_id
train
def schema_id(origin_did: str, name: str, version: str) -> str: """ Return schema identifier for input origin DID, schema name, and schema version. :param origin_did: DID of schema originator :param name: schema name :param version: schema version :return: schema identifier """ return '{}:2:{}:{}'.format(origin_did, name, version)
python
{ "resource": "" }
q10979
ok_did
train
def ok_did(token: str) -> bool: """ Whether input token looks like a valid distributed identifier. :param token: candidate string :return: whether input token looks like a valid distributed identifier """ return bool(re.match('[{}]{{21,22}}$'.format(B58), token or ''))
python
{ "resource": "" }
q10980
cred_def_id2seq_no
train
def cred_def_id2seq_no(cd_id: str) -> int: """ Given a credential definition identifier, return its schema sequence number. Raise BadIdentifier on input that is not a credential definition identifier. :param cd_id: credential definition identifier :return: sequence number """ if ok_cred_def_id(cd_id): return int(cd_id.split(':')[3]) # sequence number is token at 0-based position 3 raise BadIdentifier('Bad credential definition identifier {}'.format(cd_id))
python
{ "resource": "" }
q10981
rev_reg_id2cred_def_id
train
def rev_reg_id2cred_def_id(rr_id: str) -> str: """ Given a revocation registry identifier, return its corresponding credential definition identifier. Raise BadIdentifier if input is not a revocation registry identifier. :param rr_id: revocation registry identifier :return: credential definition identifier """ if ok_rev_reg_id(rr_id): return ':'.join(rr_id.split(':')[2:-2]) # rev reg id comprises (prefixes):<cred_def_id>:(suffixes) raise BadIdentifier('Bad revocation registry identifier {}'.format(rr_id))
python
{ "resource": "" }
q10982
prune_creds_json
train
def prune_creds_json(creds: dict, cred_ids: set) -> str: """ Strip all creds out of the input json structure that do not match any of the input credential identifiers. :param creds: indy-sdk creds structure :param cred_ids: the set of credential identifiers of interest :return: the reduced creds json """ rv = deepcopy(creds) for key in ('attrs', 'predicates'): for attr_uuid, creds_by_uuid in rv[key].items(): rv[key][attr_uuid] = [cred for cred in creds_by_uuid if cred['cred_info']['referent'] in cred_ids] empties = [attr_uuid for attr_uuid in rv[key] if not rv[key][attr_uuid]] for attr_uuid in empties: del rv[key][attr_uuid] return json.dumps(rv)
python
{ "resource": "" }
q10983
proof_req2wql_all
train
def proof_req2wql_all(proof_req: dict, x_cd_ids: Union[str, Sequence[str]] = None) -> dict: """ Given a proof request and a list of cred def ids to omit, return an extra WQL query dict that will find all corresponding credentials in search. The proof request must have cred def id restrictions on all requested attribute specifications. At present, the utility does not support predicates. :param proof_req: proof request :param x_cd_ids: cred def identifier or sequence thereof to omit :return: extra WQL dict to fetch all corresponding credentials in search. """ rv = {} attr_refts = proof_req_attr_referents(proof_req) for cd_id in [k for k in attr_refts if k not in ([x_cd_ids] if isinstance(x_cd_ids, str) else x_cd_ids or [])]: rv[set(attr_refts[cd_id].values()).pop()] = {"cred_def_id": cd_id} return rv
python
{ "resource": "" }
q10984
proof_req_attr_referents
train
def proof_req_attr_referents(proof_req: dict) -> dict: """ Given a proof request with all requested attributes having cred def id restrictions, return its attribute referents by cred def id and attribute. The returned structure can be useful in populating the extra WQL query parameter in the credential search API. :param proof_req: proof request with all requested attribute specifications having cred def id restriction; e.g., :: { 'name": 'proof_req', 'version': '0.0', 'requested_attributes': { '18_greenLevel_uuid': { 'restrictions': [ { 'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:18:tag' } ], 'name': 'greenLevel', 'non_revoked': { 'to': 1532367957, 'from': 1532367957 } }, '18_legalName_uuid': { 'restrictions': [ { 'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:18:tag' } ], 'name': 'legalName', 'non_revoked': { 'to': 1532367957, 'from': 1532367957 } }, '15_id_uuid': { # this specification will not show up in response: no cred def id restriction :-( 'name': 'id', 'non_revoked': { 'to': 1532367957, 'from': 1532367957 } } } 'requested_predicates': { } } :return: nested dict mapping cred def id to name to proof request referent; e.g., :: { 'WgWxqztrNooG92RXvxSTWv:3:CL:18:tag': { 'legalName': '18_legalName_uuid' 'greenLevel': '18_greenLevel_uuid' } } """ rv = {} for uuid, spec in proof_req['requested_attributes'].items(): cd_id = None for restriction in spec.get('restrictions', []): cd_id = restriction.get('cred_def_id', None) if cd_id: break if not cd_id: continue if cd_id not in rv: # cd_id of None is not OK rv[cd_id] = {} rv[cd_id][spec['name']] = uuid return rv
python
{ "resource": "" }
q10985
proof_req_pred_referents
train
def proof_req_pred_referents(proof_req: dict) -> dict: """ Given a proof request with all requested predicates having cred def id restrictions, return its predicate referents by cred def id and attribute, mapping a predicate and a limit. The returned structure can be useful in downstream processing to filter cred-infos for predicates. :param proof_req: proof request with all requested predicate specifications having cred def id restriction; e.g., :: { 'name': 'proof_req', 'version': '0.0', 'requested_attributes': { ... } 'requested_predicates': { '194_highscore_GE_uuid': { 'name': 'highscore', 'p_type': '>=', 'p_value': '100000', 'restrictions': [ { 'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:194:tag' } ], 'non_revoked': { ... } }, '194_level_GE_uuid': { 'name': 'level', 'p_type': '>=', 'p_value': '10', 'restrictions': [ { 'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:194:tag' } ], 'non_revoked': { ... } }, '194_attempts_LE_uuid': { 'name': 'attempts', 'p_type': '<=', 'p_value': '3', 'restrictions': [ { 'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:194:tag' } ], 'non_revoked': { ... } }, '198_employees_LT_uuid': { 'name': 'employees', 'p_type': '<', 'p_value': '100', 'restrictions': [ { 'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:198:tag' } ], 'non_revoked': { ... } }, '198_employees_GE_uuid': { 'name': 'employees', 'p_type': '>=', 'p_value': '50', 'restrictions': [ { 'cred_def_id': 'WgWxqztrNooG92RXvxSTWv:3:CL:198:tag' } ], 'non_revoked': { ... } }, } } :return: nested dict mapping cred def id to name to proof request referent to predicate and limit; e.g., :: { 'WgWxqztrNooG92RXvxSTWv:3:CL:194:tag': { 'highscore': { '194_level_GE_uuid': ['>=', 100000] }, 'level': { '194_level_GE_uuid': ['>=', 10] }, 'attempts': { '194_attempts_LE_uuid': ['<=', 3] } }, 'WgWxqztrNooG92RXvxSTWv:3:CL:198:tag': { 'employees': { # may have many preds per attr, but always 1 uuid and 1 relation per pred '198_LT_employees_uuid': ['<=', 100] '198_GE_employees_uuid': ['>=', 50] } } } """ rv = {} for uuid, spec in proof_req['requested_predicates'].items(): cd_id = None for restriction in spec.get('restrictions', []): cd_id = restriction.get('cred_def_id', None) if cd_id: break if not cd_id: continue if cd_id not in rv: # cd_id of None is not OK rv[cd_id] = {} if spec['name'] not in rv[cd_id]: rv[cd_id][spec['name']] = {} rv[cd_id][spec['name']][uuid] = [spec['p_type'], Predicate.to_int(spec['p_value'])] return rv
python
{ "resource": "" }
q10986
Verifier._build_rr_state_json
train
async def _build_rr_state_json(self, rr_id: str, timestamp: int) -> (str, int): """ Build rev reg state json at a given requested timestamp. Return rev reg state json and its transaction time on the distributed ledger, with upper bound at input timestamp of interest. Raise AbsentRevReg if no revocation registry exists on input rev reg id, or BadRevStateTime if requested timestamp predates revocation registry creation. :param rr_id: rev reg id :param timestamp: timestamp of interest (epoch seconds) :return: rev reg state json and ledger timestamp (epoch seconds) """ LOGGER.debug('_Verifier._build_rr_state_json >>> rr_id: %s, timestamp: %s', rr_id, timestamp) if not ok_rev_reg_id(rr_id): LOGGER.debug('Verifier._build_rr_state_json <!< Bad rev reg id %s', rr_id) raise BadIdentifier('Bad rev reg id {}'.format(rr_id)) rr_json = None ledger_timestamp = None get_rr_req_json = await ledger.build_get_revoc_reg_request(self.did, rr_id, timestamp) resp_json = await self._submit(get_rr_req_json) resp = json.loads(resp_json) if resp.get('result', {}).get('data', None) and resp['result']['data'].get('value', None): # timestamp at or beyond rev reg creation, carry on try: (_, rr_json, ledger_timestamp) = await ledger.parse_get_revoc_reg_response(resp_json) except IndyError: # ledger replied, but there is no such rev reg available LOGGER.debug('Verifier._build_rr_state_json <!< no rev reg exists on %s', rr_id) raise AbsentRevReg('No rev reg exists on {}'.format(rr_id)) else: LOGGER.debug( '_Verifier._build_rr_state_json <!< Rev reg %s created after asked-for time %s', rr_id, timestamp) raise BadRevStateTime('Rev reg {} created after asked-for time {}'.format(rr_id, timestamp)) rv = (rr_json, ledger_timestamp) LOGGER.debug('_Verifier._build_rr_state_json <<< %s', rv) return rv
python
{ "resource": "" }
q10987
Verifier.build_proof_req_json
train
async def build_proof_req_json(self, cd_id2spec: dict) -> str:
    """
    Build and return indy-sdk proof request for input attributes and non-revocation intervals by cred def id.

    :param cd_id2spec: dict mapping cred def ids to:

        - (optionally) 'attrs': lists of names of attributes of interest (omit for all, empty list or None for none)
        - (optionally) '>=': (pred) inclusive int lower-bounds of interest (omit, empty list, or None for none)
        - (optionally) '>': (pred) exclusive int lower-bounds of interest (omit, empty list, or None for none)
        - (optionally) '<=': (pred) inclusive int upper-bounds of interest (omit, empty list, or None for none)
        - (optionally) '<': (pred) exclusive int upper-bounds of interest (omit, empty list, or None for none)
        - (optionally) 'interval': either
            - (2-tuple) pair of epoch second counts marking 'from' and 'to' timestamps, or
            - | single epoch second count to set 'from' and 'to' the same; default
              | (now, now) for cred defs supporting revocation or None otherwise;

        e.g.,

    ::

        {
            'Vx4E82R17q...:3:CL:16:tag': {
                'attrs': [  # request attrs 'name' and 'favouriteDrink' from this cred def's schema
                    'name',
                    'favouriteDrink'
                ],
                '>=': {  # request predicate score>=80 from this cred def
                    'score': 80
                },
                '<=': {  # request predicate ranking<=10 from this cred def
                    'ranking': 10
                },
                'interval': 1528116008  # same instant for all attrs and preds of corresponding schema
            },
            'R17v42T4pk...:3:CL:19:tag': None,  # request all attrs, no preds, default intervals on all attrs
            'e3vc5K168n...:3:CL:23:tag': {},  # request all attrs, no preds, default intervals on all attrs
            'Z9ccax812j...:3:CL:27:tag': {  # request all attrs, no preds, this interval on all attrs
                'interval': (1528112408, 1528116008)
            },
            '9cHbp54C8n...:3:CL:37:tag': {  # request no attrs and some predicates; specify interval
                'attrs': [],  # or equivalently, 'attrs': None
                '>=': {
                    'employees': '50',  # nicety: implementation converts to int for caller
                    'revenue': '10000000',
                    'ebitda': 0
                },
                'interval': (1528029608, 1528116008)
            },
            '6caBcmLi33...:3:CL:41:tag': {  # all attrs, one pred, default intervals to now on attrs & pred
                '>': {
                    'regEpoch': 1514782800
                }
            },
            ...
        }

    :return: indy-sdk proof request json
    """
    LOGGER.debug('Verifier.build_proof_req_json >>> cd_id2spec: %s', cd_id2spec)

    cd_id2schema = {}
    now = int(time())
    rv = {
        'nonce': str(int(time())),
        'name': 'proof_req',
        'version': '0.0',
        'requested_attributes': {},
        'requested_predicates': {}
    }

    for cd_id in cd_id2spec:
        if not ok_cred_def_id(cd_id):
            LOGGER.debug('Verifier.build_proof_req_json <!< Bad cred def id %s', cd_id)
            raise BadIdentifier('Bad cred def id {}'.format(cd_id))

        interval = None
        cred_def = json.loads(await self.get_cred_def(cd_id))
        seq_no = cred_def_id2seq_no(cd_id)
        cd_id2schema[cd_id] = json.loads(await self.get_schema(seq_no))

        if 'revocation' in cred_def['value']:
            fro_to = cd_id2spec[cd_id].get('interval', (now, now)) if cd_id2spec[cd_id] else (now, now)
            interval = {
                'from': fro_to if isinstance(fro_to, int) else min(fro_to),
                'to': fro_to if isinstance(fro_to, int) else max(fro_to)
            }

        for attr in (cd_id2spec[cd_id].get('attrs', cd_id2schema[cd_id]['attrNames']) or []
                if cd_id2spec[cd_id] else cd_id2schema[cd_id]['attrNames']):
            attr_uuid = '{}_{}_uuid'.format(seq_no, canon(attr))
            rv['requested_attributes'][attr_uuid] = {
                'name': attr,
                'restrictions': [{
                    'cred_def_id': cd_id
                }]
            }
            if interval:
                rv['requested_attributes'][attr_uuid]['non_revoked'] = interval

        for pred in Predicate:
            for attr in (cd_id2spec[cd_id].get(pred.value.math, {}) or {} if cd_id2spec[cd_id] else {}):
                pred_uuid = '{}_{}_{}_uuid'.format(seq_no, canon(attr), pred.value.fortran)
                try:
                    rv['requested_predicates'][pred_uuid] = {
                        'name': attr,
                        'p_type': pred.value.math,
                        'p_value': Predicate.to_int(cd_id2spec[cd_id][pred.value.math][attr]),
                        'restrictions': [{
                            'cred_def_id': cd_id
                        }]
                    }
                except ValueError:
                    LOGGER.info(
                        'cannot build %s predicate on non-int bound %s for %s',
                        pred.value.fortran,
                        cd_id2spec[cd_id][pred.value.math][attr],
                        attr)
                    continue  # int conversion failed - reject candidate
                if interval:
                    rv['requested_predicates'][pred_uuid]['non_revoked'] = interval

    LOGGER.debug('Verifier.build_proof_req_json <<< %s', json.dumps(rv))
    return json.dumps(rv)
python
{ "resource": "" }
q10988
Verifier.load_cache_for_verification
train
async def load_cache_for_verification(self, archive: bool = False) -> int:
    """
    Load schema, cred def, revocation caches; optionally archive enough to go offline
    and be able to verify proof on content marked of interest in configuration.

    Return timestamp (epoch seconds) of cache load event, also used as subdirectory
    for cache archives.

    :param archive: True to archive now or False to demur (subclasses may still need
        to augment archivable caches further)
    :return: cache load event timestamp (epoch seconds)
    """
    LOGGER.debug('Verifier.load_cache_for_verification >>> archive: %s', archive)

    rv = int(time())
    for s_id in self.config.get('archive-verifier-caches-on-close', {}).get('schema_id', {}):
        if ok_schema_id(s_id):
            with SCHEMA_CACHE.lock:
                await self.get_schema(s_id)
        else:
            LOGGER.info('Not archiving schema for specified bad id %s', s_id)
    for cd_id in self.config.get('archive-verifier-caches-on-close', {}).get('cred_def_id', {}):
        if ok_cred_def_id(cd_id):
            with CRED_DEF_CACHE.lock:
                await self.get_cred_def(cd_id)
        else:
            LOGGER.info('Not archiving cred def for specified bad id %s', cd_id)
    for rr_id in self.config.get('archive-verifier-caches-on-close', {}).get('rev_reg_id', {}):
        if ok_rev_reg_id(rr_id):
            await self.get_rev_reg_def(rr_id)
            with REVO_CACHE.lock:
                revo_cache_entry = REVO_CACHE.get(rr_id, None)
                if revo_cache_entry:
                    try:
                        await revo_cache_entry.get_state_json(self._build_rr_state_json, rv, rv)
                    except ClosedPool:
                        LOGGER.warning(
                            'Verifier %s is offline from pool %s, cannot update revo cache reg state for %s to %s',
                            self.name,
                            self.pool.name,
                            rr_id,
                            rv)
                    except AbsentPool:
                        LOGGER.warning(
                            'Verifier %s has no pool, cannot update revo cache reg state for %s to %s',
                            self.name,
                            rr_id,
                            rv)
        else:
            LOGGER.info('Not archiving rev reg for specified bad id %s', rr_id)

    if archive:
        ArchivableCaches.archive(self.dir_cache)
    LOGGER.debug('Verifier.load_cache_for_verification <<< %s', rv)
    return rv
python
{ "resource": "" }
q10989
Verifier.check_encoding
train
def check_encoding(proof_req: dict, proof: dict) -> bool:
    """
    Return whether the proof's raw values correspond to their encodings
    as cross-referenced against proof request.

    :param proof_req: proof request
    :param proof: corresponding proof to check
    :return: True if OK, False for encoding mismatch
    """
    LOGGER.debug('Verifier.check_encoding >>> proof_req: %s, proof: %s', proof_req, proof)

    cd_id2proof_id = {}  # invert proof['identifiers'] per cd_id
    p_preds = {}  # cd_id and attr to bound
    for idx in range(len(proof['identifiers'])):
        cd_id = proof['identifiers'][idx]['cred_def_id']
        cd_id2proof_id[cd_id] = idx  # since at most 1 cred per cred def
        p_preds[cd_id] = {
            ge_proof['predicate']['attr_name']: ge_proof['predicate']['value']
            for ge_proof in proof['proof']['proofs'][idx]['primary_proof']['ge_proofs']
        }

    for (uuid, req_attr) in proof_req['requested_attributes'].items():  # proof req xref proof per revealed attr
        canon_attr = canon(req_attr['name'])
        proof_ident_idx = cd_id2proof_id[req_attr['restrictions'][0]['cred_def_id']]
        enco = proof['proof']['proofs'][proof_ident_idx]['primary_proof']['eq_proof']['revealed_attrs'].get(
            canon_attr)
        if not enco:
            continue  # requested but declined from revelation in proof: must appear in a predicate
        if enco != proof['requested_proof']['revealed_attrs'][uuid]['encoded']:
            LOGGER.debug('Verifier.check_encoding <<< False')
            return False
        if enco != encode(proof['requested_proof']['revealed_attrs'][uuid]['raw']):
            LOGGER.debug('Verifier.check_encoding <<< False')
            return False

    for (uuid, req_pred) in proof_req['requested_predicates'].items():  # proof req xref proof per pred
        canon_attr = canon(req_pred['name'])
        if p_preds[req_pred['restrictions'][0]['cred_def_id']].get(canon_attr) != req_pred['p_value']:
            LOGGER.debug('Verifier.check_encoding <<< False')
            return False

    LOGGER.debug('Verifier.check_encoding <<< True')
    return True
python
{ "resource": "" }
q10990
PublicKey.to_dict
train
def to_dict(self):
    """
    Return dict representation of public key to embed in DID document.
    """
    return {
        'id': self.id,
        'type': str(self.type.ver_type),
        'controller': canon_ref(self.did, self.controller),
        **self.type.specification(self.value)
    }
python
{ "resource": "" }
q10991
main
train
async def main(wallet_name: str) -> None:
    """
    Main line for revocation registry builder operating in external process on behalf of issuer agent.

    :param wallet_name: wallet name - must match that of issuer with existing wallet
    """
    logging.basicConfig(level=logging.WARN, format='%(levelname)-8s | %(name)-12s | %(message)s')
    logging.getLogger('indy').setLevel(logging.ERROR)

    path_start = join(RevRegBuilder.dir_tails_sentinel(wallet_name), '.start')

    with open(path_start, 'r') as fh_start:
        start_data = json.loads(fh_start.read())
    remove(path_start)

    logging.getLogger(__name__).setLevel(start_data['logging']['level'])
    for path_log in start_data['logging']['paths']:
        logging.getLogger(__name__).addHandler(logging.FileHandler(path_log))

    wallet = WalletManager().get(
        {
            'id': wallet_name,
            'storage_type': start_data['wallet']['storage_type'],
            **start_data['wallet']['config'],
        },
        access=start_data['wallet']['access_creds'].get('key', None))

    async with wallet, RevRegBuilder(wallet, rrbx=True) as rrban:
        await rrban.serve()
python
{ "resource": "" }
q10992
RevRegBuilder._start_data_json
train
def _start_data_json(self) -> str:
    """
    Output json with start data to write for external revocation registry builder process pickup.

    :return: logging and wallet init data json
    """
    rv = {
        'logging': {
            'paths': []
        },
        'wallet': {
        }
    }

    logger = LOGGER
    while not logger.level:
        logger = logger.parent
        if logger is None:
            break
    rv['logging']['level'] = logger.level

    logger = LOGGER
    log_paths = [realpath(h.baseFilename) for h in logger.handlers if hasattr(h, 'baseFilename')]
    while not log_paths:
        logger = logger.parent
        if logger is None:
            break
        log_paths = [realpath(h.baseFilename) for h in logger.handlers if hasattr(h, 'baseFilename')]
    for log_path in log_paths:
        rv['logging']['paths'].append(log_path)

    rv['wallet']['storage_type'] = self.wallet.storage_type
    rv['wallet']['config'] = self.wallet.config
    rv['wallet']['access_creds'] = self.wallet.access_creds

    return json.dumps(rv)
python
{ "resource": "" }
q10993
RevRegBuilder._get_state
train
def _get_state(wallet_name: str) -> _STATE:
    """
    Return current state of revocation registry builder process.

    :param wallet_name: name of wallet for corresponding Issuer
    :return: current process state as _STATE enum
    """
    dir_sentinel = RevRegBuilder.dir_tails_sentinel(wallet_name)
    file_pid = join(dir_sentinel, '.pid')
    file_start = join(dir_sentinel, '.start')
    file_stop = join(dir_sentinel, '.stop')

    if isfile(file_stop):
        return _STATE.STOPPING
    if isfile(file_start) or isfile(file_pid):
        return _STATE.RUNNING
    return _STATE.ABSENT
python
{ "resource": "" }
q10994
RevRegBuilder.dir_tails_top
train
def dir_tails_top(self, rr_id) -> str:
    """
    Return top of tails tree for input rev reg id.

    :param rr_id: revocation registry identifier
    :return: top of tails tree
    """
    return join(self.dir_tails_hopper, rr_id) if self.external else self.dir_tails
python
{ "resource": "" }
q10995
RevRegBuilder.dir_tails_target
train
def dir_tails_target(self, rr_id) -> str:
    """
    Return target directory for revocation registry and tails file production.

    :param rr_id: revocation registry identifier
    :return: tails target directory
    """
    return join(self.dir_tails_top(rr_id), rev_reg_id2cred_def_id(rr_id))
python
{ "resource": "" }
q10996
RevRegBuilder.mark_in_progress
train
def mark_in_progress(self, rr_id: str, rr_size: int) -> None:
    """
    Prepare sentinel directory for revocation registry construction.

    :param rr_id: revocation registry identifier
    :param rr_size: size of revocation registry to build
    """
    try:
        makedirs(join(self._dir_tails_sentinel, rr_id), exist_ok=False)
    except FileExistsError:
        LOGGER.warning('Rev reg %s construction already in progress', rr_id)
    else:
        open(join(self._dir_tails_sentinel, rr_id, '.{}'.format(rr_size)), 'w').close()
python
{ "resource": "" }
q10997
RevRegBuilder.serve
train
async def serve(self) -> None:
    """
    Write pidfile to sentinel directory if need be, and wait for sentinels
    to shut down or build revocation registry and tails file.
    """
    LOGGER.debug('RevRegBuilder.serve >>>')

    assert self.external

    file_pid = join(self._dir_tails_sentinel, '.pid')
    if isfile(file_pid):
        with open(file_pid, 'r') as fh_pid:
            pid = int(fh_pid.read())

        try:
            kill(pid, 0)
        except ProcessLookupError:
            remove(file_pid)
            LOGGER.info('RevRegBuilder removed derelict .pid file')
        except PermissionError:
            LOGGER.info('RevRegBuilder process already running with pid %s: exiting', pid)
            LOGGER.debug('RevRegBuilder.serve <<<')
            return
        else:
            LOGGER.info('RevRegBuilder process already running with pid %s: exiting', pid)
            LOGGER.debug('RevRegBuilder.serve <<<')
            return

    pid = getpid()
    with open(file_pid, 'w') as pid_fh:
        print(str(pid), file=pid_fh)

    file_stop = join(self._dir_tails_sentinel, '.stop')
    while True:
        if isfile(file_stop):  # stop now, pick up any pending tasks next invocation
            remove(file_stop)
            remove(file_pid)
            break

        p_pending = [
            join(self._dir_tails_sentinel, d) for d in listdir(self._dir_tails_sentinel)
            if isdir(join(self._dir_tails_sentinel, d))
        ]
        p_pending = [p for p in p_pending if [s for s in listdir(p) if s.startswith('.')]]  # size marker
        if p_pending:
            pdir = basename(p_pending[0])
            rr_id = pdir
            rr_size = int([s for s in listdir(p_pending[0]) if s.startswith('.')][0][1:])
            open(join(p_pending[0], '.in-progress'), 'w').close()
            await self.create_rev_reg(rr_id, rr_size or None)
            rmtree(p_pending[0])
        await asyncio.sleep(1)

    LOGGER.debug('RevRegBuilder.serve <<<')
python
{ "resource": "" }
q10998
RevRegBuilder.stop
train
async def stop(wallet_name: str) -> None:
    """
    Gracefully stop an external revocation registry builder, waiting for its current
    task (if any) to complete.

    The indy-sdk toolkit uses a temporary directory for tails file generation;
    shutting down the toolkit removes that directory, crashing any external tails
    file write still in flight. This method allows a graceful stop, waiting for
    completion of such tasks already in progress.

    :param wallet_name: name of wallet for the external revocation registry builder to stop
    """
    LOGGER.debug('RevRegBuilder.stop >>>')

    dir_sentinel = join(RevRegBuilder.dir_tails_sentinel(wallet_name))

    if isdir(dir_sentinel):
        open(join(dir_sentinel, '.stop'), 'w').close()  # touch

        while any(isfile(join(dir_sentinel, d, '.in-progress')) for d in listdir(dir_sentinel)):
            await asyncio.sleep(1)

    LOGGER.debug('RevRegBuilder.stop <<<')
python
{ "resource": "" }
q10999
do_ultracache
train
def do_ultracache(parser, token):
    """Based on Django's default cache template tag"""
    nodelist = parser.parse(("endultracache",))
    parser.delete_first_token()
    tokens = token.split_contents()
    if len(tokens) < 3:
        raise TemplateSyntaxError("'%r' tag requires at least 2 arguments." % tokens[0])
    return UltraCacheNode(
        nodelist,
        parser.compile_filter(tokens[1]),
        tokens[2],  # fragment_name can't be a variable.
        [parser.compile_filter(token) for token in tokens[3:]]
    )
python
{ "resource": "" }