Column            Type     Lengths / values
id                int32    0 - 252k
repo              string   lengths 7 - 55
path              string   lengths 4 - 127
func_name         string   lengths 1 - 88
original_string   string   lengths 75 - 19.8k
language          string   1 distinct value
code              string   lengths 75 - 19.8k
code_tokens       list
docstring         string   lengths 3 - 17.3k
docstring_tokens  list
sha               string   lengths 40 - 40
url               string   lengths 87 - 242
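Each row below is one record with the columns above, drawn from the dhylands/rshell repository. As a rough illustration (not part of the original listing), records with this schema could be loaded and inspected with the Hugging Face datasets library along the following lines; the local file name python_train.jsonl is a placeholder assumption.

    # Minimal sketch, assuming the records sit in a local JSONL file whose fields
    # match the columns above (repo, path, func_name, code, docstring, sha, url, ...).
    # "python_train.jsonl" is a placeholder name, not taken from this listing.
    from datasets import load_dataset

    ds = load_dataset("json", data_files="python_train.jsonl", split="train")

    # Print provenance plus the docstring for a few records.
    for row in ds.select(range(3)):
        print(row["repo"], row["path"], row["func_name"], row["sha"][:8])
        print(row["docstring"])
        print(row["url"])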
234,100
dhylands/rshell
rshell/main.py
rm
def rm(filename, recursive=False, force=False): """Removes a file or directory tree.""" return auto(remove_file, filename, recursive, force)
python
def rm(filename, recursive=False, force=False): """Removes a file or directory tree.""" return auto(remove_file, filename, recursive, force)
[ "def", "rm", "(", "filename", ",", "recursive", "=", "False", ",", "force", "=", "False", ")", ":", "return", "auto", "(", "remove_file", ",", "filename", ",", "recursive", ",", "force", ")" ]
Removes a file or directory tree.
[ "Removes", "a", "file", "or", "directory", "tree", "." ]
a92a8fa8074ac792241c83c640a51b394667c324
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L834-L836
234,101
dhylands/rshell
rshell/main.py
make_dir
def make_dir(dst_dir, dry_run, print_func, recursed): """Creates a directory. Produces information in case of dry run. Issues error where necessary. """ parent = os.path.split(dst_dir.rstrip('/'))[0] # Check for nonexistent parent parent_files = auto(listdir_stat, parent) if parent else True # Relative dir if dry_run: if recursed: # Assume success: parent not actually created yet print_func("Creating directory {}".format(dst_dir)) elif parent_files is None: print_func("Unable to create {}".format(dst_dir)) return True if not mkdir(dst_dir): print_err("Unable to create {}".format(dst_dir)) return False return True
python
def make_dir(dst_dir, dry_run, print_func, recursed): """Creates a directory. Produces information in case of dry run. Issues error where necessary. """ parent = os.path.split(dst_dir.rstrip('/'))[0] # Check for nonexistent parent parent_files = auto(listdir_stat, parent) if parent else True # Relative dir if dry_run: if recursed: # Assume success: parent not actually created yet print_func("Creating directory {}".format(dst_dir)) elif parent_files is None: print_func("Unable to create {}".format(dst_dir)) return True if not mkdir(dst_dir): print_err("Unable to create {}".format(dst_dir)) return False return True
[ "def", "make_dir", "(", "dst_dir", ",", "dry_run", ",", "print_func", ",", "recursed", ")", ":", "parent", "=", "os", ".", "path", ".", "split", "(", "dst_dir", ".", "rstrip", "(", "'/'", ")", ")", "[", "0", "]", "# Check for nonexistent parent", "parent_files", "=", "auto", "(", "listdir_stat", ",", "parent", ")", "if", "parent", "else", "True", "# Relative dir", "if", "dry_run", ":", "if", "recursed", ":", "# Assume success: parent not actually created yet", "print_func", "(", "\"Creating directory {}\"", ".", "format", "(", "dst_dir", ")", ")", "elif", "parent_files", "is", "None", ":", "print_func", "(", "\"Unable to create {}\"", ".", "format", "(", "dst_dir", ")", ")", "return", "True", "if", "not", "mkdir", "(", "dst_dir", ")", ":", "print_err", "(", "\"Unable to create {}\"", ".", "format", "(", "dst_dir", ")", ")", "return", "False", "return", "True" ]
Creates a directory. Produces information in case of dry run. Issues error where necessary.
[ "Creates", "a", "directory", ".", "Produces", "information", "in", "case", "of", "dry", "run", ".", "Issues", "error", "where", "necessary", "." ]
a92a8fa8074ac792241c83c640a51b394667c324
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L839-L854
234,102
dhylands/rshell
rshell/main.py
recv_file_from_host
def recv_file_from_host(src_file, dst_filename, filesize, dst_mode='wb'): """Function which runs on the pyboard. Matches up with send_file_to_remote.""" import sys import ubinascii if HAS_BUFFER: try: import pyb usb = pyb.USB_VCP() except: try: import machine usb = machine.USB_VCP() except: usb = None if usb and usb.isconnected(): # We don't want 0x03 bytes in the data to be interpreted as a Control-C # This gets reset each time the REPL runs a line, so we don't need to # worry about resetting it ourselves usb.setinterrupt(-1) try: with open(dst_filename, dst_mode) as dst_file: bytes_remaining = filesize if not HAS_BUFFER: bytes_remaining *= 2 # hexlify makes each byte into 2 buf_size = BUFFER_SIZE write_buf = bytearray(buf_size) read_buf = bytearray(buf_size) while bytes_remaining > 0: # Send back an ack as a form of flow control sys.stdout.write('\x06') read_size = min(bytes_remaining, buf_size) buf_remaining = read_size buf_index = 0 while buf_remaining > 0: if HAS_BUFFER: bytes_read = sys.stdin.buffer.readinto(read_buf, read_size) else: bytes_read = sys.stdin.readinto(read_buf, read_size) if bytes_read > 0: write_buf[buf_index:bytes_read] = read_buf[0:bytes_read] buf_index += bytes_read buf_remaining -= bytes_read if HAS_BUFFER: dst_file.write(write_buf[0:read_size]) else: dst_file.write(ubinascii.unhexlify(write_buf[0:read_size])) bytes_remaining -= read_size return True except: return False
python
def recv_file_from_host(src_file, dst_filename, filesize, dst_mode='wb'): """Function which runs on the pyboard. Matches up with send_file_to_remote.""" import sys import ubinascii if HAS_BUFFER: try: import pyb usb = pyb.USB_VCP() except: try: import machine usb = machine.USB_VCP() except: usb = None if usb and usb.isconnected(): # We don't want 0x03 bytes in the data to be interpreted as a Control-C # This gets reset each time the REPL runs a line, so we don't need to # worry about resetting it ourselves usb.setinterrupt(-1) try: with open(dst_filename, dst_mode) as dst_file: bytes_remaining = filesize if not HAS_BUFFER: bytes_remaining *= 2 # hexlify makes each byte into 2 buf_size = BUFFER_SIZE write_buf = bytearray(buf_size) read_buf = bytearray(buf_size) while bytes_remaining > 0: # Send back an ack as a form of flow control sys.stdout.write('\x06') read_size = min(bytes_remaining, buf_size) buf_remaining = read_size buf_index = 0 while buf_remaining > 0: if HAS_BUFFER: bytes_read = sys.stdin.buffer.readinto(read_buf, read_size) else: bytes_read = sys.stdin.readinto(read_buf, read_size) if bytes_read > 0: write_buf[buf_index:bytes_read] = read_buf[0:bytes_read] buf_index += bytes_read buf_remaining -= bytes_read if HAS_BUFFER: dst_file.write(write_buf[0:read_size]) else: dst_file.write(ubinascii.unhexlify(write_buf[0:read_size])) bytes_remaining -= read_size return True except: return False
[ "def", "recv_file_from_host", "(", "src_file", ",", "dst_filename", ",", "filesize", ",", "dst_mode", "=", "'wb'", ")", ":", "import", "sys", "import", "ubinascii", "if", "HAS_BUFFER", ":", "try", ":", "import", "pyb", "usb", "=", "pyb", ".", "USB_VCP", "(", ")", "except", ":", "try", ":", "import", "machine", "usb", "=", "machine", ".", "USB_VCP", "(", ")", "except", ":", "usb", "=", "None", "if", "usb", "and", "usb", ".", "isconnected", "(", ")", ":", "# We don't want 0x03 bytes in the data to be interpreted as a Control-C", "# This gets reset each time the REPL runs a line, so we don't need to", "# worry about resetting it ourselves", "usb", ".", "setinterrupt", "(", "-", "1", ")", "try", ":", "with", "open", "(", "dst_filename", ",", "dst_mode", ")", "as", "dst_file", ":", "bytes_remaining", "=", "filesize", "if", "not", "HAS_BUFFER", ":", "bytes_remaining", "*=", "2", "# hexlify makes each byte into 2", "buf_size", "=", "BUFFER_SIZE", "write_buf", "=", "bytearray", "(", "buf_size", ")", "read_buf", "=", "bytearray", "(", "buf_size", ")", "while", "bytes_remaining", ">", "0", ":", "# Send back an ack as a form of flow control", "sys", ".", "stdout", ".", "write", "(", "'\\x06'", ")", "read_size", "=", "min", "(", "bytes_remaining", ",", "buf_size", ")", "buf_remaining", "=", "read_size", "buf_index", "=", "0", "while", "buf_remaining", ">", "0", ":", "if", "HAS_BUFFER", ":", "bytes_read", "=", "sys", ".", "stdin", ".", "buffer", ".", "readinto", "(", "read_buf", ",", "read_size", ")", "else", ":", "bytes_read", "=", "sys", ".", "stdin", ".", "readinto", "(", "read_buf", ",", "read_size", ")", "if", "bytes_read", ">", "0", ":", "write_buf", "[", "buf_index", ":", "bytes_read", "]", "=", "read_buf", "[", "0", ":", "bytes_read", "]", "buf_index", "+=", "bytes_read", "buf_remaining", "-=", "bytes_read", "if", "HAS_BUFFER", ":", "dst_file", ".", "write", "(", "write_buf", "[", "0", ":", "read_size", "]", ")", "else", ":", "dst_file", ".", "write", "(", "ubinascii", ".", "unhexlify", "(", "write_buf", "[", "0", ":", "read_size", "]", ")", ")", "bytes_remaining", "-=", "read_size", "return", "True", "except", ":", "return", "False" ]
Function which runs on the pyboard. Matches up with send_file_to_remote.
[ "Function", "which", "runs", "on", "the", "pyboard", ".", "Matches", "up", "with", "send_file_to_remote", "." ]
a92a8fa8074ac792241c83c640a51b394667c324
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L968-L1017
234,103
dhylands/rshell
rshell/main.py
send_file_to_remote
def send_file_to_remote(dev, src_file, dst_filename, filesize, dst_mode='wb'): """Intended to be passed to the `remote` function as the xfer_func argument. Matches up with recv_file_from_host. """ bytes_remaining = filesize save_timeout = dev.timeout dev.timeout = 1 while bytes_remaining > 0: # Wait for ack so we don't get too far ahead of the remote ack = dev.read(1) if ack is None or ack != b'\x06': sys.stderr.write("timed out or error in transfer to remote\n") sys.exit(2) if HAS_BUFFER: buf_size = BUFFER_SIZE else: buf_size = BUFFER_SIZE // 2 read_size = min(bytes_remaining, buf_size) buf = src_file.read(read_size) #sys.stdout.write('\r%d/%d' % (filesize - bytes_remaining, filesize)) #sys.stdout.flush() if HAS_BUFFER: dev.write(buf) else: dev.write(binascii.hexlify(buf)) bytes_remaining -= read_size #sys.stdout.write('\r') dev.timeout = save_timeout
python
def send_file_to_remote(dev, src_file, dst_filename, filesize, dst_mode='wb'): """Intended to be passed to the `remote` function as the xfer_func argument. Matches up with recv_file_from_host. """ bytes_remaining = filesize save_timeout = dev.timeout dev.timeout = 1 while bytes_remaining > 0: # Wait for ack so we don't get too far ahead of the remote ack = dev.read(1) if ack is None or ack != b'\x06': sys.stderr.write("timed out or error in transfer to remote\n") sys.exit(2) if HAS_BUFFER: buf_size = BUFFER_SIZE else: buf_size = BUFFER_SIZE // 2 read_size = min(bytes_remaining, buf_size) buf = src_file.read(read_size) #sys.stdout.write('\r%d/%d' % (filesize - bytes_remaining, filesize)) #sys.stdout.flush() if HAS_BUFFER: dev.write(buf) else: dev.write(binascii.hexlify(buf)) bytes_remaining -= read_size #sys.stdout.write('\r') dev.timeout = save_timeout
[ "def", "send_file_to_remote", "(", "dev", ",", "src_file", ",", "dst_filename", ",", "filesize", ",", "dst_mode", "=", "'wb'", ")", ":", "bytes_remaining", "=", "filesize", "save_timeout", "=", "dev", ".", "timeout", "dev", ".", "timeout", "=", "1", "while", "bytes_remaining", ">", "0", ":", "# Wait for ack so we don't get too far ahead of the remote", "ack", "=", "dev", ".", "read", "(", "1", ")", "if", "ack", "is", "None", "or", "ack", "!=", "b'\\x06'", ":", "sys", ".", "stderr", ".", "write", "(", "\"timed out or error in transfer to remote\\n\"", ")", "sys", ".", "exit", "(", "2", ")", "if", "HAS_BUFFER", ":", "buf_size", "=", "BUFFER_SIZE", "else", ":", "buf_size", "=", "BUFFER_SIZE", "//", "2", "read_size", "=", "min", "(", "bytes_remaining", ",", "buf_size", ")", "buf", "=", "src_file", ".", "read", "(", "read_size", ")", "#sys.stdout.write('\\r%d/%d' % (filesize - bytes_remaining, filesize))", "#sys.stdout.flush()", "if", "HAS_BUFFER", ":", "dev", ".", "write", "(", "buf", ")", "else", ":", "dev", ".", "write", "(", "binascii", ".", "hexlify", "(", "buf", ")", ")", "bytes_remaining", "-=", "read_size", "#sys.stdout.write('\\r')", "dev", ".", "timeout", "=", "save_timeout" ]
Intended to be passed to the `remote` function as the xfer_func argument. Matches up with recv_file_from_host.
[ "Intended", "to", "be", "passed", "to", "the", "remote", "function", "as", "the", "xfer_func", "argument", ".", "Matches", "up", "with", "recv_file_from_host", "." ]
a92a8fa8074ac792241c83c640a51b394667c324
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L1020-L1048
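The two records above are the two halves of a single host-to-board transfer: recv_file_from_host runs on the board and requests each chunk with an ASCII ACK (0x06), while send_file_to_remote runs on the host and answers with the next chunk, hex-encoded when HAS_BUFFER is false. Below is a rough sketch of how they are typically wired together through Device.remote (which appears later in this listing); the wrapper name copy_host_file_to_board and the surrounding rshell module context are assumptions, not code from the dataset.

    import os

    # Sketch only: pair the board-side receiver with the host-side sender.
    # Device.remote() ships recv_file_from_host's source to the board, then calls
    # send_file_to_remote(dev, ...) locally as xfer_func, so both sides share the
    # same serial link and the 0x06 ACK flow control shown above.
    def copy_host_file_to_board(dev, host_path, board_path):
        filesize = os.stat(host_path).st_size
        with open(host_path, 'rb') as src_file:
            dev.remote(recv_file_from_host, src_file, board_path, filesize,
                       xfer_func=send_file_to_remote)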
234,104
dhylands/rshell
rshell/main.py
recv_file_from_remote
def recv_file_from_remote(dev, src_filename, dst_file, filesize): """Intended to be passed to the `remote` function as the xfer_func argument. Matches up with send_file_to_host. """ bytes_remaining = filesize if not HAS_BUFFER: bytes_remaining *= 2 # hexlify makes each byte into 2 buf_size = BUFFER_SIZE write_buf = bytearray(buf_size) while bytes_remaining > 0: read_size = min(bytes_remaining, buf_size) buf_remaining = read_size buf_index = 0 while buf_remaining > 0: read_buf = dev.read(buf_remaining) bytes_read = len(read_buf) if bytes_read: write_buf[buf_index:bytes_read] = read_buf[0:bytes_read] buf_index += bytes_read buf_remaining -= bytes_read if HAS_BUFFER: dst_file.write(write_buf[0:read_size]) else: dst_file.write(binascii.unhexlify(write_buf[0:read_size])) # Send an ack to the remote as a form of flow control dev.write(b'\x06') # ASCII ACK is 0x06 bytes_remaining -= read_size
python
def recv_file_from_remote(dev, src_filename, dst_file, filesize): """Intended to be passed to the `remote` function as the xfer_func argument. Matches up with send_file_to_host. """ bytes_remaining = filesize if not HAS_BUFFER: bytes_remaining *= 2 # hexlify makes each byte into 2 buf_size = BUFFER_SIZE write_buf = bytearray(buf_size) while bytes_remaining > 0: read_size = min(bytes_remaining, buf_size) buf_remaining = read_size buf_index = 0 while buf_remaining > 0: read_buf = dev.read(buf_remaining) bytes_read = len(read_buf) if bytes_read: write_buf[buf_index:bytes_read] = read_buf[0:bytes_read] buf_index += bytes_read buf_remaining -= bytes_read if HAS_BUFFER: dst_file.write(write_buf[0:read_size]) else: dst_file.write(binascii.unhexlify(write_buf[0:read_size])) # Send an ack to the remote as a form of flow control dev.write(b'\x06') # ASCII ACK is 0x06 bytes_remaining -= read_size
[ "def", "recv_file_from_remote", "(", "dev", ",", "src_filename", ",", "dst_file", ",", "filesize", ")", ":", "bytes_remaining", "=", "filesize", "if", "not", "HAS_BUFFER", ":", "bytes_remaining", "*=", "2", "# hexlify makes each byte into 2", "buf_size", "=", "BUFFER_SIZE", "write_buf", "=", "bytearray", "(", "buf_size", ")", "while", "bytes_remaining", ">", "0", ":", "read_size", "=", "min", "(", "bytes_remaining", ",", "buf_size", ")", "buf_remaining", "=", "read_size", "buf_index", "=", "0", "while", "buf_remaining", ">", "0", ":", "read_buf", "=", "dev", ".", "read", "(", "buf_remaining", ")", "bytes_read", "=", "len", "(", "read_buf", ")", "if", "bytes_read", ":", "write_buf", "[", "buf_index", ":", "bytes_read", "]", "=", "read_buf", "[", "0", ":", "bytes_read", "]", "buf_index", "+=", "bytes_read", "buf_remaining", "-=", "bytes_read", "if", "HAS_BUFFER", ":", "dst_file", ".", "write", "(", "write_buf", "[", "0", ":", "read_size", "]", ")", "else", ":", "dst_file", ".", "write", "(", "binascii", ".", "unhexlify", "(", "write_buf", "[", "0", ":", "read_size", "]", ")", ")", "# Send an ack to the remote as a form of flow control", "dev", ".", "write", "(", "b'\\x06'", ")", "# ASCII ACK is 0x06", "bytes_remaining", "-=", "read_size" ]
Intended to be passed to the `remote` function as the xfer_func argument. Matches up with send_file_to_host.
[ "Intended", "to", "be", "passed", "to", "the", "remote", "function", "as", "the", "xfer_func", "argument", ".", "Matches", "up", "with", "send_file_to_host", "." ]
a92a8fa8074ac792241c83c640a51b394667c324
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L1051-L1077
234,105
dhylands/rshell
rshell/main.py
send_file_to_host
def send_file_to_host(src_filename, dst_file, filesize): """Function which runs on the pyboard. Matches up with recv_file_from_remote.""" import sys import ubinascii try: with open(src_filename, 'rb') as src_file: bytes_remaining = filesize if HAS_BUFFER: buf_size = BUFFER_SIZE else: buf_size = BUFFER_SIZE // 2 while bytes_remaining > 0: read_size = min(bytes_remaining, buf_size) buf = src_file.read(read_size) if HAS_BUFFER: sys.stdout.buffer.write(buf) else: sys.stdout.write(ubinascii.hexlify(buf)) bytes_remaining -= read_size # Wait for an ack so we don't get ahead of the remote while True: char = sys.stdin.read(1) if char: if char == '\x06': break # This should only happen if an error occurs sys.stdout.write(char) return True except: return False
python
def send_file_to_host(src_filename, dst_file, filesize): """Function which runs on the pyboard. Matches up with recv_file_from_remote.""" import sys import ubinascii try: with open(src_filename, 'rb') as src_file: bytes_remaining = filesize if HAS_BUFFER: buf_size = BUFFER_SIZE else: buf_size = BUFFER_SIZE // 2 while bytes_remaining > 0: read_size = min(bytes_remaining, buf_size) buf = src_file.read(read_size) if HAS_BUFFER: sys.stdout.buffer.write(buf) else: sys.stdout.write(ubinascii.hexlify(buf)) bytes_remaining -= read_size # Wait for an ack so we don't get ahead of the remote while True: char = sys.stdin.read(1) if char: if char == '\x06': break # This should only happen if an error occurs sys.stdout.write(char) return True except: return False
[ "def", "send_file_to_host", "(", "src_filename", ",", "dst_file", ",", "filesize", ")", ":", "import", "sys", "import", "ubinascii", "try", ":", "with", "open", "(", "src_filename", ",", "'rb'", ")", "as", "src_file", ":", "bytes_remaining", "=", "filesize", "if", "HAS_BUFFER", ":", "buf_size", "=", "BUFFER_SIZE", "else", ":", "buf_size", "=", "BUFFER_SIZE", "//", "2", "while", "bytes_remaining", ">", "0", ":", "read_size", "=", "min", "(", "bytes_remaining", ",", "buf_size", ")", "buf", "=", "src_file", ".", "read", "(", "read_size", ")", "if", "HAS_BUFFER", ":", "sys", ".", "stdout", ".", "buffer", ".", "write", "(", "buf", ")", "else", ":", "sys", ".", "stdout", ".", "write", "(", "ubinascii", ".", "hexlify", "(", "buf", ")", ")", "bytes_remaining", "-=", "read_size", "# Wait for an ack so we don't get ahead of the remote", "while", "True", ":", "char", "=", "sys", ".", "stdin", ".", "read", "(", "1", ")", "if", "char", ":", "if", "char", "==", "'\\x06'", ":", "break", "# This should only happen if an error occurs", "sys", ".", "stdout", ".", "write", "(", "char", ")", "return", "True", "except", ":", "return", "False" ]
Function which runs on the pyboard. Matches up with recv_file_from_remote.
[ "Function", "which", "runs", "on", "the", "pyboard", ".", "Matches", "up", "with", "recv_file_from_remote", "." ]
a92a8fa8074ac792241c83c640a51b394667c324
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L1080-L1109
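send_file_to_host and recv_file_from_remote are the mirrored pair for the board-to-host direction: the board streams each chunk and the host writes the 0x06 ACK after consuming it. With the same caveats as the sketch above (hypothetical wrapper name, filesize assumed already known), the wiring looks roughly like this.

    # Sketch only: pair the board-side sender with the host-side receiver.
    def copy_board_file_to_host(dev, board_path, host_path, filesize):
        with open(host_path, 'wb') as dst_file:
            dev.remote(send_file_to_host, board_path, dst_file, filesize,
                       xfer_func=recv_file_from_remote)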
234,106
dhylands/rshell
rshell/main.py
print_cols
def print_cols(words, print_func, termwidth=79): """Takes a single column of words, and prints it as multiple columns that will fit in termwidth columns. """ width = max([word_len(word) for word in words]) nwords = len(words) ncols = max(1, (termwidth + 1) // (width + 1)) nrows = (nwords + ncols - 1) // ncols for row in range(nrows): for i in range(row, nwords, nrows): word = words[i] if word[0] == '\x1b': print_func('%-*s' % (width + 11, words[i]), end='\n' if i + nrows >= nwords else ' ') else: print_func('%-*s' % (width, words[i]), end='\n' if i + nrows >= nwords else ' ')
python
def print_cols(words, print_func, termwidth=79): """Takes a single column of words, and prints it as multiple columns that will fit in termwidth columns. """ width = max([word_len(word) for word in words]) nwords = len(words) ncols = max(1, (termwidth + 1) // (width + 1)) nrows = (nwords + ncols - 1) // ncols for row in range(nrows): for i in range(row, nwords, nrows): word = words[i] if word[0] == '\x1b': print_func('%-*s' % (width + 11, words[i]), end='\n' if i + nrows >= nwords else ' ') else: print_func('%-*s' % (width, words[i]), end='\n' if i + nrows >= nwords else ' ')
[ "def", "print_cols", "(", "words", ",", "print_func", ",", "termwidth", "=", "79", ")", ":", "width", "=", "max", "(", "[", "word_len", "(", "word", ")", "for", "word", "in", "words", "]", ")", "nwords", "=", "len", "(", "words", ")", "ncols", "=", "max", "(", "1", ",", "(", "termwidth", "+", "1", ")", "//", "(", "width", "+", "1", ")", ")", "nrows", "=", "(", "nwords", "+", "ncols", "-", "1", ")", "//", "ncols", "for", "row", "in", "range", "(", "nrows", ")", ":", "for", "i", "in", "range", "(", "row", ",", "nwords", ",", "nrows", ")", ":", "word", "=", "words", "[", "i", "]", "if", "word", "[", "0", "]", "==", "'\\x1b'", ":", "print_func", "(", "'%-*s'", "%", "(", "width", "+", "11", ",", "words", "[", "i", "]", ")", ",", "end", "=", "'\\n'", "if", "i", "+", "nrows", ">=", "nwords", "else", "' '", ")", "else", ":", "print_func", "(", "'%-*s'", "%", "(", "width", ",", "words", "[", "i", "]", ")", ",", "end", "=", "'\\n'", "if", "i", "+", "nrows", ">=", "nwords", "else", "' '", ")" ]
Takes a single column of words, and prints it as multiple columns that will fit in termwidth columns.
[ "Takes", "a", "single", "column", "of", "words", "and", "prints", "it", "as", "multiple", "columns", "that", "will", "fit", "in", "termwidth", "columns", "." ]
a92a8fa8074ac792241c83c640a51b394667c324
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L1190-L1206
234,107
dhylands/rshell
rshell/main.py
print_long
def print_long(filename, stat, print_func): """Prints detailed information about the file passed in.""" size = stat_size(stat) mtime = stat_mtime(stat) file_mtime = time.localtime(mtime) curr_time = time.time() if mtime > (curr_time + SIX_MONTHS) or mtime < (curr_time - SIX_MONTHS): print_func('%6d %s %2d %04d %s' % (size, MONTH[file_mtime[1]], file_mtime[2], file_mtime[0], decorated_filename(filename, stat))) else: print_func('%6d %s %2d %02d:%02d %s' % (size, MONTH[file_mtime[1]], file_mtime[2], file_mtime[3], file_mtime[4], decorated_filename(filename, stat)))
python
def print_long(filename, stat, print_func): """Prints detailed information about the file passed in.""" size = stat_size(stat) mtime = stat_mtime(stat) file_mtime = time.localtime(mtime) curr_time = time.time() if mtime > (curr_time + SIX_MONTHS) or mtime < (curr_time - SIX_MONTHS): print_func('%6d %s %2d %04d %s' % (size, MONTH[file_mtime[1]], file_mtime[2], file_mtime[0], decorated_filename(filename, stat))) else: print_func('%6d %s %2d %02d:%02d %s' % (size, MONTH[file_mtime[1]], file_mtime[2], file_mtime[3], file_mtime[4], decorated_filename(filename, stat)))
[ "def", "print_long", "(", "filename", ",", "stat", ",", "print_func", ")", ":", "size", "=", "stat_size", "(", "stat", ")", "mtime", "=", "stat_mtime", "(", "stat", ")", "file_mtime", "=", "time", ".", "localtime", "(", "mtime", ")", "curr_time", "=", "time", ".", "time", "(", ")", "if", "mtime", ">", "(", "curr_time", "+", "SIX_MONTHS", ")", "or", "mtime", "<", "(", "curr_time", "-", "SIX_MONTHS", ")", ":", "print_func", "(", "'%6d %s %2d %04d %s'", "%", "(", "size", ",", "MONTH", "[", "file_mtime", "[", "1", "]", "]", ",", "file_mtime", "[", "2", "]", ",", "file_mtime", "[", "0", "]", ",", "decorated_filename", "(", "filename", ",", "stat", ")", ")", ")", "else", ":", "print_func", "(", "'%6d %s %2d %02d:%02d %s'", "%", "(", "size", ",", "MONTH", "[", "file_mtime", "[", "1", "]", "]", ",", "file_mtime", "[", "2", "]", ",", "file_mtime", "[", "3", "]", ",", "file_mtime", "[", "4", "]", ",", "decorated_filename", "(", "filename", ",", "stat", ")", ")", ")" ]
Prints detailed information about the file passed in.
[ "Prints", "detailed", "information", "about", "the", "file", "passed", "in", "." ]
a92a8fa8074ac792241c83c640a51b394667c324
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L1224-L1237
234,108
dhylands/rshell
rshell/main.py
connect
def connect(port, baud=115200, user='micro', password='python', wait=0): """Tries to connect automagically via network or serial.""" try: ip_address = socket.gethostbyname(port) #print('Connecting to ip', ip_address) connect_telnet(port, ip_address, user=user, password=password) except socket.gaierror: # Doesn't look like a hostname or IP-address, assume its a serial port #print('connecting to serial', port) connect_serial(port, baud=baud, wait=wait)
python
def connect(port, baud=115200, user='micro', password='python', wait=0): """Tries to connect automagically via network or serial.""" try: ip_address = socket.gethostbyname(port) #print('Connecting to ip', ip_address) connect_telnet(port, ip_address, user=user, password=password) except socket.gaierror: # Doesn't look like a hostname or IP-address, assume its a serial port #print('connecting to serial', port) connect_serial(port, baud=baud, wait=wait)
[ "def", "connect", "(", "port", ",", "baud", "=", "115200", ",", "user", "=", "'micro'", ",", "password", "=", "'python'", ",", "wait", "=", "0", ")", ":", "try", ":", "ip_address", "=", "socket", ".", "gethostbyname", "(", "port", ")", "#print('Connecting to ip', ip_address)", "connect_telnet", "(", "port", ",", "ip_address", ",", "user", "=", "user", ",", "password", "=", "password", ")", "except", "socket", ".", "gaierror", ":", "# Doesn't look like a hostname or IP-address, assume its a serial port", "#print('connecting to serial', port)", "connect_serial", "(", "port", ",", "baud", "=", "baud", ",", "wait", "=", "wait", ")" ]
Tries to connect automagically via network or serial.
[ "Tries", "to", "connect", "automagically", "via", "network", "or", "serial", "." ]
a92a8fa8074ac792241c83c640a51b394667c324
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L1276-L1285
234,109
dhylands/rshell
rshell/main.py
connect_telnet
def connect_telnet(name, ip_address=None, user='micro', password='python'): """Connect to a MicroPython board via telnet.""" if ip_address is None: try: ip_address = socket.gethostbyname(name) except socket.gaierror: ip_address = name if not QUIET: if name == ip_address: print('Connecting to (%s) ...' % ip_address) else: print('Connecting to %s (%s) ...' % (name, ip_address)) dev = DeviceNet(name, ip_address, user, password) add_device(dev)
python
def connect_telnet(name, ip_address=None, user='micro', password='python'): """Connect to a MicroPython board via telnet.""" if ip_address is None: try: ip_address = socket.gethostbyname(name) except socket.gaierror: ip_address = name if not QUIET: if name == ip_address: print('Connecting to (%s) ...' % ip_address) else: print('Connecting to %s (%s) ...' % (name, ip_address)) dev = DeviceNet(name, ip_address, user, password) add_device(dev)
[ "def", "connect_telnet", "(", "name", ",", "ip_address", "=", "None", ",", "user", "=", "'micro'", ",", "password", "=", "'python'", ")", ":", "if", "ip_address", "is", "None", ":", "try", ":", "ip_address", "=", "socket", ".", "gethostbyname", "(", "name", ")", "except", "socket", ".", "gaierror", ":", "ip_address", "=", "name", "if", "not", "QUIET", ":", "if", "name", "==", "ip_address", ":", "print", "(", "'Connecting to (%s) ...'", "%", "ip_address", ")", "else", ":", "print", "(", "'Connecting to %s (%s) ...'", "%", "(", "name", ",", "ip_address", ")", ")", "dev", "=", "DeviceNet", "(", "name", ",", "ip_address", ",", "user", ",", "password", ")", "add_device", "(", "dev", ")" ]
Connect to a MicroPython board via telnet.
[ "Connect", "to", "a", "MicroPython", "board", "via", "telnet", "." ]
a92a8fa8074ac792241c83c640a51b394667c324
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L1288-L1301
234,110
dhylands/rshell
rshell/main.py
connect_serial
def connect_serial(port, baud=115200, wait=0): """Connect to a MicroPython board via a serial port.""" if not QUIET: print('Connecting to %s (buffer-size %d)...' % (port, BUFFER_SIZE)) try: dev = DeviceSerial(port, baud, wait) except DeviceError as err: sys.stderr.write(str(err)) sys.stderr.write('\n') return False add_device(dev) return True
python
def connect_serial(port, baud=115200, wait=0): """Connect to a MicroPython board via a serial port.""" if not QUIET: print('Connecting to %s (buffer-size %d)...' % (port, BUFFER_SIZE)) try: dev = DeviceSerial(port, baud, wait) except DeviceError as err: sys.stderr.write(str(err)) sys.stderr.write('\n') return False add_device(dev) return True
[ "def", "connect_serial", "(", "port", ",", "baud", "=", "115200", ",", "wait", "=", "0", ")", ":", "if", "not", "QUIET", ":", "print", "(", "'Connecting to %s (buffer-size %d)...'", "%", "(", "port", ",", "BUFFER_SIZE", ")", ")", "try", ":", "dev", "=", "DeviceSerial", "(", "port", ",", "baud", ",", "wait", ")", "except", "DeviceError", "as", "err", ":", "sys", ".", "stderr", ".", "write", "(", "str", "(", "err", ")", ")", "sys", ".", "stderr", ".", "write", "(", "'\\n'", ")", "return", "False", "add_device", "(", "dev", ")", "return", "True" ]
Connect to a MicroPython board via a serial port.
[ "Connect", "to", "a", "MicroPython", "board", "via", "a", "serial", "port", "." ]
a92a8fa8074ac792241c83c640a51b394667c324
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L1304-L1315
234,111
dhylands/rshell
rshell/main.py
main
def main(): """This main function saves the stdin termios settings, calls real_main, and restores stdin termios settings when it returns. """ save_settings = None stdin_fd = -1 try: import termios stdin_fd = sys.stdin.fileno() save_settings = termios.tcgetattr(stdin_fd) except: pass try: real_main() finally: if save_settings: termios.tcsetattr(stdin_fd, termios.TCSANOW, save_settings)
python
def main(): """This main function saves the stdin termios settings, calls real_main, and restores stdin termios settings when it returns. """ save_settings = None stdin_fd = -1 try: import termios stdin_fd = sys.stdin.fileno() save_settings = termios.tcgetattr(stdin_fd) except: pass try: real_main() finally: if save_settings: termios.tcsetattr(stdin_fd, termios.TCSANOW, save_settings)
[ "def", "main", "(", ")", ":", "save_settings", "=", "None", "stdin_fd", "=", "-", "1", "try", ":", "import", "termios", "stdin_fd", "=", "sys", ".", "stdin", ".", "fileno", "(", ")", "save_settings", "=", "termios", ".", "tcgetattr", "(", "stdin_fd", ")", "except", ":", "pass", "try", ":", "real_main", "(", ")", "finally", ":", "if", "save_settings", ":", "termios", ".", "tcsetattr", "(", "stdin_fd", ",", "termios", ".", "TCSANOW", ",", "save_settings", ")" ]
This main function saves the stdin termios settings, calls real_main, and restores stdin termios settings when it returns.
[ "This", "main", "function", "saves", "the", "stdin", "termios", "settings", "calls", "real_main", "and", "restores", "stdin", "termios", "settings", "when", "it", "returns", "." ]
a92a8fa8074ac792241c83c640a51b394667c324
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L2888-L2904
234,112
dhylands/rshell
rshell/main.py
Device.close
def close(self): """Closes the serial port.""" if self.pyb and self.pyb.serial: self.pyb.serial.close() self.pyb = None
python
def close(self): """Closes the serial port.""" if self.pyb and self.pyb.serial: self.pyb.serial.close() self.pyb = None
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "pyb", "and", "self", ".", "pyb", ".", "serial", ":", "self", ".", "pyb", ".", "serial", ".", "close", "(", ")", "self", ".", "pyb", "=", "None" ]
Closes the serial port.
[ "Closes", "the", "serial", "port", "." ]
a92a8fa8074ac792241c83c640a51b394667c324
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L1398-L1402
234,113
dhylands/rshell
rshell/main.py
Device.is_root_path
def is_root_path(self, filename): """Determines if 'filename' corresponds to a directory on this device.""" test_filename = filename + '/' for root_dir in self.root_dirs: if test_filename.startswith(root_dir): return True return False
python
def is_root_path(self, filename): """Determines if 'filename' corresponds to a directory on this device.""" test_filename = filename + '/' for root_dir in self.root_dirs: if test_filename.startswith(root_dir): return True return False
[ "def", "is_root_path", "(", "self", ",", "filename", ")", ":", "test_filename", "=", "filename", "+", "'/'", "for", "root_dir", "in", "self", ".", "root_dirs", ":", "if", "test_filename", ".", "startswith", "(", "root_dir", ")", ":", "return", "True", "return", "False" ]
Determines if 'filename' corresponds to a directory on this device.
[ "Determines", "if", "filename", "corresponds", "to", "a", "directory", "on", "this", "device", "." ]
a92a8fa8074ac792241c83c640a51b394667c324
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L1407-L1413
234,114
dhylands/rshell
rshell/main.py
Device.read
def read(self, num_bytes): """Reads data from the pyboard over the serial port.""" self.check_pyb() try: return self.pyb.serial.read(num_bytes) except (serial.serialutil.SerialException, TypeError): # Write failed - assume that we got disconnected self.close() raise DeviceError('serial port %s closed' % self.dev_name_short)
python
def read(self, num_bytes): """Reads data from the pyboard over the serial port.""" self.check_pyb() try: return self.pyb.serial.read(num_bytes) except (serial.serialutil.SerialException, TypeError): # Write failed - assume that we got disconnected self.close() raise DeviceError('serial port %s closed' % self.dev_name_short)
[ "def", "read", "(", "self", ",", "num_bytes", ")", ":", "self", ".", "check_pyb", "(", ")", "try", ":", "return", "self", ".", "pyb", ".", "serial", ".", "read", "(", "num_bytes", ")", "except", "(", "serial", ".", "serialutil", ".", "SerialException", ",", "TypeError", ")", ":", "# Write failed - assume that we got disconnected", "self", ".", "close", "(", ")", "raise", "DeviceError", "(", "'serial port %s closed'", "%", "self", ".", "dev_name_short", ")" ]
Reads data from the pyboard over the serial port.
[ "Reads", "data", "from", "the", "pyboard", "over", "the", "serial", "port", "." ]
a92a8fa8074ac792241c83c640a51b394667c324
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L1418-L1426
234,115
dhylands/rshell
rshell/main.py
Device.remote
def remote(self, func, *args, xfer_func=None, **kwargs): """Calls func with the indicated args on the micropython board.""" global HAS_BUFFER HAS_BUFFER = self.has_buffer if hasattr(func, 'extra_funcs'): func_name = func.name func_lines = [] for extra_func in func.extra_funcs: func_lines += inspect.getsource(extra_func).split('\n') func_lines += [''] func_lines += filter(lambda line: line[:1] != '@', func.source.split('\n')) func_src = '\n'.join(func_lines) else: func_name = func.__name__ func_src = inspect.getsource(func) args_arr = [remote_repr(i) for i in args] kwargs_arr = ["{}={}".format(k, remote_repr(v)) for k, v in kwargs.items()] func_src += 'output = ' + func_name + '(' func_src += ', '.join(args_arr + kwargs_arr) func_src += ')\n' func_src += 'if output is None:\n' func_src += ' print("None")\n' func_src += 'else:\n' func_src += ' print(output)\n' time_offset = self.time_offset if self.adjust_for_timezone: time_offset -= time.localtime().tm_gmtoff func_src = func_src.replace('TIME_OFFSET', '{}'.format(time_offset)) func_src = func_src.replace('HAS_BUFFER', '{}'.format(HAS_BUFFER)) func_src = func_src.replace('BUFFER_SIZE', '{}'.format(BUFFER_SIZE)) func_src = func_src.replace('IS_UPY', 'True') if DEBUG: print('----- About to send %d bytes of code to the pyboard -----' % len(func_src)) print(func_src) print('-----') self.check_pyb() try: self.pyb.enter_raw_repl() self.check_pyb() output = self.pyb.exec_raw_no_follow(func_src) if xfer_func: xfer_func(self, *args, **kwargs) self.check_pyb() output, _ = self.pyb.follow(timeout=20) self.check_pyb() self.pyb.exit_raw_repl() except (serial.serialutil.SerialException, TypeError): self.close() raise DeviceError('serial port %s closed' % self.dev_name_short) if DEBUG: print('-----Response-----') print(output) print('-----') return output
python
def remote(self, func, *args, xfer_func=None, **kwargs): """Calls func with the indicated args on the micropython board.""" global HAS_BUFFER HAS_BUFFER = self.has_buffer if hasattr(func, 'extra_funcs'): func_name = func.name func_lines = [] for extra_func in func.extra_funcs: func_lines += inspect.getsource(extra_func).split('\n') func_lines += [''] func_lines += filter(lambda line: line[:1] != '@', func.source.split('\n')) func_src = '\n'.join(func_lines) else: func_name = func.__name__ func_src = inspect.getsource(func) args_arr = [remote_repr(i) for i in args] kwargs_arr = ["{}={}".format(k, remote_repr(v)) for k, v in kwargs.items()] func_src += 'output = ' + func_name + '(' func_src += ', '.join(args_arr + kwargs_arr) func_src += ')\n' func_src += 'if output is None:\n' func_src += ' print("None")\n' func_src += 'else:\n' func_src += ' print(output)\n' time_offset = self.time_offset if self.adjust_for_timezone: time_offset -= time.localtime().tm_gmtoff func_src = func_src.replace('TIME_OFFSET', '{}'.format(time_offset)) func_src = func_src.replace('HAS_BUFFER', '{}'.format(HAS_BUFFER)) func_src = func_src.replace('BUFFER_SIZE', '{}'.format(BUFFER_SIZE)) func_src = func_src.replace('IS_UPY', 'True') if DEBUG: print('----- About to send %d bytes of code to the pyboard -----' % len(func_src)) print(func_src) print('-----') self.check_pyb() try: self.pyb.enter_raw_repl() self.check_pyb() output = self.pyb.exec_raw_no_follow(func_src) if xfer_func: xfer_func(self, *args, **kwargs) self.check_pyb() output, _ = self.pyb.follow(timeout=20) self.check_pyb() self.pyb.exit_raw_repl() except (serial.serialutil.SerialException, TypeError): self.close() raise DeviceError('serial port %s closed' % self.dev_name_short) if DEBUG: print('-----Response-----') print(output) print('-----') return output
[ "def", "remote", "(", "self", ",", "func", ",", "*", "args", ",", "xfer_func", "=", "None", ",", "*", "*", "kwargs", ")", ":", "global", "HAS_BUFFER", "HAS_BUFFER", "=", "self", ".", "has_buffer", "if", "hasattr", "(", "func", ",", "'extra_funcs'", ")", ":", "func_name", "=", "func", ".", "name", "func_lines", "=", "[", "]", "for", "extra_func", "in", "func", ".", "extra_funcs", ":", "func_lines", "+=", "inspect", ".", "getsource", "(", "extra_func", ")", ".", "split", "(", "'\\n'", ")", "func_lines", "+=", "[", "''", "]", "func_lines", "+=", "filter", "(", "lambda", "line", ":", "line", "[", ":", "1", "]", "!=", "'@'", ",", "func", ".", "source", ".", "split", "(", "'\\n'", ")", ")", "func_src", "=", "'\\n'", ".", "join", "(", "func_lines", ")", "else", ":", "func_name", "=", "func", ".", "__name__", "func_src", "=", "inspect", ".", "getsource", "(", "func", ")", "args_arr", "=", "[", "remote_repr", "(", "i", ")", "for", "i", "in", "args", "]", "kwargs_arr", "=", "[", "\"{}={}\"", ".", "format", "(", "k", ",", "remote_repr", "(", "v", ")", ")", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", "]", "func_src", "+=", "'output = '", "+", "func_name", "+", "'('", "func_src", "+=", "', '", ".", "join", "(", "args_arr", "+", "kwargs_arr", ")", "func_src", "+=", "')\\n'", "func_src", "+=", "'if output is None:\\n'", "func_src", "+=", "' print(\"None\")\\n'", "func_src", "+=", "'else:\\n'", "func_src", "+=", "' print(output)\\n'", "time_offset", "=", "self", ".", "time_offset", "if", "self", ".", "adjust_for_timezone", ":", "time_offset", "-=", "time", ".", "localtime", "(", ")", ".", "tm_gmtoff", "func_src", "=", "func_src", ".", "replace", "(", "'TIME_OFFSET'", ",", "'{}'", ".", "format", "(", "time_offset", ")", ")", "func_src", "=", "func_src", ".", "replace", "(", "'HAS_BUFFER'", ",", "'{}'", ".", "format", "(", "HAS_BUFFER", ")", ")", "func_src", "=", "func_src", ".", "replace", "(", "'BUFFER_SIZE'", ",", "'{}'", ".", "format", "(", "BUFFER_SIZE", ")", ")", "func_src", "=", "func_src", ".", "replace", "(", "'IS_UPY'", ",", "'True'", ")", "if", "DEBUG", ":", "print", "(", "'----- About to send %d bytes of code to the pyboard -----'", "%", "len", "(", "func_src", ")", ")", "print", "(", "func_src", ")", "print", "(", "'-----'", ")", "self", ".", "check_pyb", "(", ")", "try", ":", "self", ".", "pyb", ".", "enter_raw_repl", "(", ")", "self", ".", "check_pyb", "(", ")", "output", "=", "self", ".", "pyb", ".", "exec_raw_no_follow", "(", "func_src", ")", "if", "xfer_func", ":", "xfer_func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "check_pyb", "(", ")", "output", ",", "_", "=", "self", ".", "pyb", ".", "follow", "(", "timeout", "=", "20", ")", "self", ".", "check_pyb", "(", ")", "self", ".", "pyb", ".", "exit_raw_repl", "(", ")", "except", "(", "serial", ".", "serialutil", ".", "SerialException", ",", "TypeError", ")", ":", "self", ".", "close", "(", ")", "raise", "DeviceError", "(", "'serial port %s closed'", "%", "self", ".", "dev_name_short", ")", "if", "DEBUG", ":", "print", "(", "'-----Response-----'", ")", "print", "(", "output", ")", "print", "(", "'-----'", ")", "return", "output" ]
Calls func with the indicated args on the micropython board.
[ "Calls", "func", "with", "the", "indicated", "args", "on", "the", "micropython", "board", "." ]
a92a8fa8074ac792241c83c640a51b394667c324
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L1428-L1481
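Device.remote, shown above, is the transport the file-transfer helpers earlier in this listing rely on: it ships a function's source to the board over the raw REPL, substitutes the HAS_BUFFER/BUFFER_SIZE/TIME_OFFSET placeholders, runs it there, and returns whatever the board printed, as bytes. A minimal usage sketch, assuming the rshell module context (listdir_stat is the board-side helper already referenced by make_dir above); the wrapper name is illustrative.

    # Sketch only: a simple remote call with no transfer function.
    def list_board_dir(dev, dirname='/flash'):
        # remote() returns the board's printed output as bytes; callers typically
        # evaluate that output to recover a Python object.
        return dev.remote(listdir_stat, dirname)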
234,116
dhylands/rshell
rshell/main.py
Device.sync_time
def sync_time(self): """Sets the time on the pyboard to match the time on the host.""" now = time.localtime(time.time()) self.remote(set_time, (now.tm_year, now.tm_mon, now.tm_mday, now.tm_wday + 1, now.tm_hour, now.tm_min, now.tm_sec, 0)) return now
python
def sync_time(self): """Sets the time on the pyboard to match the time on the host.""" now = time.localtime(time.time()) self.remote(set_time, (now.tm_year, now.tm_mon, now.tm_mday, now.tm_wday + 1, now.tm_hour, now.tm_min, now.tm_sec, 0)) return now
[ "def", "sync_time", "(", "self", ")", ":", "now", "=", "time", ".", "localtime", "(", "time", ".", "time", "(", ")", ")", "self", ".", "remote", "(", "set_time", ",", "(", "now", ".", "tm_year", ",", "now", ".", "tm_mon", ",", "now", ".", "tm_mday", ",", "now", ".", "tm_wday", "+", "1", ",", "now", ".", "tm_hour", ",", "now", ".", "tm_min", ",", "now", ".", "tm_sec", ",", "0", ")", ")", "return", "now" ]
Sets the time on the pyboard to match the time on the host.
[ "Sets", "the", "time", "on", "the", "pyboard", "to", "match", "the", "time", "on", "the", "host", "." ]
a92a8fa8074ac792241c83c640a51b394667c324
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L1506-L1511
234,117
dhylands/rshell
rshell/main.py
Device.write
def write(self, buf): """Writes data to the pyboard over the serial port.""" self.check_pyb() try: return self.pyb.serial.write(buf) except (serial.serialutil.SerialException, BrokenPipeError, TypeError): # Write failed - assume that we got disconnected self.close() raise DeviceError('{} closed'.format(self.dev_name_short))
python
def write(self, buf): """Writes data to the pyboard over the serial port.""" self.check_pyb() try: return self.pyb.serial.write(buf) except (serial.serialutil.SerialException, BrokenPipeError, TypeError): # Write failed - assume that we got disconnected self.close() raise DeviceError('{} closed'.format(self.dev_name_short))
[ "def", "write", "(", "self", ",", "buf", ")", ":", "self", ".", "check_pyb", "(", ")", "try", ":", "return", "self", ".", "pyb", ".", "serial", ".", "write", "(", "buf", ")", "except", "(", "serial", ".", "serialutil", ".", "SerialException", ",", "BrokenPipeError", ",", "TypeError", ")", ":", "# Write failed - assume that we got disconnected", "self", ".", "close", "(", ")", "raise", "DeviceError", "(", "'{} closed'", ".", "format", "(", "self", ".", "dev_name_short", ")", ")" ]
Writes data to the pyboard over the serial port.
[ "Writes", "data", "to", "the", "pyboard", "over", "the", "serial", "port", "." ]
a92a8fa8074ac792241c83c640a51b394667c324
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L1513-L1521
234,118
dhylands/rshell
rshell/main.py
DeviceSerial.timeout
def timeout(self, value): """Sets the timeout associated with the serial port.""" self.check_pyb() try: self.pyb.serial.timeout = value except: # timeout is a property so it calls code, and that can fail # if the serial port is closed. pass
python
def timeout(self, value): """Sets the timeout associated with the serial port.""" self.check_pyb() try: self.pyb.serial.timeout = value except: # timeout is a property so it calls code, and that can fail # if the serial port is closed. pass
[ "def", "timeout", "(", "self", ",", "value", ")", ":", "self", ".", "check_pyb", "(", ")", "try", ":", "self", ".", "pyb", ".", "serial", ".", "timeout", "=", "value", "except", ":", "# timeout is a property so it calls code, and that can fail", "# if the serial port is closed.", "pass" ]
Sets the timeout associated with the serial port.
[ "Sets", "the", "timeout", "associated", "with", "the", "serial", "port", "." ]
a92a8fa8074ac792241c83c640a51b394667c324
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L1611-L1619
234,119
dhylands/rshell
rshell/main.py
Shell.onecmd
def onecmd(self, line): """Override onecmd. 1 - So we don't have to have a do_EOF method. 2 - So we can strip comments 3 - So we can track line numbers """ if DEBUG: print('Executing "%s"' % line) self.line_num += 1 if line == "EOF" or line == 'exit': if cmd.Cmd.use_rawinput: # This means that we printed a prompt, and we'll want to # print a newline to pretty things up for the caller. self.print('') return True # Strip comments comment_idx = line.find("#") if comment_idx >= 0: line = line[0:comment_idx] line = line.strip() # search multiple commands on the same line lexer = shlex.shlex(line) lexer.whitespace = '' for issemicolon, group in itertools.groupby(lexer, lambda x: x == ";"): if not issemicolon: self.onecmd_exec("".join(group))
python
def onecmd(self, line): """Override onecmd. 1 - So we don't have to have a do_EOF method. 2 - So we can strip comments 3 - So we can track line numbers """ if DEBUG: print('Executing "%s"' % line) self.line_num += 1 if line == "EOF" or line == 'exit': if cmd.Cmd.use_rawinput: # This means that we printed a prompt, and we'll want to # print a newline to pretty things up for the caller. self.print('') return True # Strip comments comment_idx = line.find("#") if comment_idx >= 0: line = line[0:comment_idx] line = line.strip() # search multiple commands on the same line lexer = shlex.shlex(line) lexer.whitespace = '' for issemicolon, group in itertools.groupby(lexer, lambda x: x == ";"): if not issemicolon: self.onecmd_exec("".join(group))
[ "def", "onecmd", "(", "self", ",", "line", ")", ":", "if", "DEBUG", ":", "print", "(", "'Executing \"%s\"'", "%", "line", ")", "self", ".", "line_num", "+=", "1", "if", "line", "==", "\"EOF\"", "or", "line", "==", "'exit'", ":", "if", "cmd", ".", "Cmd", ".", "use_rawinput", ":", "# This means that we printed a prompt, and we'll want to", "# print a newline to pretty things up for the caller.", "self", ".", "print", "(", "''", ")", "return", "True", "# Strip comments", "comment_idx", "=", "line", ".", "find", "(", "\"#\"", ")", "if", "comment_idx", ">=", "0", ":", "line", "=", "line", "[", "0", ":", "comment_idx", "]", "line", "=", "line", ".", "strip", "(", ")", "# search multiple commands on the same line", "lexer", "=", "shlex", ".", "shlex", "(", "line", ")", "lexer", ".", "whitespace", "=", "''", "for", "issemicolon", ",", "group", "in", "itertools", ".", "groupby", "(", "lexer", ",", "lambda", "x", ":", "x", "==", "\";\"", ")", ":", "if", "not", "issemicolon", ":", "self", ".", "onecmd_exec", "(", "\"\"", ".", "join", "(", "group", ")", ")" ]
Override onecmd. 1 - So we don't have to have a do_EOF method. 2 - So we can strip comments 3 - So we can track line numbers
[ "Override", "onecmd", "." ]
a92a8fa8074ac792241c83c640a51b394667c324
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L1725-L1753
234,120
dhylands/rshell
rshell/main.py
Shell.print
def print(self, *args, end='\n', file=None): """Convenience function so you don't need to remember to put the \n at the end of the line. """ if file is None: file = self.stdout s = ' '.join(str(arg) for arg in args) + end file.write(s)
python
def print(self, *args, end='\n', file=None): """Convenience function so you don't need to remember to put the \n at the end of the line. """ if file is None: file = self.stdout s = ' '.join(str(arg) for arg in args) + end file.write(s)
[ "def", "print", "(", "self", ",", "*", "args", ",", "end", "=", "'\\n'", ",", "file", "=", "None", ")", ":", "if", "file", "is", "None", ":", "file", "=", "self", ".", "stdout", "s", "=", "' '", ".", "join", "(", "str", "(", "arg", ")", "for", "arg", "in", "args", ")", "+", "end", "file", ".", "write", "(", "s", ")" ]
Convenience function so you don't need to remember to put the \n at the end of the line.
[ "Convenience", "function", "so", "you", "don", "t", "need", "to", "remember", "to", "put", "the", "\\", "n", "at", "the", "end", "of", "the", "line", "." ]
a92a8fa8074ac792241c83c640a51b394667c324
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L1808-L1815
234,121
dhylands/rshell
rshell/main.py
Shell.filename_complete
def filename_complete(self, text, line, begidx, endidx): """Wrapper for catching exceptions since cmd seems to silently absorb them. """ try: return self.real_filename_complete(text, line, begidx, endidx) except: traceback.print_exc()
python
def filename_complete(self, text, line, begidx, endidx): """Wrapper for catching exceptions since cmd seems to silently absorb them. """ try: return self.real_filename_complete(text, line, begidx, endidx) except: traceback.print_exc()
[ "def", "filename_complete", "(", "self", ",", "text", ",", "line", ",", "begidx", ",", "endidx", ")", ":", "try", ":", "return", "self", ".", "real_filename_complete", "(", "text", ",", "line", ",", "begidx", ",", "endidx", ")", "except", ":", "traceback", ".", "print_exc", "(", ")" ]
Wrapper for catching exceptions since cmd seems to silently absorb them.
[ "Wrapper", "for", "catching", "exceptions", "since", "cmd", "seems", "to", "silently", "absorb", "them", "." ]
a92a8fa8074ac792241c83c640a51b394667c324
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L1839-L1846
234,122
dhylands/rshell
rshell/main.py
Shell.real_filename_complete
def real_filename_complete(self, text, line, begidx, endidx): """Figure out what filenames match the completion.""" # line contains the full command line that's been entered so far. # text contains the portion of the line that readline is trying to complete # text should correspond to line[begidx:endidx] # # The way the completer works text will start after one of the characters # in DELIMS. So if the filename entered so far was "embedded\ sp" then # text will point to the s in sp. # # The following bit of logic backs up to find the real beginning of the # filename. if begidx >= len(line): # This happens when you hit TAB on an empty filename before_match = begidx else: for before_match in range(begidx, 0, -1): if line[before_match] in DELIMS and before_match >= 1 and line[before_match - 1] != '\\': break # We set fixed to be the portion of the filename which is before text # and match is the full portion of the filename that's been entered so # far (that's the part we use for matching files). # # When we return a list of completions, the bit that we return should # just be the portion that we replace 'text' with. fixed = unescape(line[before_match+1:begidx]) # fixed portion of the match match = unescape(line[before_match+1:endidx]) # portion to match filenames against # We do the following to cover the case that the current directory # is / and the path being entered is relative. strip = '' if len(match) > 0 and match[0] == '/': abs_match = match elif cur_dir == '/': abs_match = cur_dir + match strip = cur_dir else: abs_match = cur_dir + '/' + match strip = cur_dir + '/' completions = [] prepend = '' if abs_match.rfind('/') == 0: # match is in the root directory # This means that we're looking for matches in the root directory # (i.e. abs_match is /foo and the user hit TAB). # So we'll supply the matching board names as possible completions. # Since they're all treated as directories we leave the trailing slash. with DEV_LOCK: if match[0] == '/': completions += [dev.name_path for dev in DEVS if dev.name_path.startswith(abs_match)] else: completions += [dev.name_path[1:] for dev in DEVS if dev.name_path.startswith(abs_match)] if DEFAULT_DEV: # Add root directories of the default device (i.e. /flash/ and /sd/) if match[0] == '/': completions += [root_dir for root_dir in DEFAULT_DEV.root_dirs if root_dir.startswith(match)] else: completions += [root_dir[1:] for root_dir in DEFAULT_DEV.root_dirs if root_dir[1:].startswith(match)] else: # This means that there are at least 2 slashes in abs_match. If one # of them matches a board name then we need to remove the board # name from fixed. Since the results from listdir_matches won't # contain the board name, we need to prepend each of the completions. with DEV_LOCK: for dev in DEVS: if abs_match.startswith(dev.name_path): prepend = dev.name_path[:-1] break paths = sorted(auto(listdir_matches, abs_match)) for path in paths: path = prepend + path if path.startswith(strip): path = path[len(strip):] completions.append(escape(path.replace(fixed, '', 1))) return completions
python
def real_filename_complete(self, text, line, begidx, endidx): """Figure out what filenames match the completion.""" # line contains the full command line that's been entered so far. # text contains the portion of the line that readline is trying to complete # text should correspond to line[begidx:endidx] # # The way the completer works text will start after one of the characters # in DELIMS. So if the filename entered so far was "embedded\ sp" then # text will point to the s in sp. # # The following bit of logic backs up to find the real beginning of the # filename. if begidx >= len(line): # This happens when you hit TAB on an empty filename before_match = begidx else: for before_match in range(begidx, 0, -1): if line[before_match] in DELIMS and before_match >= 1 and line[before_match - 1] != '\\': break # We set fixed to be the portion of the filename which is before text # and match is the full portion of the filename that's been entered so # far (that's the part we use for matching files). # # When we return a list of completions, the bit that we return should # just be the portion that we replace 'text' with. fixed = unescape(line[before_match+1:begidx]) # fixed portion of the match match = unescape(line[before_match+1:endidx]) # portion to match filenames against # We do the following to cover the case that the current directory # is / and the path being entered is relative. strip = '' if len(match) > 0 and match[0] == '/': abs_match = match elif cur_dir == '/': abs_match = cur_dir + match strip = cur_dir else: abs_match = cur_dir + '/' + match strip = cur_dir + '/' completions = [] prepend = '' if abs_match.rfind('/') == 0: # match is in the root directory # This means that we're looking for matches in the root directory # (i.e. abs_match is /foo and the user hit TAB). # So we'll supply the matching board names as possible completions. # Since they're all treated as directories we leave the trailing slash. with DEV_LOCK: if match[0] == '/': completions += [dev.name_path for dev in DEVS if dev.name_path.startswith(abs_match)] else: completions += [dev.name_path[1:] for dev in DEVS if dev.name_path.startswith(abs_match)] if DEFAULT_DEV: # Add root directories of the default device (i.e. /flash/ and /sd/) if match[0] == '/': completions += [root_dir for root_dir in DEFAULT_DEV.root_dirs if root_dir.startswith(match)] else: completions += [root_dir[1:] for root_dir in DEFAULT_DEV.root_dirs if root_dir[1:].startswith(match)] else: # This means that there are at least 2 slashes in abs_match. If one # of them matches a board name then we need to remove the board # name from fixed. Since the results from listdir_matches won't # contain the board name, we need to prepend each of the completions. with DEV_LOCK: for dev in DEVS: if abs_match.startswith(dev.name_path): prepend = dev.name_path[:-1] break paths = sorted(auto(listdir_matches, abs_match)) for path in paths: path = prepend + path if path.startswith(strip): path = path[len(strip):] completions.append(escape(path.replace(fixed, '', 1))) return completions
[ "def", "real_filename_complete", "(", "self", ",", "text", ",", "line", ",", "begidx", ",", "endidx", ")", ":", "# line contains the full command line that's been entered so far.", "# text contains the portion of the line that readline is trying to complete", "# text should correspond to line[begidx:endidx]", "#", "# The way the completer works text will start after one of the characters", "# in DELIMS. So if the filename entered so far was \"embedded\\ sp\" then", "# text will point to the s in sp.", "#", "# The following bit of logic backs up to find the real beginning of the", "# filename.", "if", "begidx", ">=", "len", "(", "line", ")", ":", "# This happens when you hit TAB on an empty filename", "before_match", "=", "begidx", "else", ":", "for", "before_match", "in", "range", "(", "begidx", ",", "0", ",", "-", "1", ")", ":", "if", "line", "[", "before_match", "]", "in", "DELIMS", "and", "before_match", ">=", "1", "and", "line", "[", "before_match", "-", "1", "]", "!=", "'\\\\'", ":", "break", "# We set fixed to be the portion of the filename which is before text", "# and match is the full portion of the filename that's been entered so", "# far (that's the part we use for matching files).", "#", "# When we return a list of completions, the bit that we return should", "# just be the portion that we replace 'text' with.", "fixed", "=", "unescape", "(", "line", "[", "before_match", "+", "1", ":", "begidx", "]", ")", "# fixed portion of the match", "match", "=", "unescape", "(", "line", "[", "before_match", "+", "1", ":", "endidx", "]", ")", "# portion to match filenames against", "# We do the following to cover the case that the current directory", "# is / and the path being entered is relative.", "strip", "=", "''", "if", "len", "(", "match", ")", ">", "0", "and", "match", "[", "0", "]", "==", "'/'", ":", "abs_match", "=", "match", "elif", "cur_dir", "==", "'/'", ":", "abs_match", "=", "cur_dir", "+", "match", "strip", "=", "cur_dir", "else", ":", "abs_match", "=", "cur_dir", "+", "'/'", "+", "match", "strip", "=", "cur_dir", "+", "'/'", "completions", "=", "[", "]", "prepend", "=", "''", "if", "abs_match", ".", "rfind", "(", "'/'", ")", "==", "0", ":", "# match is in the root directory", "# This means that we're looking for matches in the root directory", "# (i.e. abs_match is /foo and the user hit TAB).", "# So we'll supply the matching board names as possible completions.", "# Since they're all treated as directories we leave the trailing slash.", "with", "DEV_LOCK", ":", "if", "match", "[", "0", "]", "==", "'/'", ":", "completions", "+=", "[", "dev", ".", "name_path", "for", "dev", "in", "DEVS", "if", "dev", ".", "name_path", ".", "startswith", "(", "abs_match", ")", "]", "else", ":", "completions", "+=", "[", "dev", ".", "name_path", "[", "1", ":", "]", "for", "dev", "in", "DEVS", "if", "dev", ".", "name_path", ".", "startswith", "(", "abs_match", ")", "]", "if", "DEFAULT_DEV", ":", "# Add root directories of the default device (i.e. /flash/ and /sd/)", "if", "match", "[", "0", "]", "==", "'/'", ":", "completions", "+=", "[", "root_dir", "for", "root_dir", "in", "DEFAULT_DEV", ".", "root_dirs", "if", "root_dir", ".", "startswith", "(", "match", ")", "]", "else", ":", "completions", "+=", "[", "root_dir", "[", "1", ":", "]", "for", "root_dir", "in", "DEFAULT_DEV", ".", "root_dirs", "if", "root_dir", "[", "1", ":", "]", ".", "startswith", "(", "match", ")", "]", "else", ":", "# This means that there are at least 2 slashes in abs_match. 
If one", "# of them matches a board name then we need to remove the board", "# name from fixed. Since the results from listdir_matches won't", "# contain the board name, we need to prepend each of the completions.", "with", "DEV_LOCK", ":", "for", "dev", "in", "DEVS", ":", "if", "abs_match", ".", "startswith", "(", "dev", ".", "name_path", ")", ":", "prepend", "=", "dev", ".", "name_path", "[", ":", "-", "1", "]", "break", "paths", "=", "sorted", "(", "auto", "(", "listdir_matches", ",", "abs_match", ")", ")", "for", "path", "in", "paths", ":", "path", "=", "prepend", "+", "path", "if", "path", ".", "startswith", "(", "strip", ")", ":", "path", "=", "path", "[", "len", "(", "strip", ")", ":", "]", "completions", ".", "append", "(", "escape", "(", "path", ".", "replace", "(", "fixed", ",", "''", ",", "1", ")", ")", ")", "return", "completions" ]
Figure out what filenames match the completion.
[ "Figure", "out", "what", "filenames", "match", "the", "completion", "." ]
a92a8fa8074ac792241c83c640a51b394667c324
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L1848-L1927
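The completion logic in the record above ultimately reduces to prefix-matching the last path component against a directory listing and appending '/' to directories. The self-contained sketch below illustrates only that idea; the helper name is hypothetical and it is not rshell's listdir_matches (which also dispatches to remote boards via auto()).

import os

def listdir_matches_sketch(partial_path):
    # Split '/usr/lo' into '/usr' and 'lo', then keep entries that start
    # with the typed prefix, marking directories with a trailing '/'.
    dirname, prefix = os.path.split(partial_path)
    dirname = dirname or '.'
    matches = []
    for name in os.listdir(dirname):
        if name.startswith(prefix):
            full = os.path.join(dirname, name)
            matches.append(name + '/' if os.path.isdir(full) else name)
    return sorted(matches)

# listdir_matches_sketch('/usr/lo')  ->  ['local/'] on a typical Linux system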
234,123
dhylands/rshell
rshell/main.py
Shell.directory_complete
def directory_complete(self, text, line, begidx, endidx): """Figure out what directories match the completion.""" return [filename for filename in self.filename_complete(text, line, begidx, endidx) if filename[-1] == '/']
python
def directory_complete(self, text, line, begidx, endidx): """Figure out what directories match the completion.""" return [filename for filename in self.filename_complete(text, line, begidx, endidx) if filename[-1] == '/']
[ "def", "directory_complete", "(", "self", ",", "text", ",", "line", ",", "begidx", ",", "endidx", ")", ":", "return", "[", "filename", "for", "filename", "in", "self", ".", "filename_complete", "(", "text", ",", "line", ",", "begidx", ",", "endidx", ")", "if", "filename", "[", "-", "1", "]", "==", "'/'", "]" ]
Figure out what directories match the completion.
[ "Figure", "out", "what", "directories", "match", "the", "completion", "." ]
a92a8fa8074ac792241c83c640a51b394667c324
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L1929-L1931
234,124
dhylands/rshell
rshell/main.py
Shell.line_to_args
def line_to_args(self, line): """This will convert the line passed into the do_xxx functions into an array of arguments and handle the Output Redirection Operator. """ # Note: using shlex.split causes quoted substrings to stay together. args = shlex.split(line) self.redirect_filename = '' self.redirect_dev = None redirect_index = -1 if '>' in args: redirect_index = args.index('>') elif '>>' in args: redirect_index = args.index('>>') if redirect_index >= 0: if redirect_index + 1 >= len(args): raise ShellError("> requires a filename") self.redirect_filename = resolve_path(args[redirect_index + 1]) rmode = auto(get_mode, os.path.dirname(self.redirect_filename)) if not mode_isdir(rmode): raise ShellError("Unable to redirect to '%s', directory doesn't exist" % self.redirect_filename) if args[redirect_index] == '>': self.redirect_mode = 'w' if DEBUG: print('Redirecting (write) to', self.redirect_filename) else: self.redirect_mode = 'a' if DEBUG: print('Redirecting (append) to', self.redirect_filename) self.redirect_dev, self.redirect_filename = get_dev_and_path(self.redirect_filename) try: if self.redirect_dev is None: self.stdout = SmartFile(open(self.redirect_filename, self.redirect_mode)) else: # Redirecting to a remote device. We collect the results locally # and copy them to the remote device at the end of the command. self.stdout = SmartFile(tempfile.TemporaryFile(mode='w+')) except OSError as err: raise ShellError(err) del args[redirect_index + 1] del args[redirect_index] curr_cmd, _, _ = self.parseline(self.lastcmd) parser = self.create_argparser(curr_cmd) if parser: args = parser.parse_args(args) return args
python
def line_to_args(self, line): """This will convert the line passed into the do_xxx functions into an array of arguments and handle the Output Redirection Operator. """ # Note: using shlex.split causes quoted substrings to stay together. args = shlex.split(line) self.redirect_filename = '' self.redirect_dev = None redirect_index = -1 if '>' in args: redirect_index = args.index('>') elif '>>' in args: redirect_index = args.index('>>') if redirect_index >= 0: if redirect_index + 1 >= len(args): raise ShellError("> requires a filename") self.redirect_filename = resolve_path(args[redirect_index + 1]) rmode = auto(get_mode, os.path.dirname(self.redirect_filename)) if not mode_isdir(rmode): raise ShellError("Unable to redirect to '%s', directory doesn't exist" % self.redirect_filename) if args[redirect_index] == '>': self.redirect_mode = 'w' if DEBUG: print('Redirecting (write) to', self.redirect_filename) else: self.redirect_mode = 'a' if DEBUG: print('Redirecting (append) to', self.redirect_filename) self.redirect_dev, self.redirect_filename = get_dev_and_path(self.redirect_filename) try: if self.redirect_dev is None: self.stdout = SmartFile(open(self.redirect_filename, self.redirect_mode)) else: # Redirecting to a remote device. We collect the results locally # and copy them to the remote device at the end of the command. self.stdout = SmartFile(tempfile.TemporaryFile(mode='w+')) except OSError as err: raise ShellError(err) del args[redirect_index + 1] del args[redirect_index] curr_cmd, _, _ = self.parseline(self.lastcmd) parser = self.create_argparser(curr_cmd) if parser: args = parser.parse_args(args) return args
[ "def", "line_to_args", "(", "self", ",", "line", ")", ":", "# Note: using shlex.split causes quoted substrings to stay together.", "args", "=", "shlex", ".", "split", "(", "line", ")", "self", ".", "redirect_filename", "=", "''", "self", ".", "redirect_dev", "=", "None", "redirect_index", "=", "-", "1", "if", "'>'", "in", "args", ":", "redirect_index", "=", "args", ".", "index", "(", "'>'", ")", "elif", "'>>'", "in", "args", ":", "redirect_index", "=", "args", ".", "index", "(", "'>>'", ")", "if", "redirect_index", ">=", "0", ":", "if", "redirect_index", "+", "1", ">=", "len", "(", "args", ")", ":", "raise", "ShellError", "(", "\"> requires a filename\"", ")", "self", ".", "redirect_filename", "=", "resolve_path", "(", "args", "[", "redirect_index", "+", "1", "]", ")", "rmode", "=", "auto", "(", "get_mode", ",", "os", ".", "path", ".", "dirname", "(", "self", ".", "redirect_filename", ")", ")", "if", "not", "mode_isdir", "(", "rmode", ")", ":", "raise", "ShellError", "(", "\"Unable to redirect to '%s', directory doesn't exist\"", "%", "self", ".", "redirect_filename", ")", "if", "args", "[", "redirect_index", "]", "==", "'>'", ":", "self", ".", "redirect_mode", "=", "'w'", "if", "DEBUG", ":", "print", "(", "'Redirecting (write) to'", ",", "self", ".", "redirect_filename", ")", "else", ":", "self", ".", "redirect_mode", "=", "'a'", "if", "DEBUG", ":", "print", "(", "'Redirecting (append) to'", ",", "self", ".", "redirect_filename", ")", "self", ".", "redirect_dev", ",", "self", ".", "redirect_filename", "=", "get_dev_and_path", "(", "self", ".", "redirect_filename", ")", "try", ":", "if", "self", ".", "redirect_dev", "is", "None", ":", "self", ".", "stdout", "=", "SmartFile", "(", "open", "(", "self", ".", "redirect_filename", ",", "self", ".", "redirect_mode", ")", ")", "else", ":", "# Redirecting to a remote device. We collect the results locally", "# and copy them to the remote device at the end of the command.", "self", ".", "stdout", "=", "SmartFile", "(", "tempfile", ".", "TemporaryFile", "(", "mode", "=", "'w+'", ")", ")", "except", "OSError", "as", "err", ":", "raise", "ShellError", "(", "err", ")", "del", "args", "[", "redirect_index", "+", "1", "]", "del", "args", "[", "redirect_index", "]", "curr_cmd", ",", "_", ",", "_", "=", "self", ".", "parseline", "(", "self", ".", "lastcmd", ")", "parser", "=", "self", ".", "create_argparser", "(", "curr_cmd", ")", "if", "parser", ":", "args", "=", "parser", ".", "parse_args", "(", "args", ")", "return", "args" ]
This will convert the line passed into the do_xxx functions into an array of arguments and handle the Output Redirection Operator.
[ "This", "will", "convert", "the", "line", "passed", "into", "the", "do_xxx", "functions", "into", "an", "array", "of", "arguments", "and", "handle", "the", "Output", "Redirection", "Operator", "." ]
a92a8fa8074ac792241c83c640a51b394667c324
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L1933-L1979
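A minimal sketch of the redirection handling that line_to_args performs above: shlex keeps quoted filenames as single arguments, and the '>' / '>>' operator plus its target are located by index and removed. The helper name is illustrative and this is not rshell's code; it also skips the device and directory checks shown in the record.

import shlex

def split_redirect(line):
    args = shlex.split(line)            # 'cat "a b.txt" > out' -> ['cat', 'a b.txt', '>', 'out']
    for op in ('>', '>>'):
        if op in args:
            i = args.index(op)
            target = args[i + 1]        # assumes a filename follows the operator
            mode = 'w' if op == '>' else 'a'
            return args[:i] + args[i + 2:], target, mode
    return args, None, None

# split_redirect('cat "a b.txt" >> log.txt')  ->  (['cat', 'a b.txt'], 'log.txt', 'a')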
234,125
dhylands/rshell
rshell/main.py
Shell.do_cat
def do_cat(self, line): """cat FILENAME... Concatenates files and sends to stdout. """ # note: when we get around to supporting cat from stdin, we'll need # to write stdin to a temp file, and then copy the file # since we need to know the filesize when copying to the pyboard. args = self.line_to_args(line) for filename in args: filename = resolve_path(filename) mode = auto(get_mode, filename) if not mode_exists(mode): print_err("Cannot access '%s': No such file" % filename) continue if not mode_isfile(mode): print_err("'%s': is not a file" % filename) continue cat(filename, self.stdout)
python
def do_cat(self, line): """cat FILENAME... Concatenates files and sends to stdout. """ # note: when we get around to supporting cat from stdin, we'll need # to write stdin to a temp file, and then copy the file # since we need to know the filesize when copying to the pyboard. args = self.line_to_args(line) for filename in args: filename = resolve_path(filename) mode = auto(get_mode, filename) if not mode_exists(mode): print_err("Cannot access '%s': No such file" % filename) continue if not mode_isfile(mode): print_err("'%s': is not a file" % filename) continue cat(filename, self.stdout)
[ "def", "do_cat", "(", "self", ",", "line", ")", ":", "# note: when we get around to supporting cat from stdin, we'll need", "# to write stdin to a temp file, and then copy the file", "# since we need to know the filesize when copying to the pyboard.", "args", "=", "self", ".", "line_to_args", "(", "line", ")", "for", "filename", "in", "args", ":", "filename", "=", "resolve_path", "(", "filename", ")", "mode", "=", "auto", "(", "get_mode", ",", "filename", ")", "if", "not", "mode_exists", "(", "mode", ")", ":", "print_err", "(", "\"Cannot access '%s': No such file\"", "%", "filename", ")", "continue", "if", "not", "mode_isfile", "(", "mode", ")", ":", "print_err", "(", "\"'%s': is not a file\"", "%", "filename", ")", "continue", "cat", "(", "filename", ",", "self", ".", "stdout", ")" ]
cat FILENAME... Concatenates files and sends to stdout.
[ "cat", "FILENAME", "..." ]
a92a8fa8074ac792241c83c640a51b394667c324
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L2015-L2033
234,126
dhylands/rshell
rshell/main.py
Shell.do_echo
def do_echo(self, line): """echo TEXT... Display a line of text. """ args = self.line_to_args(line) self.print(*args)
python
def do_echo(self, line): """echo TEXT... Display a line of text. """ args = self.line_to_args(line) self.print(*args)
[ "def", "do_echo", "(", "self", ",", "line", ")", ":", "args", "=", "self", ".", "line_to_args", "(", "line", ")", "self", ".", "print", "(", "*", "args", ")" ]
echo TEXT... Display a line of text.
[ "echo", "TEXT", "..." ]
a92a8fa8074ac792241c83c640a51b394667c324
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L2181-L2187
234,127
dhylands/rshell
rshell/main.py
Shell.do_mkdir
def do_mkdir(self, line): """mkdir DIRECTORY... Creates one or more directories. """ args = self.line_to_args(line) for filename in args: filename = resolve_path(filename) if not mkdir(filename): print_err('Unable to create %s' % filename)
python
def do_mkdir(self, line): """mkdir DIRECTORY... Creates one or more directories. """ args = self.line_to_args(line) for filename in args: filename = resolve_path(filename) if not mkdir(filename): print_err('Unable to create %s' % filename)
[ "def", "do_mkdir", "(", "self", ",", "line", ")", ":", "args", "=", "self", ".", "line_to_args", "(", "line", ")", "for", "filename", "in", "args", ":", "filename", "=", "resolve_path", "(", "filename", ")", "if", "not", "mkdir", "(", "filename", ")", ":", "print_err", "(", "'Unable to create %s'", "%", "filename", ")" ]
mkdir DIRECTORY... Creates one or more directories.
[ "mkdir", "DIRECTORY", "..." ]
a92a8fa8074ac792241c83c640a51b394667c324
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L2379-L2388
234,128
dhylands/rshell
rshell/main.py
Shell.repl_serial_to_stdout
def repl_serial_to_stdout(self, dev): """Runs as a thread which has a sole purpose of readding bytes from the serial port and writing them to stdout. Used by do_repl. """ with self.serial_reader_running: try: save_timeout = dev.timeout # Set a timeout so that the read returns periodically with no data # and allows us to check whether the main thread wants us to quit. dev.timeout = 1 while not self.quit_serial_reader: try: char = dev.read(1) except serial.serialutil.SerialException: # This happens if the pyboard reboots, or a USB port # goes away. return except TypeError: # This is a bug in serialposix.py starting with python 3.3 # which causes a TypeError during the handling of the # select.error. So we treat this the same as # serial.serialutil.SerialException: return except ConnectionResetError: # This happens over a telnet session, if it resets return if not char: # This means that the read timed out. We'll check the quit # flag and return if needed if self.quit_when_no_output: break continue self.stdout.write(char) self.stdout.flush() dev.timeout = save_timeout except DeviceError: # The device is no longer present. return
python
def repl_serial_to_stdout(self, dev): """Runs as a thread which has a sole purpose of readding bytes from the serial port and writing them to stdout. Used by do_repl. """ with self.serial_reader_running: try: save_timeout = dev.timeout # Set a timeout so that the read returns periodically with no data # and allows us to check whether the main thread wants us to quit. dev.timeout = 1 while not self.quit_serial_reader: try: char = dev.read(1) except serial.serialutil.SerialException: # This happens if the pyboard reboots, or a USB port # goes away. return except TypeError: # This is a bug in serialposix.py starting with python 3.3 # which causes a TypeError during the handling of the # select.error. So we treat this the same as # serial.serialutil.SerialException: return except ConnectionResetError: # This happens over a telnet session, if it resets return if not char: # This means that the read timed out. We'll check the quit # flag and return if needed if self.quit_when_no_output: break continue self.stdout.write(char) self.stdout.flush() dev.timeout = save_timeout except DeviceError: # The device is no longer present. return
[ "def", "repl_serial_to_stdout", "(", "self", ",", "dev", ")", ":", "with", "self", ".", "serial_reader_running", ":", "try", ":", "save_timeout", "=", "dev", ".", "timeout", "# Set a timeout so that the read returns periodically with no data", "# and allows us to check whether the main thread wants us to quit.", "dev", ".", "timeout", "=", "1", "while", "not", "self", ".", "quit_serial_reader", ":", "try", ":", "char", "=", "dev", ".", "read", "(", "1", ")", "except", "serial", ".", "serialutil", ".", "SerialException", ":", "# This happens if the pyboard reboots, or a USB port", "# goes away.", "return", "except", "TypeError", ":", "# This is a bug in serialposix.py starting with python 3.3", "# which causes a TypeError during the handling of the", "# select.error. So we treat this the same as", "# serial.serialutil.SerialException:", "return", "except", "ConnectionResetError", ":", "# This happens over a telnet session, if it resets", "return", "if", "not", "char", ":", "# This means that the read timed out. We'll check the quit", "# flag and return if needed", "if", "self", ".", "quit_when_no_output", ":", "break", "continue", "self", ".", "stdout", ".", "write", "(", "char", ")", "self", ".", "stdout", ".", "flush", "(", ")", "dev", ".", "timeout", "=", "save_timeout", "except", "DeviceError", ":", "# The device is no longer present.", "return" ]
Runs as a thread which has a sole purpose of readding bytes from the serial port and writing them to stdout. Used by do_repl.
[ "Runs", "as", "a", "thread", "which", "has", "a", "sole", "purpose", "of", "readding", "bytes", "from", "the", "serial", "port", "and", "writing", "them", "to", "stdout", ".", "Used", "by", "do_repl", "." ]
a92a8fa8074ac792241c83c640a51b394667c324
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L2390-L2427
234,129
hyperledger/indy-plenum
stp_core/loop/motor.py
Motor.set_status
def set_status(self, value): """ Set the status of the motor to the specified value if not already set. """ if not self._status == value: old = self._status self._status = value logger.info("{} changing status from {} to {}".format(self, old.name, value.name)) self._statusChanged(old, value)
python
def set_status(self, value): """ Set the status of the motor to the specified value if not already set. """ if not self._status == value: old = self._status self._status = value logger.info("{} changing status from {} to {}".format(self, old.name, value.name)) self._statusChanged(old, value)
[ "def", "set_status", "(", "self", ",", "value", ")", ":", "if", "not", "self", ".", "_status", "==", "value", ":", "old", "=", "self", ".", "_status", "self", ".", "_status", "=", "value", "logger", ".", "info", "(", "\"{} changing status from {} to {}\"", ".", "format", "(", "self", ",", "old", ".", "name", ",", "value", ".", "name", ")", ")", "self", ".", "_statusChanged", "(", "old", ",", "value", ")" ]
Set the status of the motor to the specified value if not already set.
[ "Set", "the", "status", "of", "the", "motor", "to", "the", "specified", "value", "if", "not", "already", "set", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/stp_core/loop/motor.py#L28-L36
234,130
hyperledger/indy-plenum
stp_core/loop/motor.py
Motor.stop
def stop(self, *args, **kwargs): """ Set the status to Status.stopping and also call `onStopping` with the provided args and kwargs. """ if self.status in (Status.stopping, Status.stopped): logger.debug("{} is already {}".format(self, self.status.name)) else: self.status = Status.stopping self.onStopping(*args, **kwargs) self.status = Status.stopped
python
def stop(self, *args, **kwargs): """ Set the status to Status.stopping and also call `onStopping` with the provided args and kwargs. """ if self.status in (Status.stopping, Status.stopped): logger.debug("{} is already {}".format(self, self.status.name)) else: self.status = Status.stopping self.onStopping(*args, **kwargs) self.status = Status.stopped
[ "def", "stop", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "status", "in", "(", "Status", ".", "stopping", ",", "Status", ".", "stopped", ")", ":", "logger", ".", "debug", "(", "\"{} is already {}\"", ".", "format", "(", "self", ",", "self", ".", "status", ".", "name", ")", ")", "else", ":", "self", ".", "status", "=", "Status", ".", "stopping", "self", ".", "onStopping", "(", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "status", "=", "Status", ".", "stopped" ]
Set the status to Status.stopping and also call `onStopping` with the provided args and kwargs.
[ "Set", "the", "status", "to", "Status", ".", "stopping", "and", "also", "call", "onStopping", "with", "the", "provided", "args", "and", "kwargs", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/stp_core/loop/motor.py#L58-L68
234,131
hyperledger/indy-plenum
plenum/server/primary_selector.py
PrimarySelector.next_primary_replica_name_for_master
def next_primary_replica_name_for_master(self, node_reg, node_ids): """ Returns name and corresponding instance name of the next node which is supposed to be a new Primary. In fact it is not round-robin on this abstraction layer as currently the primary of master instance is pointed directly depending on view number, instance id and total number of nodes. But since the view number is incremented by 1 before primary selection then current approach may be treated as round robin. """ name = self._next_primary_node_name_for_master(node_reg, node_ids) return name, Replica.generateName(nodeName=name, instId=0)
python
def next_primary_replica_name_for_master(self, node_reg, node_ids): """ Returns name and corresponding instance name of the next node which is supposed to be a new Primary. In fact it is not round-robin on this abstraction layer as currently the primary of master instance is pointed directly depending on view number, instance id and total number of nodes. But since the view number is incremented by 1 before primary selection then current approach may be treated as round robin. """ name = self._next_primary_node_name_for_master(node_reg, node_ids) return name, Replica.generateName(nodeName=name, instId=0)
[ "def", "next_primary_replica_name_for_master", "(", "self", ",", "node_reg", ",", "node_ids", ")", ":", "name", "=", "self", ".", "_next_primary_node_name_for_master", "(", "node_reg", ",", "node_ids", ")", "return", "name", ",", "Replica", ".", "generateName", "(", "nodeName", "=", "name", ",", "instId", "=", "0", ")" ]
Returns name and corresponding instance name of the next node which is supposed to be a new Primary. In fact it is not round-robin on this abstraction layer as currently the primary of master instance is pointed directly depending on view number, instance id and total number of nodes. But since the view number is incremented by 1 before primary selection then current approach may be treated as round robin.
[ "Returns", "name", "and", "corresponding", "instance", "name", "of", "the", "next", "node", "which", "is", "supposed", "to", "be", "a", "new", "Primary", ".", "In", "fact", "it", "is", "not", "round", "-", "robin", "on", "this", "abstraction", "layer", "as", "currently", "the", "primary", "of", "master", "instance", "is", "pointed", "directly", "depending", "on", "view", "number", "instance", "id", "and", "total", "number", "of", "nodes", ".", "But", "since", "the", "view", "number", "is", "incremented", "by", "1", "before", "primary", "selection", "then", "current", "approach", "may", "be", "treated", "as", "round", "robin", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/primary_selector.py#L61-L72
234,132
hyperledger/indy-plenum
plenum/server/primary_selector.py
PrimarySelector.next_primary_replica_name_for_backup
def next_primary_replica_name_for_backup(self, instance_id, master_primary_rank, primaries, node_reg, node_ids): """ Returns name and corresponding instance name of the next node which is supposed to be a new Primary for backup instance in round-robin fashion starting from primary of master instance. """ if node_reg is None: node_reg = self.node.nodeReg total_nodes = len(node_reg) rank = (master_primary_rank + 1) % total_nodes name = self.node.get_name_by_rank(rank, node_reg, node_ids) while name in primaries: rank = (rank + 1) % total_nodes name = self.node.get_name_by_rank(rank, node_reg, node_ids) return name, Replica.generateName(nodeName=name, instId=instance_id)
python
def next_primary_replica_name_for_backup(self, instance_id, master_primary_rank, primaries, node_reg, node_ids): """ Returns name and corresponding instance name of the next node which is supposed to be a new Primary for backup instance in round-robin fashion starting from primary of master instance. """ if node_reg is None: node_reg = self.node.nodeReg total_nodes = len(node_reg) rank = (master_primary_rank + 1) % total_nodes name = self.node.get_name_by_rank(rank, node_reg, node_ids) while name in primaries: rank = (rank + 1) % total_nodes name = self.node.get_name_by_rank(rank, node_reg, node_ids) return name, Replica.generateName(nodeName=name, instId=instance_id)
[ "def", "next_primary_replica_name_for_backup", "(", "self", ",", "instance_id", ",", "master_primary_rank", ",", "primaries", ",", "node_reg", ",", "node_ids", ")", ":", "if", "node_reg", "is", "None", ":", "node_reg", "=", "self", ".", "node", ".", "nodeReg", "total_nodes", "=", "len", "(", "node_reg", ")", "rank", "=", "(", "master_primary_rank", "+", "1", ")", "%", "total_nodes", "name", "=", "self", ".", "node", ".", "get_name_by_rank", "(", "rank", ",", "node_reg", ",", "node_ids", ")", "while", "name", "in", "primaries", ":", "rank", "=", "(", "rank", "+", "1", ")", "%", "total_nodes", "name", "=", "self", ".", "node", ".", "get_name_by_rank", "(", "rank", ",", "node_reg", ",", "node_ids", ")", "return", "name", ",", "Replica", ".", "generateName", "(", "nodeName", "=", "name", ",", "instId", "=", "instance_id", ")" ]
Returns name and corresponding instance name of the next node which is supposed to be a new Primary for backup instance in round-robin fashion starting from primary of master instance.
[ "Returns", "name", "and", "corresponding", "instance", "name", "of", "the", "next", "node", "which", "is", "supposed", "to", "be", "a", "new", "Primary", "for", "backup", "instance", "in", "round", "-", "robin", "fashion", "starting", "from", "primary", "of", "master", "instance", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/primary_selector.py#L74-L89
234,133
hyperledger/indy-plenum
plenum/server/primary_selector.py
PrimarySelector.process_selection
def process_selection(self, instance_count, node_reg, node_ids): # Select primaries for current view_no if instance_count == 0: return [] ''' Build a set of names of primaries, it is needed to avoid duplicates of primary nodes for different replicas. ''' primaries = [] primary_rank = None for i in range(instance_count): if i == 0: primary_name = self._next_primary_node_name_for_master(node_reg, node_ids) primary_rank = self.node.get_rank_by_name(primary_name, node_reg, node_ids) if primary_rank is None: raise LogicError('primary_rank must not be None') else: primary_name, _ = self.next_primary_replica_name_for_backup( i, primary_rank, primaries, node_reg, node_ids) primaries.append(primary_name) logger.display("{} selected primary {} for instance {} (view {})" .format(PRIMARY_SELECTION_PREFIX, primary_name, i, self.viewNo), extra={"cli": "ANNOUNCE", "tags": ["node-election"]}) if len(primaries) != instance_count: raise LogicError('instances inconsistency') if len(primaries) != len(set(primaries)): raise LogicError('repeating instances') return primaries
python
def process_selection(self, instance_count, node_reg, node_ids): # Select primaries for current view_no if instance_count == 0: return [] ''' Build a set of names of primaries, it is needed to avoid duplicates of primary nodes for different replicas. ''' primaries = [] primary_rank = None for i in range(instance_count): if i == 0: primary_name = self._next_primary_node_name_for_master(node_reg, node_ids) primary_rank = self.node.get_rank_by_name(primary_name, node_reg, node_ids) if primary_rank is None: raise LogicError('primary_rank must not be None') else: primary_name, _ = self.next_primary_replica_name_for_backup( i, primary_rank, primaries, node_reg, node_ids) primaries.append(primary_name) logger.display("{} selected primary {} for instance {} (view {})" .format(PRIMARY_SELECTION_PREFIX, primary_name, i, self.viewNo), extra={"cli": "ANNOUNCE", "tags": ["node-election"]}) if len(primaries) != instance_count: raise LogicError('instances inconsistency') if len(primaries) != len(set(primaries)): raise LogicError('repeating instances') return primaries
[ "def", "process_selection", "(", "self", ",", "instance_count", ",", "node_reg", ",", "node_ids", ")", ":", "# Select primaries for current view_no", "if", "instance_count", "==", "0", ":", "return", "[", "]", "primaries", "=", "[", "]", "primary_rank", "=", "None", "for", "i", "in", "range", "(", "instance_count", ")", ":", "if", "i", "==", "0", ":", "primary_name", "=", "self", ".", "_next_primary_node_name_for_master", "(", "node_reg", ",", "node_ids", ")", "primary_rank", "=", "self", ".", "node", ".", "get_rank_by_name", "(", "primary_name", ",", "node_reg", ",", "node_ids", ")", "if", "primary_rank", "is", "None", ":", "raise", "LogicError", "(", "'primary_rank must not be None'", ")", "else", ":", "primary_name", ",", "_", "=", "self", ".", "next_primary_replica_name_for_backup", "(", "i", ",", "primary_rank", ",", "primaries", ",", "node_reg", ",", "node_ids", ")", "primaries", ".", "append", "(", "primary_name", ")", "logger", ".", "display", "(", "\"{} selected primary {} for instance {} (view {})\"", ".", "format", "(", "PRIMARY_SELECTION_PREFIX", ",", "primary_name", ",", "i", ",", "self", ".", "viewNo", ")", ",", "extra", "=", "{", "\"cli\"", ":", "\"ANNOUNCE\"", ",", "\"tags\"", ":", "[", "\"node-election\"", "]", "}", ")", "if", "len", "(", "primaries", ")", "!=", "instance_count", ":", "raise", "LogicError", "(", "'instances inconsistency'", ")", "if", "len", "(", "primaries", ")", "!=", "len", "(", "set", "(", "primaries", ")", ")", ":", "raise", "LogicError", "(", "'repeating instances'", ")", "return", "primaries" ]
Build a set of names of primaries, it is needed to avoid duplicates of primary nodes for different replicas.
[ "Build", "a", "set", "of", "names", "of", "primaries", "it", "is", "needed", "to", "avoid", "duplicates", "of", "primary", "nodes", "for", "different", "replicas", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/primary_selector.py#L147-L182
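The selection methods above amount to a round-robin over node ranks: the master primary follows the view number, and each backup instance takes the next rank (modulo the number of nodes) that is not already a primary. Below is a standalone sketch under that assumption; it does not call into the plenum classes.

def select_primaries_sketch(view_no, node_names, instance_count):
    total = len(node_names)
    master_rank = view_no % total             # assumption: master rank tracks view_no round-robin
    primaries = [node_names[master_rank]]
    rank = master_rank
    for _ in range(1, instance_count):
        rank = (rank + 1) % total
        while node_names[rank] in primaries:  # skip ranks already chosen as primaries
            rank = (rank + 1) % total
        primaries.append(node_names[rank])
    return primaries

# select_primaries_sketch(2, ['Alpha', 'Beta', 'Gamma', 'Delta'], 3)
# ->  ['Gamma', 'Delta', 'Alpha']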
234,134
hyperledger/indy-plenum
plenum/server/replicas.py
Replicas.take_ordereds_out_of_turn
def take_ordereds_out_of_turn(self) -> tuple: """ Takes all Ordered messages from outbox out of turn """ for replica in self._replicas.values(): yield replica.instId, replica._remove_ordered_from_queue()
python
def take_ordereds_out_of_turn(self) -> tuple: """ Takes all Ordered messages from outbox out of turn """ for replica in self._replicas.values(): yield replica.instId, replica._remove_ordered_from_queue()
[ "def", "take_ordereds_out_of_turn", "(", "self", ")", "->", "tuple", ":", "for", "replica", "in", "self", ".", "_replicas", ".", "values", "(", ")", ":", "yield", "replica", ".", "instId", ",", "replica", ".", "_remove_ordered_from_queue", "(", ")" ]
Takes all Ordered messages from outbox out of turn
[ "Takes", "all", "Ordered", "messages", "from", "outbox", "out", "of", "turn" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replicas.py#L131-L136
234,135
hyperledger/indy-plenum
plenum/server/replicas.py
Replicas._new_replica
def _new_replica(self, instance_id: int, is_master: bool, bls_bft: BlsBft) -> Replica: """ Create a new replica with the specified parameters. """ return self._replica_class(self._node, instance_id, self._config, is_master, bls_bft, self._metrics)
python
def _new_replica(self, instance_id: int, is_master: bool, bls_bft: BlsBft) -> Replica: """ Create a new replica with the specified parameters. """ return self._replica_class(self._node, instance_id, self._config, is_master, bls_bft, self._metrics)
[ "def", "_new_replica", "(", "self", ",", "instance_id", ":", "int", ",", "is_master", ":", "bool", ",", "bls_bft", ":", "BlsBft", ")", "->", "Replica", ":", "return", "self", ".", "_replica_class", "(", "self", ".", "_node", ",", "instance_id", ",", "self", ".", "_config", ",", "is_master", ",", "bls_bft", ",", "self", ".", "_metrics", ")" ]
Create a new replica with the specified parameters.
[ "Create", "a", "new", "replica", "with", "the", "specified", "parameters", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replicas.py#L138-L142
234,136
hyperledger/indy-plenum
plenum/common/throttler.py
Throttler.acquire
def acquire(self): """ Acquires lock for action. :return: True and 0.0 if lock successfully acquired or False and number of seconds to wait before the next try """ now = self.get_current_time() logger.debug("now: {}, len(actionsLog): {}".format( now, len(self.actionsLog))) self._trimActionsLog(now) logger.debug("after trim, len(actionsLog): {}".format( len(self.actionsLog))) if len(self.actionsLog) == 0: self.actionsLog.append(now) logger.debug("len(actionsLog) was 0, after append, len(actionsLog):" " {}".format(len(self.actionsLog))) return True, 0.0 timeToWaitAfterPreviousTry = self.delayFunction(len(self.actionsLog)) timePassed = now - self.actionsLog[-1] logger.debug("timeToWaitAfterPreviousTry: {}, timePassed: {}". format(timeToWaitAfterPreviousTry, timePassed)) if timeToWaitAfterPreviousTry < timePassed: self.actionsLog.append(now) logger.debug( "timeToWaitAfterPreviousTry < timePassed was true, after " "append, len(actionsLog): {}".format(len(self.actionsLog))) return True, 0.0 else: logger.debug( "timeToWaitAfterPreviousTry < timePassed was false, " "len(actionsLog): {}".format(len(self.actionsLog))) return False, timeToWaitAfterPreviousTry - timePassed
python
def acquire(self): """ Acquires lock for action. :return: True and 0.0 if lock successfully acquired or False and number of seconds to wait before the next try """ now = self.get_current_time() logger.debug("now: {}, len(actionsLog): {}".format( now, len(self.actionsLog))) self._trimActionsLog(now) logger.debug("after trim, len(actionsLog): {}".format( len(self.actionsLog))) if len(self.actionsLog) == 0: self.actionsLog.append(now) logger.debug("len(actionsLog) was 0, after append, len(actionsLog):" " {}".format(len(self.actionsLog))) return True, 0.0 timeToWaitAfterPreviousTry = self.delayFunction(len(self.actionsLog)) timePassed = now - self.actionsLog[-1] logger.debug("timeToWaitAfterPreviousTry: {}, timePassed: {}". format(timeToWaitAfterPreviousTry, timePassed)) if timeToWaitAfterPreviousTry < timePassed: self.actionsLog.append(now) logger.debug( "timeToWaitAfterPreviousTry < timePassed was true, after " "append, len(actionsLog): {}".format(len(self.actionsLog))) return True, 0.0 else: logger.debug( "timeToWaitAfterPreviousTry < timePassed was false, " "len(actionsLog): {}".format(len(self.actionsLog))) return False, timeToWaitAfterPreviousTry - timePassed
[ "def", "acquire", "(", "self", ")", ":", "now", "=", "self", ".", "get_current_time", "(", ")", "logger", ".", "debug", "(", "\"now: {}, len(actionsLog): {}\"", ".", "format", "(", "now", ",", "len", "(", "self", ".", "actionsLog", ")", ")", ")", "self", ".", "_trimActionsLog", "(", "now", ")", "logger", ".", "debug", "(", "\"after trim, len(actionsLog): {}\"", ".", "format", "(", "len", "(", "self", ".", "actionsLog", ")", ")", ")", "if", "len", "(", "self", ".", "actionsLog", ")", "==", "0", ":", "self", ".", "actionsLog", ".", "append", "(", "now", ")", "logger", ".", "debug", "(", "\"len(actionsLog) was 0, after append, len(actionsLog):\"", "\" {}\"", ".", "format", "(", "len", "(", "self", ".", "actionsLog", ")", ")", ")", "return", "True", ",", "0.0", "timeToWaitAfterPreviousTry", "=", "self", ".", "delayFunction", "(", "len", "(", "self", ".", "actionsLog", ")", ")", "timePassed", "=", "now", "-", "self", ".", "actionsLog", "[", "-", "1", "]", "logger", ".", "debug", "(", "\"timeToWaitAfterPreviousTry: {}, timePassed: {}\"", ".", "format", "(", "timeToWaitAfterPreviousTry", ",", "timePassed", ")", ")", "if", "timeToWaitAfterPreviousTry", "<", "timePassed", ":", "self", ".", "actionsLog", ".", "append", "(", "now", ")", "logger", ".", "debug", "(", "\"timeToWaitAfterPreviousTry < timePassed was true, after \"", "\"append, len(actionsLog): {}\"", ".", "format", "(", "len", "(", "self", ".", "actionsLog", ")", ")", ")", "return", "True", ",", "0.0", "else", ":", "logger", ".", "debug", "(", "\"timeToWaitAfterPreviousTry < timePassed was false, \"", "\"len(actionsLog): {}\"", ".", "format", "(", "len", "(", "self", ".", "actionsLog", ")", ")", ")", "return", "False", ",", "timeToWaitAfterPreviousTry", "-", "timePassed" ]
Acquires lock for action. :return: True and 0.0 if lock successfully acquired or False and number of seconds to wait before the next try
[ "Acquires", "lock", "for", "action", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/common/throttler.py#L28-L60
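The decision rule inside acquire() can be exercised on its own: allow the first action immediately, otherwise compare the time since the last action with a delay derived from how many actions are logged. The class below is a simplified illustration only (it omits the window trimming done by _trimActionsLog) and is not plenum's Throttler, whose constructor is not shown in this record.

import time

class TinyThrottler:
    def __init__(self, delay=lambda n: 2.0 * n):
        self.delay = delay      # seconds to wait, given the number of logged actions
        self.log = []           # timestamps of recent actions

    def acquire(self):
        now = time.monotonic()
        if not self.log:
            self.log.append(now)
            return True, 0.0
        wait = self.delay(len(self.log))
        passed = now - self.log[-1]
        if wait < passed:
            self.log.append(now)
            return True, 0.0
        return False, wait - passed

# t = TinyThrottler(); t.acquire() -> (True, 0.0); an immediate second call
# returns (False, <seconds left to wait>).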
234,137
hyperledger/indy-plenum
plenum/common/tools.py
lazy_field
def lazy_field(prop): """ Decorator which helps in creating lazy properties """ @property def wrapper(self): if self not in _lazy_value_cache: _lazy_value_cache[self] = {} self_cache = _lazy_value_cache[self] if prop in self_cache: return self_cache[prop] prop_value = prop(self) self_cache[prop] = prop_value return prop_value return wrapper
python
def lazy_field(prop): """ Decorator which helps in creating lazy properties """ @property def wrapper(self): if self not in _lazy_value_cache: _lazy_value_cache[self] = {} self_cache = _lazy_value_cache[self] if prop in self_cache: return self_cache[prop] prop_value = prop(self) self_cache[prop] = prop_value return prop_value return wrapper
[ "def", "lazy_field", "(", "prop", ")", ":", "@", "property", "def", "wrapper", "(", "self", ")", ":", "if", "self", "not", "in", "_lazy_value_cache", ":", "_lazy_value_cache", "[", "self", "]", "=", "{", "}", "self_cache", "=", "_lazy_value_cache", "[", "self", "]", "if", "prop", "in", "self_cache", ":", "return", "self_cache", "[", "prop", "]", "prop_value", "=", "prop", "(", "self", ")", "self_cache", "[", "prop", "]", "=", "prop_value", "return", "prop_value", "return", "wrapper" ]
Decorator which helps in creating lazy properties
[ "Decorator", "which", "helps", "in", "creating", "lazy", "properties" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/common/tools.py#L6-L20
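A usage sketch for the decorator above; the import path is taken from this record (plenum/common/tools.py), while the Circle class is purely illustrative. The decorated method becomes a property whose value is computed once per instance and then served from the module-level cache.

from plenum.common.tools import lazy_field

class Circle:
    def __init__(self, radius):
        self.radius = radius

    @lazy_field
    def area(self):
        print("computing area")            # runs only on the first access
        return 3.14159 * self.radius ** 2

c = Circle(2)
c.area    # prints "computing area" and returns 12.56636
c.area    # second access is served from the cache, nothing is printed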
234,138
hyperledger/indy-plenum
stp_core/network/network_interface.py
NetworkInterface.getRemote
def getRemote(self, name: str = None, ha: HA = None): """ Find the remote by name or ha. :param name: the name of the remote to find :param ha: host address pair the remote to find :raises: RemoteNotFound """ return self.findInRemotesByName(name) if name else \ self.findInRemotesByHA(ha)
python
def getRemote(self, name: str = None, ha: HA = None): """ Find the remote by name or ha. :param name: the name of the remote to find :param ha: host address pair the remote to find :raises: RemoteNotFound """ return self.findInRemotesByName(name) if name else \ self.findInRemotesByHA(ha)
[ "def", "getRemote", "(", "self", ",", "name", ":", "str", "=", "None", ",", "ha", ":", "HA", "=", "None", ")", ":", "return", "self", ".", "findInRemotesByName", "(", "name", ")", "if", "name", "else", "self", ".", "findInRemotesByHA", "(", "ha", ")" ]
Find the remote by name or ha. :param name: the name of the remote to find :param ha: host address pair the remote to find :raises: RemoteNotFound
[ "Find", "the", "remote", "by", "name", "or", "ha", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/stp_core/network/network_interface.py#L135-L144
234,139
hyperledger/indy-plenum
stp_core/network/network_interface.py
NetworkInterface.findInRemotesByName
def findInRemotesByName(self, name: str): """ Find the remote by name. :param name: the name of the remote to find :raises: RemoteNotFound """ remotes = [r for r in self.remotes.values() if r.name == name] if len(remotes) > 1: raise DuplicateRemotes(remotes) if not remotes: raise RemoteNotFound(name) return remotes[0]
python
def findInRemotesByName(self, name: str): """ Find the remote by name. :param name: the name of the remote to find :raises: RemoteNotFound """ remotes = [r for r in self.remotes.values() if r.name == name] if len(remotes) > 1: raise DuplicateRemotes(remotes) if not remotes: raise RemoteNotFound(name) return remotes[0]
[ "def", "findInRemotesByName", "(", "self", ",", "name", ":", "str", ")", ":", "remotes", "=", "[", "r", "for", "r", "in", "self", ".", "remotes", ".", "values", "(", ")", "if", "r", ".", "name", "==", "name", "]", "if", "len", "(", "remotes", ")", ">", "1", ":", "raise", "DuplicateRemotes", "(", "remotes", ")", "if", "not", "remotes", ":", "raise", "RemoteNotFound", "(", "name", ")", "return", "remotes", "[", "0", "]" ]
Find the remote by name. :param name: the name of the remote to find :raises: RemoteNotFound
[ "Find", "the", "remote", "by", "name", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/stp_core/network/network_interface.py#L155-L168
234,140
hyperledger/indy-plenum
stp_core/network/network_interface.py
NetworkInterface.removeRemoteByName
def removeRemoteByName(self, name: str) -> int: """ Remove the remote by name. :param name: the name of the remote to remove :raises: RemoteNotFound """ remote = self.getRemote(name) rid = remote.uid self.removeRemote(remote) return rid
python
def removeRemoteByName(self, name: str) -> int: """ Remove the remote by name. :param name: the name of the remote to remove :raises: RemoteNotFound """ remote = self.getRemote(name) rid = remote.uid self.removeRemote(remote) return rid
[ "def", "removeRemoteByName", "(", "self", ",", "name", ":", "str", ")", "->", "int", ":", "remote", "=", "self", ".", "getRemote", "(", "name", ")", "rid", "=", "remote", ".", "uid", "self", ".", "removeRemote", "(", "remote", ")", "return", "rid" ]
Remove the remote by name. :param name: the name of the remote to remove :raises: RemoteNotFound
[ "Remove", "the", "remote", "by", "name", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/stp_core/network/network_interface.py#L177-L187
234,141
hyperledger/indy-plenum
stp_core/network/network_interface.py
NetworkInterface.sameAddr
def sameAddr(self, ha, ha2) -> bool: """ Check whether the two arguments correspond to the same address """ if ha == ha2: return True if ha[1] != ha2[1]: return False return ha[0] in self.localips and ha2[0] in self.localips
python
def sameAddr(self, ha, ha2) -> bool: """ Check whether the two arguments correspond to the same address """ if ha == ha2: return True if ha[1] != ha2[1]: return False return ha[0] in self.localips and ha2[0] in self.localips
[ "def", "sameAddr", "(", "self", ",", "ha", ",", "ha2", ")", "->", "bool", ":", "if", "ha", "==", "ha2", ":", "return", "True", "if", "ha", "[", "1", "]", "!=", "ha2", "[", "1", "]", ":", "return", "False", "return", "ha", "[", "0", "]", "in", "self", ".", "localips", "and", "ha2", "[", "0", "]", "in", "self", ".", "localips" ]
Check whether the two arguments correspond to the same address
[ "Check", "whether", "the", "two", "arguments", "correspond", "to", "the", "same", "address" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/stp_core/network/network_interface.py#L196-L204
234,142
hyperledger/indy-plenum
stp_core/network/network_interface.py
NetworkInterface.remotesByConnected
def remotesByConnected(self): """ Partitions the remotes into connected and disconnected :return: tuple(connected remotes, disconnected remotes) """ conns, disconns = [], [] for r in self.remotes.values(): array = conns if self.isRemoteConnected(r) else disconns array.append(r) return conns, disconns
python
def remotesByConnected(self): """ Partitions the remotes into connected and disconnected :return: tuple(connected remotes, disconnected remotes) """ conns, disconns = [], [] for r in self.remotes.values(): array = conns if self.isRemoteConnected(r) else disconns array.append(r) return conns, disconns
[ "def", "remotesByConnected", "(", "self", ")", ":", "conns", ",", "disconns", "=", "[", "]", ",", "[", "]", "for", "r", "in", "self", ".", "remotes", ".", "values", "(", ")", ":", "array", "=", "conns", "if", "self", ".", "isRemoteConnected", "(", "r", ")", "else", "disconns", "array", ".", "append", "(", "r", ")", "return", "conns", ",", "disconns" ]
Partitions the remotes into connected and disconnected :return: tuple(connected remotes, disconnected remotes)
[ "Partitions", "the", "remotes", "into", "connected", "and", "disconnected" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/stp_core/network/network_interface.py#L206-L216
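The single-pass partition pattern used by remotesByConnected above (append each item to whichever of two lists a predicate selects) generalizes readily; a self-contained sketch, separate from the plenum code:

def partition(items, predicate):
    yes, no = [], []
    for item in items:
        (yes if predicate(item) else no).append(item)
    return yes, no

# partition(range(6), lambda n: n % 2 == 0)  ->  ([0, 2, 4], [1, 3, 5])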
234,143
hyperledger/indy-plenum
plenum/client/wallet.py
Wallet.addIdentifier
def addIdentifier(self, identifier=None, seed=None, signer=None, alias=None, didMethodName=None): """ Adds signer to the wallet. Requires complete signer, identifier or seed. :param identifier: signer identifier or None to use random one :param seed: signer key seed or None to use random one :param signer: signer to add :param alias: a friendly readable name for the signer :param didMethodName: name of DID Method if not the default :return: """ dm = self.didMethods.get(didMethodName) signer = signer or dm.newSigner(identifier=identifier, seed=seed) self.idsToSigners[signer.identifier] = signer if self.defaultId is None: # setting this signer as default signer to let use sign* methods # without explicit specification of signer self.defaultId = signer.identifier if alias: signer.alias = alias if signer.alias: self.aliasesToIds[signer.alias] = signer.identifier return signer.identifier, signer
python
def addIdentifier(self, identifier=None, seed=None, signer=None, alias=None, didMethodName=None): """ Adds signer to the wallet. Requires complete signer, identifier or seed. :param identifier: signer identifier or None to use random one :param seed: signer key seed or None to use random one :param signer: signer to add :param alias: a friendly readable name for the signer :param didMethodName: name of DID Method if not the default :return: """ dm = self.didMethods.get(didMethodName) signer = signer or dm.newSigner(identifier=identifier, seed=seed) self.idsToSigners[signer.identifier] = signer if self.defaultId is None: # setting this signer as default signer to let use sign* methods # without explicit specification of signer self.defaultId = signer.identifier if alias: signer.alias = alias if signer.alias: self.aliasesToIds[signer.alias] = signer.identifier return signer.identifier, signer
[ "def", "addIdentifier", "(", "self", ",", "identifier", "=", "None", ",", "seed", "=", "None", ",", "signer", "=", "None", ",", "alias", "=", "None", ",", "didMethodName", "=", "None", ")", ":", "dm", "=", "self", ".", "didMethods", ".", "get", "(", "didMethodName", ")", "signer", "=", "signer", "or", "dm", ".", "newSigner", "(", "identifier", "=", "identifier", ",", "seed", "=", "seed", ")", "self", ".", "idsToSigners", "[", "signer", ".", "identifier", "]", "=", "signer", "if", "self", ".", "defaultId", "is", "None", ":", "# setting this signer as default signer to let use sign* methods", "# without explicit specification of signer", "self", ".", "defaultId", "=", "signer", ".", "identifier", "if", "alias", ":", "signer", ".", "alias", "=", "alias", "if", "signer", ".", "alias", ":", "self", ".", "aliasesToIds", "[", "signer", ".", "alias", "]", "=", "signer", ".", "identifier", "return", "signer", ".", "identifier", ",", "signer" ]
Adds signer to the wallet. Requires complete signer, identifier or seed. :param identifier: signer identifier or None to use random one :param seed: signer key seed or None to use random one :param signer: signer to add :param alias: a friendly readable name for the signer :param didMethodName: name of DID Method if not the default :return:
[ "Adds", "signer", "to", "the", "wallet", ".", "Requires", "complete", "signer", "identifier", "or", "seed", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/client/wallet.py#L82-L111
234,144
hyperledger/indy-plenum
plenum/client/wallet.py
Wallet.requiredIdr
def requiredIdr(self, idr: Identifier=None, alias: str=None): """ Checks whether signer identifier specified, or can it be inferred from alias or can be default used instead :param idr: :param alias: :param other: :return: signer identifier """ # TODO Need to create a new Identifier type that supports DIDs and CIDs if idr: if ':' in idr: idr = idr.split(':')[1] else: idr = self.aliasesToIds[alias] if alias else self.defaultId if not idr: raise EmptyIdentifier return idr
python
def requiredIdr(self, idr: Identifier=None, alias: str=None): """ Checks whether signer identifier specified, or can it be inferred from alias or can be default used instead :param idr: :param alias: :param other: :return: signer identifier """ # TODO Need to create a new Identifier type that supports DIDs and CIDs if idr: if ':' in idr: idr = idr.split(':')[1] else: idr = self.aliasesToIds[alias] if alias else self.defaultId if not idr: raise EmptyIdentifier return idr
[ "def", "requiredIdr", "(", "self", ",", "idr", ":", "Identifier", "=", "None", ",", "alias", ":", "str", "=", "None", ")", ":", "# TODO Need to create a new Identifier type that supports DIDs and CIDs", "if", "idr", ":", "if", "':'", "in", "idr", ":", "idr", "=", "idr", ".", "split", "(", "':'", ")", "[", "1", "]", "else", ":", "idr", "=", "self", ".", "aliasesToIds", "[", "alias", "]", "if", "alias", "else", "self", ".", "defaultId", "if", "not", "idr", ":", "raise", "EmptyIdentifier", "return", "idr" ]
Checks whether signer identifier specified, or can it be inferred from alias or can be default used instead :param idr: :param alias: :param other: :return: signer identifier
[ "Checks", "whether", "signer", "identifier", "specified", "or", "can", "it", "be", "inferred", "from", "alias", "or", "can", "be", "default", "used", "instead" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/client/wallet.py#L139-L161
234,145
hyperledger/indy-plenum
plenum/client/wallet.py
Wallet.signMsg
def signMsg(self, msg: Dict, identifier: Identifier=None, otherIdentifier: Identifier=None): """ Creates signature for message using specified signer :param msg: message to sign :param identifier: signer identifier :param otherIdentifier: :return: signature that then can be assigned to request """ idr = self.requiredIdr(idr=identifier or otherIdentifier) signer = self._signerById(idr) signature = signer.sign(msg) return signature
python
def signMsg(self, msg: Dict, identifier: Identifier=None, otherIdentifier: Identifier=None): """ Creates signature for message using specified signer :param msg: message to sign :param identifier: signer identifier :param otherIdentifier: :return: signature that then can be assigned to request """ idr = self.requiredIdr(idr=identifier or otherIdentifier) signer = self._signerById(idr) signature = signer.sign(msg) return signature
[ "def", "signMsg", "(", "self", ",", "msg", ":", "Dict", ",", "identifier", ":", "Identifier", "=", "None", ",", "otherIdentifier", ":", "Identifier", "=", "None", ")", ":", "idr", "=", "self", ".", "requiredIdr", "(", "idr", "=", "identifier", "or", "otherIdentifier", ")", "signer", "=", "self", ".", "_signerById", "(", "idr", ")", "signature", "=", "signer", ".", "sign", "(", "msg", ")", "return", "signature" ]
Creates signature for message using specified signer :param msg: message to sign :param identifier: signer identifier :param otherIdentifier: :return: signature that then can be assigned to request
[ "Creates", "signature", "for", "message", "using", "specified", "signer" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/client/wallet.py#L163-L178
234,146
hyperledger/indy-plenum
plenum/client/wallet.py
Wallet.signRequest
def signRequest(self, req: Request, identifier: Identifier=None) -> Request: """ Signs request. Modifies reqId and signature. May modify identifier. :param req: request :param requestIdStore: request id generator :param identifier: signer identifier :return: signed request """ idr = self.requiredIdr(idr=identifier or req._identifier) # idData = self._getIdData(idr) req._identifier = idr req.reqId = req.gen_req_id() # req.digest = req.getDigest() # QUESTION: `self.ids[idr]` would be overwritten if same identifier # is used to send 2 requests, why is `IdData` persisted? # self.ids[idr] = IdData(idData.signer, req.reqId) req.signature = self.signMsg(msg=req.signingPayloadState(identifier=idr), identifier=idr, otherIdentifier=req.identifier) return req
python
def signRequest(self, req: Request, identifier: Identifier=None) -> Request: """ Signs request. Modifies reqId and signature. May modify identifier. :param req: request :param requestIdStore: request id generator :param identifier: signer identifier :return: signed request """ idr = self.requiredIdr(idr=identifier or req._identifier) # idData = self._getIdData(idr) req._identifier = idr req.reqId = req.gen_req_id() # req.digest = req.getDigest() # QUESTION: `self.ids[idr]` would be overwritten if same identifier # is used to send 2 requests, why is `IdData` persisted? # self.ids[idr] = IdData(idData.signer, req.reqId) req.signature = self.signMsg(msg=req.signingPayloadState(identifier=idr), identifier=idr, otherIdentifier=req.identifier) return req
[ "def", "signRequest", "(", "self", ",", "req", ":", "Request", ",", "identifier", ":", "Identifier", "=", "None", ")", "->", "Request", ":", "idr", "=", "self", ".", "requiredIdr", "(", "idr", "=", "identifier", "or", "req", ".", "_identifier", ")", "# idData = self._getIdData(idr)", "req", ".", "_identifier", "=", "idr", "req", ".", "reqId", "=", "req", ".", "gen_req_id", "(", ")", "# req.digest = req.getDigest()", "# QUESTION: `self.ids[idr]` would be overwritten if same identifier", "# is used to send 2 requests, why is `IdData` persisted?", "# self.ids[idr] = IdData(idData.signer, req.reqId)", "req", ".", "signature", "=", "self", ".", "signMsg", "(", "msg", "=", "req", ".", "signingPayloadState", "(", "identifier", "=", "idr", ")", ",", "identifier", "=", "idr", ",", "otherIdentifier", "=", "req", ".", "identifier", ")", "return", "req" ]
Signs request. Modifies reqId and signature. May modify identifier. :param req: request :param requestIdStore: request id generator :param identifier: signer identifier :return: signed request
[ "Signs", "request", ".", "Modifies", "reqId", "and", "signature", ".", "May", "modify", "identifier", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/client/wallet.py#L180-L204
234,147
hyperledger/indy-plenum
plenum/client/wallet.py
Wallet.signOp
def signOp(self, op: Dict, identifier: Identifier=None) -> Request: """ Signs the message if a signer is configured :param identifier: signing identifier; if not supplied the default for the wallet is used. :param op: Operation to be signed :return: a signed Request object """ request = Request(operation=op, protocolVersion=CURRENT_PROTOCOL_VERSION) return self.signRequest(request, identifier)
python
def signOp(self, op: Dict, identifier: Identifier=None) -> Request: """ Signs the message if a signer is configured :param identifier: signing identifier; if not supplied the default for the wallet is used. :param op: Operation to be signed :return: a signed Request object """ request = Request(operation=op, protocolVersion=CURRENT_PROTOCOL_VERSION) return self.signRequest(request, identifier)
[ "def", "signOp", "(", "self", ",", "op", ":", "Dict", ",", "identifier", ":", "Identifier", "=", "None", ")", "->", "Request", ":", "request", "=", "Request", "(", "operation", "=", "op", ",", "protocolVersion", "=", "CURRENT_PROTOCOL_VERSION", ")", "return", "self", ".", "signRequest", "(", "request", ",", "identifier", ")" ]
Signs the message if a signer is configured :param identifier: signing identifier; if not supplied the default for the wallet is used. :param op: Operation to be signed :return: a signed Request object
[ "Signs", "the", "message", "if", "a", "signer", "is", "configured" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/client/wallet.py#L206-L219
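signOp is the usual entry point: it wraps the operation into a Request with CURRENT_PROTOCOL_VERSION and hands it to signRequest, which sets _identifier, generates reqId and attaches the signature. A sketch under the same wallet assumptions as above; the operation payload is purely illustrative.

op = {'type': '1', 'dest': 'TargetDid123'}   # hypothetical operation payload

req = wallet.signOp(op)
print(req.reqId)       # request id generated by req.gen_req_id()
print(req.signature)   # signature over req.signingPayloadState(identifier=...)

# an already-built Request can be signed directly with wallet.signRequest(req)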
234,148
hyperledger/indy-plenum
plenum/client/wallet.py
Wallet.listIds
def listIds(self, exclude=list()): """ For each signer in this wallet, return its alias if present else return its identifier. :param exclude: :return: List of identifiers/aliases. """ lst = list(self.aliasesToIds.keys()) others = set(self.idsToSigners.keys()) - \ set(self.aliasesToIds.values()) lst.extend(list(others)) for x in exclude: lst.remove(x) return lst
python
def listIds(self, exclude=list()): """ For each signer in this wallet, return its alias if present else return its identifier. :param exclude: :return: List of identifiers/aliases. """ lst = list(self.aliasesToIds.keys()) others = set(self.idsToSigners.keys()) - \ set(self.aliasesToIds.values()) lst.extend(list(others)) for x in exclude: lst.remove(x) return lst
[ "def", "listIds", "(", "self", ",", "exclude", "=", "list", "(", ")", ")", ":", "lst", "=", "list", "(", "self", ".", "aliasesToIds", ".", "keys", "(", ")", ")", "others", "=", "set", "(", "self", ".", "idsToSigners", ".", "keys", "(", ")", ")", "-", "set", "(", "self", ".", "aliasesToIds", ".", "values", "(", ")", ")", "lst", ".", "extend", "(", "list", "(", "others", ")", ")", "for", "x", "in", "exclude", ":", "lst", ".", "remove", "(", "x", ")", "return", "lst" ]
For each signer in this wallet, return its alias if present else return its identifier. :param exclude: :return: List of identifiers/aliases.
[ "For", "each", "signer", "in", "this", "wallet", "return", "its", "alias", "if", "present", "else", "return", "its", "identifier", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/client/wallet.py#L267-L281
234,149
hyperledger/indy-plenum
plenum/client/wallet.py
WalletStorageHelper.saveWallet
def saveWallet(self, wallet, fpath): """Save wallet into specified localtion. Returns the canonical path for the ``fpath`` where ``wallet`` has been stored. Error cases: - ``fpath`` is not inside the keyrings base dir - ValueError raised - directory part of ``fpath`` exists and it's not a directory - NotADirectoryError raised - ``fpath`` exists and it's a directory - IsADirectoryError raised :param wallet: wallet to save :param fpath: wallet file path, absolute or relative to keyrings base dir """ if not fpath: raise ValueError("empty path") _fpath = self._normalize(fpath) _dpath = _fpath.parent try: _dpath.relative_to(self._baseDir) except ValueError: raise ValueError( "path {} is not is not relative to the keyrings {}".format( fpath, self._baseDir)) self._createDirIfNotExists(_dpath) # ensure permissions from the bottom of the directory hierarchy while _dpath != self._baseDir: self._ensurePermissions(_dpath, self.dmode) _dpath = _dpath.parent with _fpath.open("w") as wf: self._ensurePermissions(_fpath, self.fmode) encodedWallet = self.encode(wallet) wf.write(encodedWallet) logger.debug("stored wallet '{}' in {}".format( wallet.name, _fpath)) return str(_fpath)
python
def saveWallet(self, wallet, fpath): """Save wallet into specified localtion. Returns the canonical path for the ``fpath`` where ``wallet`` has been stored. Error cases: - ``fpath`` is not inside the keyrings base dir - ValueError raised - directory part of ``fpath`` exists and it's not a directory - NotADirectoryError raised - ``fpath`` exists and it's a directory - IsADirectoryError raised :param wallet: wallet to save :param fpath: wallet file path, absolute or relative to keyrings base dir """ if not fpath: raise ValueError("empty path") _fpath = self._normalize(fpath) _dpath = _fpath.parent try: _dpath.relative_to(self._baseDir) except ValueError: raise ValueError( "path {} is not is not relative to the keyrings {}".format( fpath, self._baseDir)) self._createDirIfNotExists(_dpath) # ensure permissions from the bottom of the directory hierarchy while _dpath != self._baseDir: self._ensurePermissions(_dpath, self.dmode) _dpath = _dpath.parent with _fpath.open("w") as wf: self._ensurePermissions(_fpath, self.fmode) encodedWallet = self.encode(wallet) wf.write(encodedWallet) logger.debug("stored wallet '{}' in {}".format( wallet.name, _fpath)) return str(_fpath)
[ "def", "saveWallet", "(", "self", ",", "wallet", ",", "fpath", ")", ":", "if", "not", "fpath", ":", "raise", "ValueError", "(", "\"empty path\"", ")", "_fpath", "=", "self", ".", "_normalize", "(", "fpath", ")", "_dpath", "=", "_fpath", ".", "parent", "try", ":", "_dpath", ".", "relative_to", "(", "self", ".", "_baseDir", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"path {} is not is not relative to the keyrings {}\"", ".", "format", "(", "fpath", ",", "self", ".", "_baseDir", ")", ")", "self", ".", "_createDirIfNotExists", "(", "_dpath", ")", "# ensure permissions from the bottom of the directory hierarchy", "while", "_dpath", "!=", "self", ".", "_baseDir", ":", "self", ".", "_ensurePermissions", "(", "_dpath", ",", "self", ".", "dmode", ")", "_dpath", "=", "_dpath", ".", "parent", "with", "_fpath", ".", "open", "(", "\"w\"", ")", "as", "wf", ":", "self", ".", "_ensurePermissions", "(", "_fpath", ",", "self", ".", "fmode", ")", "encodedWallet", "=", "self", ".", "encode", "(", "wallet", ")", "wf", ".", "write", "(", "encodedWallet", ")", "logger", ".", "debug", "(", "\"stored wallet '{}' in {}\"", ".", "format", "(", "wallet", ".", "name", ",", "_fpath", ")", ")", "return", "str", "(", "_fpath", ")" ]
Save wallet into specified location. Returns the canonical path for the ``fpath`` where ``wallet`` has been stored. Error cases: - ``fpath`` is not inside the keyrings base dir - ValueError raised - directory part of ``fpath`` exists and it's not a directory - NotADirectoryError raised - ``fpath`` exists and it's a directory - IsADirectoryError raised :param wallet: wallet to save :param fpath: wallet file path, absolute or relative to keyrings base dir
[ "Save", "wallet", "into", "specified", "location", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/client/wallet.py#L351-L394
234,150
hyperledger/indy-plenum
plenum/client/wallet.py
WalletStorageHelper.loadWallet
def loadWallet(self, fpath): """Load wallet from specified localtion. Returns loaded wallet. Error cases: - ``fpath`` is not inside the keyrings base dir - ValueError raised - ``fpath`` exists and it's a directory - IsADirectoryError raised :param fpath: wallet file path, absolute or relative to keyrings base dir """ if not fpath: raise ValueError("empty path") _fpath = self._normalize(fpath) _dpath = _fpath.parent try: _dpath.relative_to(self._baseDir) except ValueError: raise ValueError( "path {} is not is not relative to the wallets {}".format( fpath, self._baseDir)) with _fpath.open() as wf: wallet = self.decode(wf.read()) return wallet
python
def loadWallet(self, fpath): """Load wallet from specified localtion. Returns loaded wallet. Error cases: - ``fpath`` is not inside the keyrings base dir - ValueError raised - ``fpath`` exists and it's a directory - IsADirectoryError raised :param fpath: wallet file path, absolute or relative to keyrings base dir """ if not fpath: raise ValueError("empty path") _fpath = self._normalize(fpath) _dpath = _fpath.parent try: _dpath.relative_to(self._baseDir) except ValueError: raise ValueError( "path {} is not is not relative to the wallets {}".format( fpath, self._baseDir)) with _fpath.open() as wf: wallet = self.decode(wf.read()) return wallet
[ "def", "loadWallet", "(", "self", ",", "fpath", ")", ":", "if", "not", "fpath", ":", "raise", "ValueError", "(", "\"empty path\"", ")", "_fpath", "=", "self", ".", "_normalize", "(", "fpath", ")", "_dpath", "=", "_fpath", ".", "parent", "try", ":", "_dpath", ".", "relative_to", "(", "self", ".", "_baseDir", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"path {} is not is not relative to the wallets {}\"", ".", "format", "(", "fpath", ",", "self", ".", "_baseDir", ")", ")", "with", "_fpath", ".", "open", "(", ")", "as", "wf", ":", "wallet", "=", "self", ".", "decode", "(", "wf", ".", "read", "(", ")", ")", "return", "wallet" ]
Load wallet from specified location. Returns loaded wallet. Error cases: - ``fpath`` is not inside the keyrings base dir - ValueError raised - ``fpath`` exists and it's a directory - IsADirectoryError raised :param fpath: wallet file path, absolute or relative to keyrings base dir
[ "Load", "wallet", "from", "specified", "location", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/client/wallet.py#L396-L424
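saveWallet and loadWallet round-trip a wallet through the keyrings base directory and refuse any path that escapes it. A sketch, assuming WalletStorageHelper is constructed with that base directory and that its encode/decode helpers handle the (de)serialization; neither the constructor nor those helpers appear in these records.

helper = WalletStorageHelper('/home/user/.plenum/wallets')       # assumed constructor

stored_at = helper.saveWallet(wallet, 'issuer/default.wallet')   # path relative to the base dir
print(stored_at)                                                 # canonical absolute path of the file

wallet_again = helper.loadWallet('issuer/default.wallet')

# helper.saveWallet(wallet, '../outside.wallet') raises ValueError: not under the base dir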
234,151
hyperledger/indy-plenum
plenum/server/models.py
Prepares.addVote
def addVote(self, prepare: Prepare, voter: str) -> None: """ Add the specified PREPARE to this replica's list of received PREPAREs. :param prepare: the PREPARE to add to the list :param voter: the name of the node who sent the PREPARE """ self._add_msg(prepare, voter)
python
def addVote(self, prepare: Prepare, voter: str) -> None: """ Add the specified PREPARE to this replica's list of received PREPAREs. :param prepare: the PREPARE to add to the list :param voter: the name of the node who sent the PREPARE """ self._add_msg(prepare, voter)
[ "def", "addVote", "(", "self", ",", "prepare", ":", "Prepare", ",", "voter", ":", "str", ")", "->", "None", ":", "self", ".", "_add_msg", "(", "prepare", ",", "voter", ")" ]
Add the specified PREPARE to this replica's list of received PREPAREs. :param prepare: the PREPARE to add to the list :param voter: the name of the node who sent the PREPARE
[ "Add", "the", "specified", "PREPARE", "to", "this", "replica", "s", "list", "of", "received", "PREPAREs", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/models.py#L62-L70
234,152
hyperledger/indy-plenum
plenum/server/models.py
Commits.addVote
def addVote(self, commit: Commit, voter: str) -> None: """ Add the specified COMMIT to this replica's list of received COMMITs. :param commit: the COMMIT to add to the list :param voter: the name of the replica who sent the COMMIT """ super()._add_msg(commit, voter)
python
def addVote(self, commit: Commit, voter: str) -> None: """ Add the specified COMMIT to this replica's list of received COMMITs. :param commit: the COMMIT to add to the list :param voter: the name of the replica who sent the COMMIT """ super()._add_msg(commit, voter)
[ "def", "addVote", "(", "self", ",", "commit", ":", "Commit", ",", "voter", ":", "str", ")", "->", "None", ":", "super", "(", ")", ".", "_add_msg", "(", "commit", ",", "voter", ")" ]
Add the specified COMMIT to this replica's list of received COMMITs. :param commit: the COMMIT to add to the list :param voter: the name of the replica who sent the COMMIT
[ "Add", "the", "specified", "COMMIT", "to", "this", "replica", "s", "list", "of", "received", "COMMITs", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/models.py#L96-L104
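Prepares.addVote and Commits.addVote both delegate to the shared _add_msg helper, recording one vote per sender so a later quorum check can count distinct voters. A heavily hedged sketch: the constructors, the PREPARE message fields and the quorum-check method are all outside these records and therefore assumed.

prepares = Prepares()                      # assumed no-argument constructor

# prepare_msg is a PREPARE received from the network (its fields are not shown here)
prepares.addVote(prepare_msg, 'Alpha:0')
prepares.addVote(prepare_msg, 'Beta:0')
prepares.addVote(prepare_msg, 'Gamma:0')

# a quorum helper (name assumed, not shown in these records) would now see
# three distinct voters recorded for prepare_msg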
234,153
hyperledger/indy-plenum
plenum/common/message_processor.py
MessageProcessor.discard
def discard(self, msg, reason, logMethod=logging.error, cliOutput=False): """ Discard a message and log a reason using the specified `logMethod`. :param msg: the message to discard :param reason: the reason why this message is being discarded :param logMethod: the logging function to be used :param cliOutput: if truthy, informs a CLI that the logged msg should be printed """ reason = "" if not reason else " because {}".format(reason) logMethod("{} discarding message {}{}".format(self, msg, reason), extra={"cli": cliOutput})
python
def discard(self, msg, reason, logMethod=logging.error, cliOutput=False): """ Discard a message and log a reason using the specified `logMethod`. :param msg: the message to discard :param reason: the reason why this message is being discarded :param logMethod: the logging function to be used :param cliOutput: if truthy, informs a CLI that the logged msg should be printed """ reason = "" if not reason else " because {}".format(reason) logMethod("{} discarding message {}{}".format(self, msg, reason), extra={"cli": cliOutput})
[ "def", "discard", "(", "self", ",", "msg", ",", "reason", ",", "logMethod", "=", "logging", ".", "error", ",", "cliOutput", "=", "False", ")", ":", "reason", "=", "\"\"", "if", "not", "reason", "else", "\" because {}\"", ".", "format", "(", "reason", ")", "logMethod", "(", "\"{} discarding message {}{}\"", ".", "format", "(", "self", ",", "msg", ",", "reason", ")", ",", "extra", "=", "{", "\"cli\"", ":", "cliOutput", "}", ")" ]
Discard a message and log a reason using the specified `logMethod`. :param msg: the message to discard :param reason: the reason why this message is being discarded :param logMethod: the logging function to be used :param cliOutput: if truthy, informs a CLI that the logged msg should be printed
[ "Discard", "a", "message", "and", "log", "a", "reason", "using", "the", "specified", "logMethod", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/common/message_processor.py#L18-L30
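discard is a small helper for any class that mixes in MessageProcessor: the reason is folded into the log line and extra={'cli': ...} tells the CLI whether to echo it. A sketch using a trivial host subclass; MessageProcessor's __init__ is not shown in these records, so a no-argument construction is assumed.

import logging
from plenum.common.message_processor import MessageProcessor

class EchoNode(MessageProcessor):          # minimal host for the mixin
    def __repr__(self):
        return 'EchoNode'

node = EchoNode()
node.discard({'op': 'PING'}, 'it arrived before the node was ready',
             logMethod=logging.warning, cliOutput=True)
# logs: "EchoNode discarding message {'op': 'PING'} because it arrived before the node was ready"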
234,154
hyperledger/indy-plenum
plenum/common/message_processor.py
MessageProcessor.toDict
def toDict(self, msg: Dict) -> Dict: """ Return a dictionary form of the message :param msg: the message to be sent :raises: ValueError if msg cannot be converted to an appropriate format for transmission """ if isinstance(msg, Request): tmsg = msg.as_dict elif hasattr(msg, "_asdict"): tmsg = dict(msg._asdict()) elif hasattr(msg, "__dict__"): tmsg = dict(msg.__dict__) elif self.allowDictOnly: raise ValueError("Message cannot be converted to an appropriate " "format for transmission") else: tmsg = msg return tmsg
python
def toDict(self, msg: Dict) -> Dict: """ Return a dictionary form of the message :param msg: the message to be sent :raises: ValueError if msg cannot be converted to an appropriate format for transmission """ if isinstance(msg, Request): tmsg = msg.as_dict elif hasattr(msg, "_asdict"): tmsg = dict(msg._asdict()) elif hasattr(msg, "__dict__"): tmsg = dict(msg.__dict__) elif self.allowDictOnly: raise ValueError("Message cannot be converted to an appropriate " "format for transmission") else: tmsg = msg return tmsg
[ "def", "toDict", "(", "self", ",", "msg", ":", "Dict", ")", "->", "Dict", ":", "if", "isinstance", "(", "msg", ",", "Request", ")", ":", "tmsg", "=", "msg", ".", "as_dict", "elif", "hasattr", "(", "msg", ",", "\"_asdict\"", ")", ":", "tmsg", "=", "dict", "(", "msg", ".", "_asdict", "(", ")", ")", "elif", "hasattr", "(", "msg", ",", "\"__dict__\"", ")", ":", "tmsg", "=", "dict", "(", "msg", ".", "__dict__", ")", "elif", "self", ".", "allowDictOnly", ":", "raise", "ValueError", "(", "\"Message cannot be converted to an appropriate \"", "\"format for transmission\"", ")", "else", ":", "tmsg", "=", "msg", "return", "tmsg" ]
Return a dictionary form of the message :param msg: the message to be sent :raises: ValueError if msg cannot be converted to an appropriate format for transmission
[ "Return", "a", "dictionary", "form", "of", "the", "message" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/common/message_processor.py#L32-L52
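toDict normalises outgoing messages: Request objects via as_dict, namedtuple-style messages via _asdict, other objects via __dict__, and plain dicts pass through unless allowDictOnly forbids them. A sketch of the namedtuple and dict branches, reusing an instance that mixes in MessageProcessor (such as the EchoNode sketch above) with allowDictOnly left falsy.

from collections import namedtuple

Ping = namedtuple('Ping', ['viewNo', 'sender'])       # stand-in message type

print(node.toDict(Ping(viewNo=3, sender='Alpha')))    # {'viewNo': 3, 'sender': 'Alpha'}
print(node.toDict({'op': 'PONG'}))                    # plain dict passes through unchanged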
234,155
hyperledger/indy-plenum
plenum/server/replica_freshness_checker.py
FreshnessChecker.update_freshness
def update_freshness(self, ledger_id, ts): ''' Updates the time at which the ledger was updated. Should be called whenever a txn for the ledger is ordered. :param ledger_id: the ID of the ledgers a txn was ordered for :param ts: the current time :return: None ''' if ledger_id in self._ledger_freshness: self._ledger_freshness[ledger_id].last_updated = ts
python
def update_freshness(self, ledger_id, ts): ''' Updates the time at which the ledger was updated. Should be called whenever a txn for the ledger is ordered. :param ledger_id: the ID of the ledgers a txn was ordered for :param ts: the current time :return: None ''' if ledger_id in self._ledger_freshness: self._ledger_freshness[ledger_id].last_updated = ts
[ "def", "update_freshness", "(", "self", ",", "ledger_id", ",", "ts", ")", ":", "if", "ledger_id", "in", "self", ".", "_ledger_freshness", ":", "self", ".", "_ledger_freshness", "[", "ledger_id", "]", ".", "last_updated", "=", "ts" ]
Updates the time at which the ledger was updated. Should be called whenever a txn for the ledger is ordered. :param ledger_id: the ID of the ledger a txn was ordered for :param ts: the current time :return: None
[ "Updates", "the", "time", "at", "which", "the", "ledger", "was", "updated", ".", "Should", "be", "called", "whenever", "a", "txn", "for", "the", "ledger", "is", "ordered", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica_freshness_checker.py#L50-L60
234,156
hyperledger/indy-plenum
plenum/server/replica_freshness_checker.py
FreshnessChecker.get_last_update_time
def get_last_update_time(self): ''' Gets the time at which each ledger was updated. Can be called at any time to get this information. :return: an ordered dict of outdated ledgers sorted by last update time (from old to new) and then by ledger ID (in case of equal update time) ''' last_updated = {ledger_id: freshness_state.last_updated for ledger_id, freshness_state in self._ledger_freshness.items()} return OrderedDict( sorted( last_updated.items(), key=lambda item: (item[1], item[0]) ) )
python
def get_last_update_time(self): ''' Gets the time at which each ledger was updated. Can be called at any time to get this information. :return: an ordered dict of outdated ledgers sorted by last update time (from old to new) and then by ledger ID (in case of equal update time) ''' last_updated = {ledger_id: freshness_state.last_updated for ledger_id, freshness_state in self._ledger_freshness.items()} return OrderedDict( sorted( last_updated.items(), key=lambda item: (item[1], item[0]) ) )
[ "def", "get_last_update_time", "(", "self", ")", ":", "last_updated", "=", "{", "ledger_id", ":", "freshness_state", ".", "last_updated", "for", "ledger_id", ",", "freshness_state", "in", "self", ".", "_ledger_freshness", ".", "items", "(", ")", "}", "return", "OrderedDict", "(", "sorted", "(", "last_updated", ".", "items", "(", ")", ",", "key", "=", "lambda", "item", ":", "(", "item", "[", "1", "]", ",", "item", "[", "0", "]", ")", ")", ")" ]
Gets the time at which each ledger was updated. Can be called at any time to get this information. :return: an ordered dict of outdated ledgers sorted by last update time (from old to new) and then by ledger ID (in case of equal update time)
[ "Gets", "the", "time", "at", "which", "each", "ledger", "was", "updated", ".", "Can", "be", "called", "at", "any", "time", "to", "get", "this", "information", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/replica_freshness_checker.py#L62-L77
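Taken together, update_freshness records when each tracked ledger last ordered a txn and get_last_update_time returns those timestamps oldest-first, with ties broken by ledger id. A sketch, assuming the checker already tracks the ledgers used (its constructor/registration API is not shown in these records).

# ledger ids 0 (pool), 1 (domain) and 2 (config) are assumed to be registered already
checker.update_freshness(ledger_id=1, ts=100)
checker.update_freshness(ledger_id=0, ts=250)
checker.update_freshness(ledger_id=2, ts=100)

print(checker.get_last_update_time())
# OrderedDict([(1, 100), (2, 100), (0, 250)])  -- oldest first, ledger id breaks the tie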
234,157
hyperledger/indy-plenum
common/serializers/signing_serializer.py
SigningSerializer.serialize
def serialize(self, obj, level=0, objname=None, topLevelKeysToIgnore=None, toBytes=True): """ Create a string representation of the given object. Examples: :: >>> serialize("str") 'str' >>> serialize([1,2,3,4,5]) '1,2,3,4,5' >>> signing.serlize({1:'a', 2:'b'}) '1:a|2:b' >>> signing.serlize({1:'a', 2:'b', 3:[1,{2:'k'}]}) '1:a|2:b|3:1,2:k' :param obj: the object to serlize :param level: a parameter used internally for recursion to serialize nested data structures :param topLevelKeysToIgnore: the list of top level keys to ignore for serialization :return: a string representation of `obj` """ res = None if not isinstance(obj, acceptableTypes): error("invalid type found {}: {}".format(objname, obj)) elif isinstance(obj, str): res = obj elif isinstance(obj, dict): if level > 0: keys = list(obj.keys()) else: topLevelKeysToIgnore = topLevelKeysToIgnore or [] keys = [k for k in obj.keys() if k not in topLevelKeysToIgnore] keys.sort() strs = [] for k in keys: onm = ".".join([str(objname), str(k)]) if objname else k strs.append( str(k) + ":" + self.serialize(obj[k], level + 1, onm, toBytes=False)) res = "|".join(strs) elif isinstance(obj, Iterable): strs = [] for o in obj: strs.append(self.serialize( o, level + 1, objname, toBytes=False)) res = ",".join(strs) elif obj is None: res = "" else: res = str(obj) # logger.trace("serialized msg {} into {}".format(obj, res)) if not toBytes: return res return res.encode('utf-8')
python
def serialize(self, obj, level=0, objname=None, topLevelKeysToIgnore=None, toBytes=True): """ Create a string representation of the given object. Examples: :: >>> serialize("str") 'str' >>> serialize([1,2,3,4,5]) '1,2,3,4,5' >>> signing.serlize({1:'a', 2:'b'}) '1:a|2:b' >>> signing.serlize({1:'a', 2:'b', 3:[1,{2:'k'}]}) '1:a|2:b|3:1,2:k' :param obj: the object to serlize :param level: a parameter used internally for recursion to serialize nested data structures :param topLevelKeysToIgnore: the list of top level keys to ignore for serialization :return: a string representation of `obj` """ res = None if not isinstance(obj, acceptableTypes): error("invalid type found {}: {}".format(objname, obj)) elif isinstance(obj, str): res = obj elif isinstance(obj, dict): if level > 0: keys = list(obj.keys()) else: topLevelKeysToIgnore = topLevelKeysToIgnore or [] keys = [k for k in obj.keys() if k not in topLevelKeysToIgnore] keys.sort() strs = [] for k in keys: onm = ".".join([str(objname), str(k)]) if objname else k strs.append( str(k) + ":" + self.serialize(obj[k], level + 1, onm, toBytes=False)) res = "|".join(strs) elif isinstance(obj, Iterable): strs = [] for o in obj: strs.append(self.serialize( o, level + 1, objname, toBytes=False)) res = ",".join(strs) elif obj is None: res = "" else: res = str(obj) # logger.trace("serialized msg {} into {}".format(obj, res)) if not toBytes: return res return res.encode('utf-8')
[ "def", "serialize", "(", "self", ",", "obj", ",", "level", "=", "0", ",", "objname", "=", "None", ",", "topLevelKeysToIgnore", "=", "None", ",", "toBytes", "=", "True", ")", ":", "res", "=", "None", "if", "not", "isinstance", "(", "obj", ",", "acceptableTypes", ")", ":", "error", "(", "\"invalid type found {}: {}\"", ".", "format", "(", "objname", ",", "obj", ")", ")", "elif", "isinstance", "(", "obj", ",", "str", ")", ":", "res", "=", "obj", "elif", "isinstance", "(", "obj", ",", "dict", ")", ":", "if", "level", ">", "0", ":", "keys", "=", "list", "(", "obj", ".", "keys", "(", ")", ")", "else", ":", "topLevelKeysToIgnore", "=", "topLevelKeysToIgnore", "or", "[", "]", "keys", "=", "[", "k", "for", "k", "in", "obj", ".", "keys", "(", ")", "if", "k", "not", "in", "topLevelKeysToIgnore", "]", "keys", ".", "sort", "(", ")", "strs", "=", "[", "]", "for", "k", "in", "keys", ":", "onm", "=", "\".\"", ".", "join", "(", "[", "str", "(", "objname", ")", ",", "str", "(", "k", ")", "]", ")", "if", "objname", "else", "k", "strs", ".", "append", "(", "str", "(", "k", ")", "+", "\":\"", "+", "self", ".", "serialize", "(", "obj", "[", "k", "]", ",", "level", "+", "1", ",", "onm", ",", "toBytes", "=", "False", ")", ")", "res", "=", "\"|\"", ".", "join", "(", "strs", ")", "elif", "isinstance", "(", "obj", ",", "Iterable", ")", ":", "strs", "=", "[", "]", "for", "o", "in", "obj", ":", "strs", ".", "append", "(", "self", ".", "serialize", "(", "o", ",", "level", "+", "1", ",", "objname", ",", "toBytes", "=", "False", ")", ")", "res", "=", "\",\"", ".", "join", "(", "strs", ")", "elif", "obj", "is", "None", ":", "res", "=", "\"\"", "else", ":", "res", "=", "str", "(", "obj", ")", "# logger.trace(\"serialized msg {} into {}\".format(obj, res))", "if", "not", "toBytes", ":", "return", "res", "return", "res", ".", "encode", "(", "'utf-8'", ")" ]
Create a string representation of the given object. Examples: :: >>> serialize("str") 'str' >>> serialize([1,2,3,4,5]) '1,2,3,4,5' >>> signing.serialize({1:'a', 2:'b'}) '1:a|2:b' >>> signing.serialize({1:'a', 2:'b', 3:[1,{2:'k'}]}) '1:a|2:b|3:1,2:k' :param obj: the object to serialize :param level: a parameter used internally for recursion to serialize nested data structures :param topLevelKeysToIgnore: the list of top level keys to ignore for serialization :return: a string representation of `obj`
[ "Create", "a", "string", "representation", "of", "the", "given", "object", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/common/serializers/signing_serializer.py#L35-L92
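The signing serializer flattens nested structures into a deterministic string: dict entries become key:value pairs joined by '|' with keys sorted, iterables are joined by ','. A sketch reproducing the docstring examples, assuming SigningSerializer needs no constructor arguments; note the default toBytes=True returns bytes.

from common.serializers.signing_serializer import SigningSerializer

s = SigningSerializer()

print(s.serialize({1: 'a', 2: 'b'}, toBytes=False))                    # '1:a|2:b'
print(s.serialize({1: 'a', 2: 'b', 3: [1, {2: 'k'}]}, toBytes=False))  # '1:a|2:b|3:1,2:k'
print(s.serialize([1, 2, 3, 4, 5]))                                    # b'1,2,3,4,5'

# top-level keys (e.g. the signature itself) can be excluded from the signed payload:
print(s.serialize({'sig': 'xxx', 'op': 'NYM'},
                  topLevelKeysToIgnore=['sig'], toBytes=False))        # 'op:NYM'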
234,158
hyperledger/indy-plenum
plenum/common/transaction_store.py
TransactionStore.reset
def reset(self): """ Clear the values of all attributes of the transaction store. """ self.getsCounter = 0 # dictionary of processed requests for each client. Value for each # client is a dictionary with request id as key and transaction id as # value self.processedRequests = {} # type: Dict[str, Dict[int, str]] # dictionary of responses to be sent for each client. Value for each # client is an asyncio Queue self.responses = {} # type: Dict[str, asyncio.Queue] # dictionary with key as transaction id and `Reply` as # value self.transactions = {}
python
def reset(self): """ Clear the values of all attributes of the transaction store. """ self.getsCounter = 0 # dictionary of processed requests for each client. Value for each # client is a dictionary with request id as key and transaction id as # value self.processedRequests = {} # type: Dict[str, Dict[int, str]] # dictionary of responses to be sent for each client. Value for each # client is an asyncio Queue self.responses = {} # type: Dict[str, asyncio.Queue] # dictionary with key as transaction id and `Reply` as # value self.transactions = {}
[ "def", "reset", "(", "self", ")", ":", "self", ".", "getsCounter", "=", "0", "# dictionary of processed requests for each client. Value for each", "# client is a dictionary with request id as key and transaction id as", "# value", "self", ".", "processedRequests", "=", "{", "}", "# type: Dict[str, Dict[int, str]]", "# dictionary of responses to be sent for each client. Value for each", "# client is an asyncio Queue", "self", ".", "responses", "=", "{", "}", "# type: Dict[str, asyncio.Queue]", "# dictionary with key as transaction id and `Reply` as", "# value", "self", ".", "transactions", "=", "{", "}" ]
Clear the values of all attributes of the transaction store.
[ "Clear", "the", "values", "of", "all", "attributes", "of", "the", "transaction", "store", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/common/transaction_store.py#L33-L50
234,159
hyperledger/indy-plenum
plenum/common/transaction_store.py
TransactionStore.stop
def stop(self, timeout: int = 5) -> None: """ Try to stop the transaction store in the given timeout or raise an exception. """ self.running = False start = time.perf_counter() while True: if self.getsCounter == 0: return True elif time.perf_counter() <= start + timeout: time.sleep(.1) else: raise StopTimeout("Stop timed out waiting for {} gets to " "complete.".format(self.getsCounter))
python
def stop(self, timeout: int = 5) -> None: """ Try to stop the transaction store in the given timeout or raise an exception. """ self.running = False start = time.perf_counter() while True: if self.getsCounter == 0: return True elif time.perf_counter() <= start + timeout: time.sleep(.1) else: raise StopTimeout("Stop timed out waiting for {} gets to " "complete.".format(self.getsCounter))
[ "def", "stop", "(", "self", ",", "timeout", ":", "int", "=", "5", ")", "->", "None", ":", "self", ".", "running", "=", "False", "start", "=", "time", ".", "perf_counter", "(", ")", "while", "True", ":", "if", "self", ".", "getsCounter", "==", "0", ":", "return", "True", "elif", "time", ".", "perf_counter", "(", ")", "<=", "start", "+", "timeout", ":", "time", ".", "sleep", "(", ".1", ")", "else", ":", "raise", "StopTimeout", "(", "\"Stop timed out waiting for {} gets to \"", "\"complete.\"", ".", "format", "(", "self", ".", "getsCounter", ")", ")" ]
Try to stop the transaction store in the given timeout or raise an exception.
[ "Try", "to", "stop", "the", "transaction", "store", "in", "the", "given", "timeout", "or", "raise", "an", "exception", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/common/transaction_store.py#L57-L71
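stop flips running to False and then polls getsCounter for up to timeout seconds, sleeping 0.1s between checks; if outstanding gets never drain it raises StopTimeout (and on success it actually returns True despite the -> None annotation). A sketch on an assumed existing TransactionStore instance; StopTimeout's import location is not shown in these records.

# store is assumed to be an existing TransactionStore instance
print(store.stop(timeout=1))      # True when no gets are in flight

store.getsCounter = 1             # simulate a get that never completes
try:
    store.stop(timeout=1)
except StopTimeout:               # raised after roughly one second of polling
    print('still draining gets')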
234,160
hyperledger/indy-plenum
plenum/common/transaction_store.py
TransactionStore.addToProcessedTxns
def addToProcessedTxns(self, identifier: str, txnId: str, reply: Reply) -> None: """ Add a client request to the transaction store's list of processed requests. """ self.transactions[txnId] = reply if identifier not in self.processedRequests: self.processedRequests[identifier] = {} self.processedRequests[identifier][reply.reqId] = txnId
python
def addToProcessedTxns(self, identifier: str, txnId: str, reply: Reply) -> None: """ Add a client request to the transaction store's list of processed requests. """ self.transactions[txnId] = reply if identifier not in self.processedRequests: self.processedRequests[identifier] = {} self.processedRequests[identifier][reply.reqId] = txnId
[ "def", "addToProcessedTxns", "(", "self", ",", "identifier", ":", "str", ",", "txnId", ":", "str", ",", "reply", ":", "Reply", ")", "->", "None", ":", "self", ".", "transactions", "[", "txnId", "]", "=", "reply", "if", "identifier", "not", "in", "self", ".", "processedRequests", ":", "self", ".", "processedRequests", "[", "identifier", "]", "=", "{", "}", "self", ".", "processedRequests", "[", "identifier", "]", "[", "reply", ".", "reqId", "]", "=", "txnId" ]
Add a client request to the transaction store's list of processed requests.
[ "Add", "a", "client", "request", "to", "the", "transaction", "store", "s", "list", "of", "processed", "requests", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/common/transaction_store.py#L73-L84
234,161
hyperledger/indy-plenum
plenum/common/transaction_store.py
TransactionStore.append
async def append(self, reply: Reply) \ -> None: """ Add the given Reply to this transaction store's list of responses. Also add to processedRequests if not added previously. """ result = reply.result identifier = result.get(f.IDENTIFIER.nm) txnId = result.get(TXN_ID) logger.debug("Reply being sent {}".format(reply)) if self._isNewTxn(identifier, reply, txnId): self.addToProcessedTxns(identifier, txnId, reply) if identifier not in self.responses: self.responses[identifier] = asyncio.Queue() await self.responses[identifier].put(reply)
python
async def append(self, reply: Reply) \ -> None: """ Add the given Reply to this transaction store's list of responses. Also add to processedRequests if not added previously. """ result = reply.result identifier = result.get(f.IDENTIFIER.nm) txnId = result.get(TXN_ID) logger.debug("Reply being sent {}".format(reply)) if self._isNewTxn(identifier, reply, txnId): self.addToProcessedTxns(identifier, txnId, reply) if identifier not in self.responses: self.responses[identifier] = asyncio.Queue() await self.responses[identifier].put(reply)
[ "async", "def", "append", "(", "self", ",", "reply", ":", "Reply", ")", "->", "None", ":", "result", "=", "reply", ".", "result", "identifier", "=", "result", ".", "get", "(", "f", ".", "IDENTIFIER", ".", "nm", ")", "txnId", "=", "result", ".", "get", "(", "TXN_ID", ")", "logger", ".", "debug", "(", "\"Reply being sent {}\"", ".", "format", "(", "reply", ")", ")", "if", "self", ".", "_isNewTxn", "(", "identifier", ",", "reply", ",", "txnId", ")", ":", "self", ".", "addToProcessedTxns", "(", "identifier", ",", "txnId", ",", "reply", ")", "if", "identifier", "not", "in", "self", ".", "responses", ":", "self", ".", "responses", "[", "identifier", "]", "=", "asyncio", ".", "Queue", "(", ")", "await", "self", ".", "responses", "[", "identifier", "]", ".", "put", "(", "reply", ")" ]
Add the given Reply to this transaction store's list of responses. Also add to processedRequests if not added previously.
[ "Add", "the", "given", "Reply", "to", "this", "transaction", "store", "s", "list", "of", "responses", ".", "Also", "add", "to", "processedRequests", "if", "not", "added", "previously", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/common/transaction_store.py#L86-L100
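append extracts the identifier and txn id from reply.result, records the txn once per (identifier, reqId) via _isNewTxn/addToProcessedTxns, and queues the Reply on the per-client responses queue. A sketch with a stand-in Reply, since Reply's constructor is not shown in these records; the 'identifier'/'txnId' keys mirror what f.IDENTIFIER.nm and TXN_ID are expected to resolve to and are illustrative assumptions.

import asyncio
from collections import namedtuple

FakeReply = namedtuple('FakeReply', ['result', 'reqId'])   # stand-in for plenum's Reply

reply = FakeReply(result={'identifier': 'Client1', 'txnId': 'abcd1234'}, reqId=7)

asyncio.run(store.append(reply))            # store: an existing TransactionStore instance

print(store.transactions['abcd1234'])       # the queued reply
print(store.processedRequests['Client1'])   # {7: 'abcd1234'}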
234,162
hyperledger/indy-plenum
plenum/common/transaction_store.py
TransactionStore._isNewTxn
def _isNewTxn(self, identifier, reply, txnId) -> bool: """ If client is not in `processedRequests` or requestId is not there in processed requests and txnId is present then its a new reply """ return (identifier not in self.processedRequests or reply.reqId not in self.processedRequests[identifier]) and \ txnId is not None
python
def _isNewTxn(self, identifier, reply, txnId) -> bool: """ If client is not in `processedRequests` or requestId is not there in processed requests and txnId is present then its a new reply """ return (identifier not in self.processedRequests or reply.reqId not in self.processedRequests[identifier]) and \ txnId is not None
[ "def", "_isNewTxn", "(", "self", ",", "identifier", ",", "reply", ",", "txnId", ")", "->", "bool", ":", "return", "(", "identifier", "not", "in", "self", ".", "processedRequests", "or", "reply", ".", "reqId", "not", "in", "self", ".", "processedRequests", "[", "identifier", "]", ")", "and", "txnId", "is", "not", "None" ]
If client is not in `processedRequests` or requestId is not there in processed requests and txnId is present then it's a new reply
[ "If", "client", "is", "not", "in", "processedRequests", "or", "requestId", "is", "not", "there", "in", "processed", "requests", "and", "txnId", "is", "present", "then", "it's", "a", "new", "reply" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/common/transaction_store.py#L110-L117
234,163
hyperledger/indy-plenum
plenum/server/propagator.py
Requests.add
def add(self, req: Request): """ Add the specified request to this request store. """ key = req.key if key not in self: self[key] = ReqState(req) return self[key]
python
def add(self, req: Request): """ Add the specified request to this request store. """ key = req.key if key not in self: self[key] = ReqState(req) return self[key]
[ "def", "add", "(", "self", ",", "req", ":", "Request", ")", ":", "key", "=", "req", ".", "key", "if", "key", "not", "in", "self", ":", "self", "[", "key", "]", "=", "ReqState", "(", "req", ")", "return", "self", "[", "key", "]" ]
Add the specified request to this request store.
[ "Add", "the", "specified", "request", "to", "this", "request", "store", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/propagator.py#L74-L81
234,164
hyperledger/indy-plenum
plenum/server/propagator.py
Requests.ordered_by_replica
def ordered_by_replica(self, request_key): """ Should be called by each replica when request is ordered or replica is removed. """ state = self.get(request_key) if not state: return state.unordered_by_replicas_num -= 1
python
def ordered_by_replica(self, request_key): """ Should be called by each replica when request is ordered or replica is removed. """ state = self.get(request_key) if not state: return state.unordered_by_replicas_num -= 1
[ "def", "ordered_by_replica", "(", "self", ",", "request_key", ")", ":", "state", "=", "self", ".", "get", "(", "request_key", ")", "if", "not", "state", ":", "return", "state", ".", "unordered_by_replicas_num", "-=", "1" ]
Should be called by each replica when request is ordered or replica is removed.
[ "Should", "be", "called", "by", "each", "replica", "when", "request", "is", "ordered", "or", "replica", "is", "removed", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/propagator.py#L89-L96
234,165
hyperledger/indy-plenum
plenum/server/propagator.py
Requests.mark_as_forwarded
def mark_as_forwarded(self, req: Request, to: int): """ Works together with 'mark_as_executed' and 'free' methods. It marks request as forwarded to 'to' replicas. To let request be removed, it should be marked as executed and each of 'to' replicas should call 'free'. """ self[req.key].forwarded = True self[req.key].forwardedTo = to self[req.key].unordered_by_replicas_num = to
python
def mark_as_forwarded(self, req: Request, to: int): """ Works together with 'mark_as_executed' and 'free' methods. It marks request as forwarded to 'to' replicas. To let request be removed, it should be marked as executed and each of 'to' replicas should call 'free'. """ self[req.key].forwarded = True self[req.key].forwardedTo = to self[req.key].unordered_by_replicas_num = to
[ "def", "mark_as_forwarded", "(", "self", ",", "req", ":", "Request", ",", "to", ":", "int", ")", ":", "self", "[", "req", ".", "key", "]", ".", "forwarded", "=", "True", "self", "[", "req", ".", "key", "]", ".", "forwardedTo", "=", "to", "self", "[", "req", ".", "key", "]", ".", "unordered_by_replicas_num", "=", "to" ]
Works together with 'mark_as_executed' and 'free' methods. It marks request as forwarded to 'to' replicas. To let request be removed, it should be marked as executed and each of 'to' replicas should call 'free'.
[ "Works", "together", "with", "mark_as_executed", "and", "free", "methods", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/propagator.py#L98-L108
234,166
hyperledger/indy-plenum
plenum/server/propagator.py
Requests.add_propagate
def add_propagate(self, req: Request, sender: str): """ Add the specified request to the list of received PROPAGATEs. :param req: the REQUEST to add :param sender: the name of the node sending the msg """ data = self.add(req) data.propagates[sender] = req
python
def add_propagate(self, req: Request, sender: str): """ Add the specified request to the list of received PROPAGATEs. :param req: the REQUEST to add :param sender: the name of the node sending the msg """ data = self.add(req) data.propagates[sender] = req
[ "def", "add_propagate", "(", "self", ",", "req", ":", "Request", ",", "sender", ":", "str", ")", ":", "data", "=", "self", ".", "add", "(", "req", ")", "data", ".", "propagates", "[", "sender", "]", "=", "req" ]
Add the specified request to the list of received PROPAGATEs. :param req: the REQUEST to add :param sender: the name of the node sending the msg
[ "Add", "the", "specified", "request", "to", "the", "list", "of", "received", "PROPAGATEs", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/propagator.py#L110-L119
234,167
hyperledger/indy-plenum
plenum/server/propagator.py
Requests.votes
def votes(self, req) -> int: """ Get the number of propagates for a given reqId and identifier. """ try: votes = len(self[req.key].propagates) except KeyError: votes = 0 return votes
python
def votes(self, req) -> int: """ Get the number of propagates for a given reqId and identifier. """ try: votes = len(self[req.key].propagates) except KeyError: votes = 0 return votes
[ "def", "votes", "(", "self", ",", "req", ")", "->", "int", ":", "try", ":", "votes", "=", "len", "(", "self", "[", "req", ".", "key", "]", ".", "propagates", ")", "except", "KeyError", ":", "votes", "=", "0", "return", "votes" ]
Get the number of propagates for a given reqId and identifier.
[ "Get", "the", "number", "of", "propagates", "for", "a", "given", "reqId", "and", "identifier", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/propagator.py#L121-L129
234,168
hyperledger/indy-plenum
plenum/server/propagator.py
Requests.mark_as_executed
def mark_as_executed(self, req: Request): """ Works together with 'mark_as_forwarded' and 'free' methods. It makes request to be removed if all replicas request was forwarded to freed it. """ state = self[req.key] state.executed = True self._clean(state)
python
def mark_as_executed(self, req: Request): """ Works together with 'mark_as_forwarded' and 'free' methods. It makes request to be removed if all replicas request was forwarded to freed it. """ state = self[req.key] state.executed = True self._clean(state)
[ "def", "mark_as_executed", "(", "self", ",", "req", ":", "Request", ")", ":", "state", "=", "self", "[", "req", ".", "key", "]", "state", ".", "executed", "=", "True", "self", ".", "_clean", "(", "state", ")" ]
Works together with 'mark_as_forwarded' and 'free' methods. It makes request to be removed if all replicas request was forwarded to freed it.
[ "Works", "together", "with", "mark_as_forwarded", "and", "free", "methods", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/propagator.py#L141-L150
234,169
hyperledger/indy-plenum
plenum/server/propagator.py
Requests.free
def free(self, request_key): """ Works together with 'mark_as_forwarded' and 'mark_as_executed' methods. It makes request to be removed if all replicas request was forwarded to freed it and if request executor marked it as executed. """ state = self.get(request_key) if not state: return state.forwardedTo -= 1 self._clean(state)
python
def free(self, request_key): """ Works together with 'mark_as_forwarded' and 'mark_as_executed' methods. It makes request to be removed if all replicas request was forwarded to freed it and if request executor marked it as executed. """ state = self.get(request_key) if not state: return state.forwardedTo -= 1 self._clean(state)
[ "def", "free", "(", "self", ",", "request_key", ")", ":", "state", "=", "self", ".", "get", "(", "request_key", ")", "if", "not", "state", ":", "return", "state", ".", "forwardedTo", "-=", "1", "self", ".", "_clean", "(", "state", ")" ]
Works together with 'mark_as_forwarded' and 'mark_as_executed' methods. It makes request to be removed if all replicas request was forwarded to freed it and if request executor marked it as executed.
[ "Works", "together", "with", "mark_as_forwarded", "and", "mark_as_executed", "methods", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/propagator.py#L152-L164
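Taken together, add, mark_as_forwarded, mark_as_executed and free define the life of an entry in Requests: it can only disappear once it has been marked executed and every replica it was forwarded to has freed it. A sketch, assuming Requests() takes no constructor arguments and req is a signed Request (e.g. from Wallet.signOp above); the _clean helper that actually drops the entry is not shown in these records.

requests = Requests()

requests.add(req)
requests.mark_as_forwarded(req, 2)     # forwarded to 2 replicas

requests.mark_as_executed(req)         # executed, but both replicas still hold it
requests.free(req.key)                 # replica 0 finished ordering
requests.free(req.key)                 # replica 1 finished -> entry eligible for cleanup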
234,170
hyperledger/indy-plenum
plenum/server/propagator.py
Requests.has_propagated
def has_propagated(self, req: Request, sender: str) -> bool: """ Check whether the request specified has already been propagated. """ return req.key in self and sender in self[req.key].propagates
python
def has_propagated(self, req: Request, sender: str) -> bool: """ Check whether the request specified has already been propagated. """ return req.key in self and sender in self[req.key].propagates
[ "def", "has_propagated", "(", "self", ",", "req", ":", "Request", ",", "sender", ":", "str", ")", "->", "bool", ":", "return", "req", ".", "key", "in", "self", "and", "sender", "in", "self", "[", "req", ".", "key", "]", ".", "propagates" ]
Check whether the request specified has already been propagated.
[ "Check", "whether", "the", "request", "specified", "has", "already", "been", "propagated", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/propagator.py#L180-L184
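add_propagate, votes and has_propagated implement PROPAGATE bookkeeping: each sender is recorded at most once per request, votes counts the distinct senders, and has_propagated answers whether a given node already propagated it. A sketch under the same Requests/req assumptions as above.

requests = Requests()

requests.add_propagate(req, 'Alpha')
requests.add_propagate(req, 'Beta')
requests.add_propagate(req, 'Alpha')           # same sender again: not double-counted

print(requests.votes(req))                     # 2
print(requests.has_propagated(req, 'Beta'))    # True
print(requests.has_propagated(req, 'Gamma'))   # False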
234,171
hyperledger/indy-plenum
plenum/server/propagator.py
Propagator.propagate
def propagate(self, request: Request, clientName): """ Broadcast a PROPAGATE to all other nodes :param request: the REQUEST to propagate """ if self.requests.has_propagated(request, self.name): logger.trace("{} already propagated {}".format(self, request)) else: with self.metrics.measure_time(MetricsName.SEND_PROPAGATE_TIME): self.requests.add_propagate(request, self.name) propagate = self.createPropagate(request, clientName) logger.debug("{} propagating request {} from client {}".format(self, request.key, clientName), extra={"cli": True, "tags": ["node-propagate"]}) self.send(propagate)
python
def propagate(self, request: Request, clientName): """ Broadcast a PROPAGATE to all other nodes :param request: the REQUEST to propagate """ if self.requests.has_propagated(request, self.name): logger.trace("{} already propagated {}".format(self, request)) else: with self.metrics.measure_time(MetricsName.SEND_PROPAGATE_TIME): self.requests.add_propagate(request, self.name) propagate = self.createPropagate(request, clientName) logger.debug("{} propagating request {} from client {}".format(self, request.key, clientName), extra={"cli": True, "tags": ["node-propagate"]}) self.send(propagate)
[ "def", "propagate", "(", "self", ",", "request", ":", "Request", ",", "clientName", ")", ":", "if", "self", ".", "requests", ".", "has_propagated", "(", "request", ",", "self", ".", "name", ")", ":", "logger", ".", "trace", "(", "\"{} already propagated {}\"", ".", "format", "(", "self", ",", "request", ")", ")", "else", ":", "with", "self", ".", "metrics", ".", "measure_time", "(", "MetricsName", ".", "SEND_PROPAGATE_TIME", ")", ":", "self", ".", "requests", ".", "add_propagate", "(", "request", ",", "self", ".", "name", ")", "propagate", "=", "self", ".", "createPropagate", "(", "request", ",", "clientName", ")", "logger", ".", "debug", "(", "\"{} propagating request {} from client {}\"", ".", "format", "(", "self", ",", "request", ".", "key", ",", "clientName", ")", ",", "extra", "=", "{", "\"cli\"", ":", "True", ",", "\"tags\"", ":", "[", "\"node-propagate\"", "]", "}", ")", "self", ".", "send", "(", "propagate", ")" ]
Broadcast a PROPAGATE to all other nodes :param request: the REQUEST to propagate
[ "Broadcast", "a", "PROPAGATE", "to", "all", "other", "nodes" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/propagator.py#L203-L217
234,172
hyperledger/indy-plenum
plenum/server/propagator.py
Propagator.createPropagate
def createPropagate( request: Union[Request, dict], client_name) -> Propagate: """ Create a new PROPAGATE for the given REQUEST. :param request: the client REQUEST :return: a new PROPAGATE msg """ if not isinstance(request, (Request, dict)): logger.error("{}Request not formatted properly to create propagate" .format(THREE_PC_PREFIX)) return logger.trace("Creating PROPAGATE for REQUEST {}".format(request)) request = request.as_dict if isinstance(request, Request) else \ request if isinstance(client_name, bytes): client_name = client_name.decode() return Propagate(request, client_name)
python
def createPropagate( request: Union[Request, dict], client_name) -> Propagate: """ Create a new PROPAGATE for the given REQUEST. :param request: the client REQUEST :return: a new PROPAGATE msg """ if not isinstance(request, (Request, dict)): logger.error("{}Request not formatted properly to create propagate" .format(THREE_PC_PREFIX)) return logger.trace("Creating PROPAGATE for REQUEST {}".format(request)) request = request.as_dict if isinstance(request, Request) else \ request if isinstance(client_name, bytes): client_name = client_name.decode() return Propagate(request, client_name)
[ "def", "createPropagate", "(", "request", ":", "Union", "[", "Request", ",", "dict", "]", ",", "client_name", ")", "->", "Propagate", ":", "if", "not", "isinstance", "(", "request", ",", "(", "Request", ",", "dict", ")", ")", ":", "logger", ".", "error", "(", "\"{}Request not formatted properly to create propagate\"", ".", "format", "(", "THREE_PC_PREFIX", ")", ")", "return", "logger", ".", "trace", "(", "\"Creating PROPAGATE for REQUEST {}\"", ".", "format", "(", "request", ")", ")", "request", "=", "request", ".", "as_dict", "if", "isinstance", "(", "request", ",", "Request", ")", "else", "request", "if", "isinstance", "(", "client_name", ",", "bytes", ")", ":", "client_name", "=", "client_name", ".", "decode", "(", ")", "return", "Propagate", "(", "request", ",", "client_name", ")" ]
Create a new PROPAGATE for the given REQUEST. :param request: the client REQUEST :return: a new PROPAGATE msg
[ "Create", "a", "new", "PROPAGATE", "for", "the", "given", "REQUEST", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/propagator.py#L220-L237
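createPropagate accepts either a Request or an already-serialised dict, decodes a bytes client name, and returns a Propagate; anything else is logged as an error and nothing is returned. A sketch, assuming the method is callable on the class (its signature has no self) and using an illustrative request dict; Propagate's own field layout is not shown in these records.

req_as_dict = {'identifier': 'Client1', 'reqId': 7, 'operation': {'type': '1'}}

msg = Propagator.createPropagate(req_as_dict, b'Client1')   # bytes name is decoded to 'Client1'

bad = Propagator.createPropagate(['not', 'a', 'request'], 'Client1')
print(bad)                                                  # None; an error was logged instead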
234,173
hyperledger/indy-plenum
plenum/server/propagator.py
Propagator.forward
def forward(self, request: Request): """ Forward the specified client REQUEST to the other replicas on this node :param request: the REQUEST to propagate """ key = request.key num_replicas = self.replicas.num_replicas logger.debug('{} forwarding request {} to {} replicas' .format(self, key, num_replicas)) self.replicas.pass_message(ReqKey(key)) self.monitor.requestUnOrdered(key) self.requests.mark_as_forwarded(request, num_replicas)
python
def forward(self, request: Request): """ Forward the specified client REQUEST to the other replicas on this node :param request: the REQUEST to propagate """ key = request.key num_replicas = self.replicas.num_replicas logger.debug('{} forwarding request {} to {} replicas' .format(self, key, num_replicas)) self.replicas.pass_message(ReqKey(key)) self.monitor.requestUnOrdered(key) self.requests.mark_as_forwarded(request, num_replicas)
[ "def", "forward", "(", "self", ",", "request", ":", "Request", ")", ":", "key", "=", "request", ".", "key", "num_replicas", "=", "self", ".", "replicas", ".", "num_replicas", "logger", ".", "debug", "(", "'{} forwarding request {} to {} replicas'", ".", "format", "(", "self", ",", "key", ",", "num_replicas", ")", ")", "self", ".", "replicas", ".", "pass_message", "(", "ReqKey", "(", "key", ")", ")", "self", ".", "monitor", ".", "requestUnOrdered", "(", "key", ")", "self", ".", "requests", ".", "mark_as_forwarded", "(", "request", ",", "num_replicas", ")" ]
Forward the specified client REQUEST to the other replicas on this node :param request: the REQUEST to propagate
[ "Forward", "the", "specified", "client", "REQUEST", "to", "the", "other", "replicas", "on", "this", "node" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/propagator.py#L273-L285
234,174
hyperledger/indy-plenum
plenum/server/propagator.py
Propagator.recordAndPropagate
def recordAndPropagate(self, request: Request, clientName): """ Record the request in the list of requests and propagate. :param request: :param clientName: """ self.requests.add(request) self.propagate(request, clientName) self.tryForwarding(request)
python
def recordAndPropagate(self, request: Request, clientName): """ Record the request in the list of requests and propagate. :param request: :param clientName: """ self.requests.add(request) self.propagate(request, clientName) self.tryForwarding(request)
[ "def", "recordAndPropagate", "(", "self", ",", "request", ":", "Request", ",", "clientName", ")", ":", "self", ".", "requests", ".", "add", "(", "request", ")", "self", ".", "propagate", "(", "request", ",", "clientName", ")", "self", ".", "tryForwarding", "(", "request", ")" ]
Record the request in the list of requests and propagate. :param request: :param clientName:
[ "Record", "the", "request", "in", "the", "list", "of", "requests", "and", "propagate", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/propagator.py#L288-L297
234,175
hyperledger/indy-plenum
plenum/server/propagator.py
Propagator.tryForwarding
def tryForwarding(self, request: Request): """ Try to forward the request if the required conditions are met. See the method `canForward` for the conditions to check before forwarding a request. """ cannot_reason_msg = self.canForward(request) if cannot_reason_msg is None: # If haven't got the client request(REQUEST) for the corresponding # propagate request(PROPAGATE) but have enough propagate requests # to move ahead self.forward(request) else: logger.trace("{} not forwarding request {} to its replicas " "since {}".format(self, request, cannot_reason_msg))
python
def tryForwarding(self, request: Request): """ Try to forward the request if the required conditions are met. See the method `canForward` for the conditions to check before forwarding a request. """ cannot_reason_msg = self.canForward(request) if cannot_reason_msg is None: # If haven't got the client request(REQUEST) for the corresponding # propagate request(PROPAGATE) but have enough propagate requests # to move ahead self.forward(request) else: logger.trace("{} not forwarding request {} to its replicas " "since {}".format(self, request, cannot_reason_msg))
[ "def", "tryForwarding", "(", "self", ",", "request", ":", "Request", ")", ":", "cannot_reason_msg", "=", "self", ".", "canForward", "(", "request", ")", "if", "cannot_reason_msg", "is", "None", ":", "# If haven't got the client request(REQUEST) for the corresponding", "# propagate request(PROPAGATE) but have enough propagate requests", "# to move ahead", "self", ".", "forward", "(", "request", ")", "else", ":", "logger", ".", "trace", "(", "\"{} not forwarding request {} to its replicas \"", "\"since {}\"", ".", "format", "(", "self", ",", "request", ",", "cannot_reason_msg", ")", ")" ]
Try to forward the request if the required conditions are met. See the method `canForward` for the conditions to check before forwarding a request.
[ "Try", "to", "forward", "the", "request", "if", "the", "required", "conditions", "are", "met", ".", "See", "the", "method", "canForward", "for", "the", "conditions", "to", "check", "before", "forwarding", "a", "request", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/propagator.py#L299-L313
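The canForward guard above returns None when forwarding is allowed and a reason string otherwise; a small standalone sketch of that convention (names are illustrative), which keeps the "not forwarding since ..." log message informative.

def can_forward(have_quorum, already_forwarded):
    # None means "OK to forward"; any string is the reason it is blocked
    if not have_quorum:
        return "not enough PROPAGATEs yet"
    if already_forwarded:
        return "already forwarded"
    return None

reason = can_forward(have_quorum=True, already_forwarded=False)
if reason is None:
    print("forwarding request to replicas")
else:
    print("not forwarding since", reason)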
234,176
hyperledger/indy-plenum
stp_zmq/zstack.py
ZStack.removeRemote
def removeRemote(self, remote: Remote, clear=True): """ Currently not using clear """ name = remote.name pkey = remote.publicKey vkey = remote.verKey if name in self.remotes: self.remotes.pop(name) self.remotesByKeys.pop(pkey, None) self.verifiers.pop(vkey, None) else: logger.info('No remote named {} present'.format(name))
python
def removeRemote(self, remote: Remote, clear=True): """ Currently not using clear """ name = remote.name pkey = remote.publicKey vkey = remote.verKey if name in self.remotes: self.remotes.pop(name) self.remotesByKeys.pop(pkey, None) self.verifiers.pop(vkey, None) else: logger.info('No remote named {} present'.format(name))
[ "def", "removeRemote", "(", "self", ",", "remote", ":", "Remote", ",", "clear", "=", "True", ")", ":", "name", "=", "remote", ".", "name", "pkey", "=", "remote", ".", "publicKey", "vkey", "=", "remote", ".", "verKey", "if", "name", "in", "self", ".", "remotes", ":", "self", ".", "remotes", ".", "pop", "(", "name", ")", "self", ".", "remotesByKeys", ".", "pop", "(", "pkey", ",", "None", ")", "self", ".", "verifiers", ".", "pop", "(", "vkey", ",", "None", ")", "else", ":", "logger", ".", "info", "(", "'No remote named {} present'", ")" ]
Currently not using clear
[ "Currently", "not", "using", "clear" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/stp_zmq/zstack.py#L153-L165
234,177
hyperledger/indy-plenum
stp_zmq/zstack.py
ZStack.service
async def service(self, limit=None, quota: Optional[Quota] = None) -> int: """ Service `limit` number of received messages in this stack. :param limit: the maximum number of messages to be processed. If None, processes all of the messages in rxMsgs. :return: the number of messages processed. """ if self.listener: await self._serviceStack(self.age, quota) else: logger.info("{} is stopped".format(self)) r = len(self.rxMsgs) if r > 0: pracLimit = limit if limit else sys.maxsize return self.processReceived(pracLimit) return 0
python
async def service(self, limit=None, quota: Optional[Quota] = None) -> int: """ Service `limit` number of received messages in this stack. :param limit: the maximum number of messages to be processed. If None, processes all of the messages in rxMsgs. :return: the number of messages processed. """ if self.listener: await self._serviceStack(self.age, quota) else: logger.info("{} is stopped".format(self)) r = len(self.rxMsgs) if r > 0: pracLimit = limit if limit else sys.maxsize return self.processReceived(pracLimit) return 0
[ "async", "def", "service", "(", "self", ",", "limit", "=", "None", ",", "quota", ":", "Optional", "[", "Quota", "]", "=", "None", ")", "->", "int", ":", "if", "self", ".", "listener", ":", "await", "self", ".", "_serviceStack", "(", "self", ".", "age", ",", "quota", ")", "else", ":", "logger", ".", "info", "(", "\"{} is stopped\"", ".", "format", "(", "self", ")", ")", "r", "=", "len", "(", "self", ".", "rxMsgs", ")", "if", "r", ">", "0", ":", "pracLimit", "=", "limit", "if", "limit", "else", "sys", ".", "maxsize", "return", "self", ".", "processReceived", "(", "pracLimit", ")", "return", "0" ]
Service `limit` number of received messages in this stack. :param limit: the maximum number of messages to be processed. If None, processes all of the messages in rxMsgs. :return: the number of messages processed.
[ "Service", "limit", "number", "of", "received", "messages", "in", "this", "stack", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/stp_zmq/zstack.py#L445-L462
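A simplified sketch of the "service at most limit messages" pattern used above, with a plain deque standing in for rxMsgs (the real method also polls the listener socket first).

import sys
from collections import deque

def process_received(rx_msgs, limit=None):
    prac_limit = limit if limit else sys.maxsize
    processed = 0
    while rx_msgs and processed < prac_limit:
        rx_msgs.popleft()          # handle the message here
        processed += 1
    return processed

queue = deque(["m1", "m2", "m3"])
print(process_received(queue, limit=2))   # 2; one message stays queued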
234,178
hyperledger/indy-plenum
stp_zmq/zstack.py
ZStack.connect
def connect(self, name=None, remoteId=None, ha=None, verKeyRaw=None, publicKeyRaw=None): """ Connect to the node specified by name. """ if not name: raise ValueError('Remote name should be specified') publicKey = None if name in self.remotes: remote = self.remotes[name] else: publicKey = z85.encode( publicKeyRaw) if publicKeyRaw else self.getPublicKey(name) verKey = z85.encode( verKeyRaw) if verKeyRaw else self.getVerKey(name) if not ha or not publicKey or (self.isRestricted and not verKey): raise ValueError('{} doesnt have enough info to connect. ' 'Need ha, public key and verkey. {} {} {}'. format(name, ha, verKey, publicKey)) remote = self.addRemote(name, ha, verKey, publicKey) public, secret = self.selfEncKeys remote.connect(self.ctx, public, secret) logger.info("{}{} looking for {} at {}:{}" .format(CONNECTION_PREFIX, self, name or remote.name, *remote.ha), extra={"cli": "PLAIN", "tags": ["node-looking"]}) # This should be scheduled as an async task self.sendPingPong(remote, is_ping=True) # re-send previously stashed pings/pongs from unknown remotes logger.trace("{} stashed pongs: {}".format(self.name, str(self._stashed_pongs))) if publicKey in self._stashed_pongs: logger.trace("{} sending stashed pongs to {}".format(self.name, str(z85_to_friendly(publicKey)))) self._stashed_pongs.discard(publicKey) self.sendPingPong(name, is_ping=False) return remote.uid
python
def connect(self, name=None, remoteId=None, ha=None, verKeyRaw=None, publicKeyRaw=None): """ Connect to the node specified by name. """ if not name: raise ValueError('Remote name should be specified') publicKey = None if name in self.remotes: remote = self.remotes[name] else: publicKey = z85.encode( publicKeyRaw) if publicKeyRaw else self.getPublicKey(name) verKey = z85.encode( verKeyRaw) if verKeyRaw else self.getVerKey(name) if not ha or not publicKey or (self.isRestricted and not verKey): raise ValueError('{} doesnt have enough info to connect. ' 'Need ha, public key and verkey. {} {} {}'. format(name, ha, verKey, publicKey)) remote = self.addRemote(name, ha, verKey, publicKey) public, secret = self.selfEncKeys remote.connect(self.ctx, public, secret) logger.info("{}{} looking for {} at {}:{}" .format(CONNECTION_PREFIX, self, name or remote.name, *remote.ha), extra={"cli": "PLAIN", "tags": ["node-looking"]}) # This should be scheduled as an async task self.sendPingPong(remote, is_ping=True) # re-send previously stashed pings/pongs from unknown remotes logger.trace("{} stashed pongs: {}".format(self.name, str(self._stashed_pongs))) if publicKey in self._stashed_pongs: logger.trace("{} sending stashed pongs to {}".format(self.name, str(z85_to_friendly(publicKey)))) self._stashed_pongs.discard(publicKey) self.sendPingPong(name, is_ping=False) return remote.uid
[ "def", "connect", "(", "self", ",", "name", "=", "None", ",", "remoteId", "=", "None", ",", "ha", "=", "None", ",", "verKeyRaw", "=", "None", ",", "publicKeyRaw", "=", "None", ")", ":", "if", "not", "name", ":", "raise", "ValueError", "(", "'Remote name should be specified'", ")", "publicKey", "=", "None", "if", "name", "in", "self", ".", "remotes", ":", "remote", "=", "self", ".", "remotes", "[", "name", "]", "else", ":", "publicKey", "=", "z85", ".", "encode", "(", "publicKeyRaw", ")", "if", "publicKeyRaw", "else", "self", ".", "getPublicKey", "(", "name", ")", "verKey", "=", "z85", ".", "encode", "(", "verKeyRaw", ")", "if", "verKeyRaw", "else", "self", ".", "getVerKey", "(", "name", ")", "if", "not", "ha", "or", "not", "publicKey", "or", "(", "self", ".", "isRestricted", "and", "not", "verKey", ")", ":", "raise", "ValueError", "(", "'{} doesnt have enough info to connect. '", "'Need ha, public key and verkey. {} {} {}'", ".", "format", "(", "name", ",", "ha", ",", "verKey", ",", "publicKey", ")", ")", "remote", "=", "self", ".", "addRemote", "(", "name", ",", "ha", ",", "verKey", ",", "publicKey", ")", "public", ",", "secret", "=", "self", ".", "selfEncKeys", "remote", ".", "connect", "(", "self", ".", "ctx", ",", "public", ",", "secret", ")", "logger", ".", "info", "(", "\"{}{} looking for {} at {}:{}\"", ".", "format", "(", "CONNECTION_PREFIX", ",", "self", ",", "name", "or", "remote", ".", "name", ",", "*", "remote", ".", "ha", ")", ",", "extra", "=", "{", "\"cli\"", ":", "\"PLAIN\"", ",", "\"tags\"", ":", "[", "\"node-looking\"", "]", "}", ")", "# This should be scheduled as an async task", "self", ".", "sendPingPong", "(", "remote", ",", "is_ping", "=", "True", ")", "# re-send previously stashed pings/pongs from unknown remotes", "logger", ".", "trace", "(", "\"{} stashed pongs: {}\"", ".", "format", "(", "self", ".", "name", ",", "str", "(", "self", ".", "_stashed_pongs", ")", ")", ")", "if", "publicKey", "in", "self", ".", "_stashed_pongs", ":", "logger", ".", "trace", "(", "\"{} sending stashed pongs to {}\"", ".", "format", "(", "self", ".", "name", ",", "str", "(", "z85_to_friendly", "(", "publicKey", ")", ")", ")", ")", "self", ".", "_stashed_pongs", ".", "discard", "(", "publicKey", ")", "self", ".", "sendPingPong", "(", "name", ",", "is_ping", "=", "False", ")", "return", "remote", ".", "uid" ]
Connect to the node specified by name.
[ "Connect", "to", "the", "node", "specified", "by", "name", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/stp_zmq/zstack.py#L584-L629
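The connect path above z85-encodes raw 32-byte CurveZMQ keys before attaching them to the remote; a small standalone illustration of that encoding step using pyzmq's z85 helper (the key bytes below are a stand-in, not a real key).

from zmq.utils import z85

raw_public_key = bytes(range(32))          # placeholder for a Curve public key
encoded = z85.encode(raw_public_key)       # 40-byte Z85 text form
assert z85.decode(encoded) == raw_public_key
print(encoded)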
234,179
hyperledger/indy-plenum
stp_zmq/zstack.py
ZStack.reconnectRemote
def reconnectRemote(self, remote): """ Disconnect remote and connect to it again :param remote: instance of Remote from self.remotes :param remoteName: name of remote :return: """ if not isinstance(remote, Remote): raise PlenumTypeError('remote', remote, Remote) logger.info('{} reconnecting to {}'.format(self, remote)) public, secret = self.selfEncKeys remote.disconnect() remote.connect(self.ctx, public, secret) self.sendPingPong(remote, is_ping=True)
python
def reconnectRemote(self, remote): """ Disconnect remote and connect to it again :param remote: instance of Remote from self.remotes :param remoteName: name of remote :return: """ if not isinstance(remote, Remote): raise PlenumTypeError('remote', remote, Remote) logger.info('{} reconnecting to {}'.format(self, remote)) public, secret = self.selfEncKeys remote.disconnect() remote.connect(self.ctx, public, secret) self.sendPingPong(remote, is_ping=True)
[ "def", "reconnectRemote", "(", "self", ",", "remote", ")", ":", "if", "not", "isinstance", "(", "remote", ",", "Remote", ")", ":", "raise", "PlenumTypeError", "(", "'remote'", ",", "remote", ",", "Remote", ")", "logger", ".", "info", "(", "'{} reconnecting to {}'", ".", "format", "(", "self", ",", "remote", ")", ")", "public", ",", "secret", "=", "self", ".", "selfEncKeys", "remote", ".", "disconnect", "(", ")", "remote", ".", "connect", "(", "self", ".", "ctx", ",", "public", ",", "secret", ")", "self", ".", "sendPingPong", "(", "remote", ",", "is_ping", "=", "True", ")" ]
Disconnect remote and connect to it again :param remote: instance of Remote from self.remotes :param remoteName: name of remote :return:
[ "Disconnect", "remote", "and", "connect", "to", "it", "again" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/stp_zmq/zstack.py#L631-L645
234,180
hyperledger/indy-plenum
plenum/persistence/db_hash_store.py
DbHashStore._readMultiple
def _readMultiple(self, start, end, db): """ Returns a list of hashes with serial numbers between start and end, both inclusive. """ self._validatePos(start, end) # Converting any bytearray to bytes return [bytes(db.get(str(pos))) for pos in range(start, end + 1)]
python
def _readMultiple(self, start, end, db): """ Returns a list of hashes with serial numbers between start and end, both inclusive. """ self._validatePos(start, end) # Converting any bytearray to bytes return [bytes(db.get(str(pos))) for pos in range(start, end + 1)]
[ "def", "_readMultiple", "(", "self", ",", "start", ",", "end", ",", "db", ")", ":", "self", ".", "_validatePos", "(", "start", ",", "end", ")", "# Converting any bytearray to bytes", "return", "[", "bytes", "(", "db", ".", "get", "(", "str", "(", "pos", ")", ")", ")", "for", "pos", "in", "range", "(", "start", ",", "end", "+", "1", ")", "]" ]
Returns a list of hashes with serial numbers between start and end, both inclusive.
[ "Returns", "a", "list", "of", "hashes", "with", "serial", "numbers", "between", "start", "and", "end", "both", "inclusive", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/persistence/db_hash_store.py#L67-L74
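A minimal sketch of the inclusive range read, with a plain dict standing in for the underlying key-value store (keys are stringified serial numbers, as above).

db = {str(i): bytearray([i]) for i in range(1, 6)}

def read_multiple(start, end, db):
    # both ends inclusive; bytearrays normalised to bytes
    return [bytes(db[str(pos)]) for pos in range(start, end + 1)]

print(read_multiple(2, 4, db))   # [b'\x02', b'\x03', b'\x04']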
234,181
hyperledger/indy-plenum
state/trie/pruning_trie.py
pack_nibbles
def pack_nibbles(nibbles): """pack nibbles to binary :param nibbles: a nibbles sequence. may have a terminator """ if nibbles[-1] == NIBBLE_TERMINATOR: flags = 2 nibbles = nibbles[:-1] else: flags = 0 oddlen = len(nibbles) % 2 flags |= oddlen # set lowest bit if odd number of nibbles if oddlen: nibbles = [flags] + nibbles else: nibbles = [flags, 0] + nibbles o = b'' for i in range(0, len(nibbles), 2): o += ascii_chr(16 * nibbles[i] + nibbles[i + 1]) return o
python
def pack_nibbles(nibbles): """pack nibbles to binary :param nibbles: a nibbles sequence. may have a terminator """ if nibbles[-1] == NIBBLE_TERMINATOR: flags = 2 nibbles = nibbles[:-1] else: flags = 0 oddlen = len(nibbles) % 2 flags |= oddlen # set lowest bit if odd number of nibbles if oddlen: nibbles = [flags] + nibbles else: nibbles = [flags, 0] + nibbles o = b'' for i in range(0, len(nibbles), 2): o += ascii_chr(16 * nibbles[i] + nibbles[i + 1]) return o
[ "def", "pack_nibbles", "(", "nibbles", ")", ":", "if", "nibbles", "[", "-", "1", "]", "==", "NIBBLE_TERMINATOR", ":", "flags", "=", "2", "nibbles", "=", "nibbles", "[", ":", "-", "1", "]", "else", ":", "flags", "=", "0", "oddlen", "=", "len", "(", "nibbles", ")", "%", "2", "flags", "|=", "oddlen", "# set lowest bit if odd number of nibbles", "if", "oddlen", ":", "nibbles", "=", "[", "flags", "]", "+", "nibbles", "else", ":", "nibbles", "=", "[", "flags", ",", "0", "]", "+", "nibbles", "o", "=", "b''", "for", "i", "in", "range", "(", "0", ",", "len", "(", "nibbles", ")", ",", "2", ")", ":", "o", "+=", "ascii_chr", "(", "16", "*", "nibbles", "[", "i", "]", "+", "nibbles", "[", "i", "+", "1", "]", ")", "return", "o" ]
pack nibbles to binary :param nibbles: a nibbles sequence. may have a terminator
[ "pack", "nibbles", "to", "binary" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/state/trie/pruning_trie.py#L140-L161
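A self-contained restatement of the hex-prefix packing above plus a worked call, to make the flag nibble visible: bit 1 of the flags marks a terminated (leaf) key, bit 0 marks an odd nibble count, and even-length keys get an extra padding nibble.

NIBBLE_TERMINATOR = 16

def pack_nibbles(nibbles):
    flags = 0
    if nibbles and nibbles[-1] == NIBBLE_TERMINATOR:
        flags = 2
        nibbles = nibbles[:-1]
    oddlen = len(nibbles) % 2
    flags |= oddlen
    nibbles = [flags] + nibbles if oddlen else [flags, 0] + nibbles
    return bytes(16 * nibbles[i] + nibbles[i + 1] for i in range(0, len(nibbles), 2))

# [6, 1, 6, 2] are the nibbles of b'ab'; the terminator marks a leaf key
print(pack_nibbles([6, 1, 6, 2, NIBBLE_TERMINATOR]).hex())   # 206162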
234,182
hyperledger/indy-plenum
state/trie/pruning_trie.py
Trie._get_last_node_for_prfx
def _get_last_node_for_prfx(self, node, key_prfx, seen_prfx): """ get last node for the given prefix, also update `seen_prfx` to track the path already traversed :param node: node in form of list, or BLANK_NODE :param key_prfx: prefix to look for :param seen_prfx: prefix already seen, updates with each call :return: BLANK_NODE if does not exist, otherwise value or hash """ node_type = self._get_node_type(node) if node_type == NODE_TYPE_BLANK: return BLANK_NODE if node_type == NODE_TYPE_BRANCH: # already reach the expected node if not key_prfx: return node sub_node = self._decode_to_node(node[key_prfx[0]]) seen_prfx.append(key_prfx[0]) return self._get_last_node_for_prfx(sub_node, key_prfx[1:], seen_prfx) # key value node curr_key = key_nibbles_from_key_value_node(node) if node_type == NODE_TYPE_LEAF: # Return this node only if the complete prefix is part of the current key if starts_with(curr_key, key_prfx): # Do not update `seen_prefix` as node has the prefix return node else: return BLANK_NODE if node_type == NODE_TYPE_EXTENSION: # traverse child nodes if len(key_prfx) > len(curr_key): if starts_with(key_prfx, curr_key): sub_node = self._get_inner_node_from_extension(node) seen_prfx.extend(curr_key) return self._get_last_node_for_prfx(sub_node, key_prfx[len(curr_key):], seen_prfx) else: return BLANK_NODE else: if starts_with(curr_key, key_prfx): # Do not update `seen_prefix` as node has the prefix return node else: return BLANK_NODE
python
def _get_last_node_for_prfx(self, node, key_prfx, seen_prfx): """ get last node for the given prefix, also update `seen_prfx` to track the path already traversed :param node: node in form of list, or BLANK_NODE :param key_prfx: prefix to look for :param seen_prfx: prefix already seen, updates with each call :return: BLANK_NODE if does not exist, otherwise value or hash """ node_type = self._get_node_type(node) if node_type == NODE_TYPE_BLANK: return BLANK_NODE if node_type == NODE_TYPE_BRANCH: # already reach the expected node if not key_prfx: return node sub_node = self._decode_to_node(node[key_prfx[0]]) seen_prfx.append(key_prfx[0]) return self._get_last_node_for_prfx(sub_node, key_prfx[1:], seen_prfx) # key value node curr_key = key_nibbles_from_key_value_node(node) if node_type == NODE_TYPE_LEAF: # Return this node only if the complete prefix is part of the current key if starts_with(curr_key, key_prfx): # Do not update `seen_prefix` as node has the prefix return node else: return BLANK_NODE if node_type == NODE_TYPE_EXTENSION: # traverse child nodes if len(key_prfx) > len(curr_key): if starts_with(key_prfx, curr_key): sub_node = self._get_inner_node_from_extension(node) seen_prfx.extend(curr_key) return self._get_last_node_for_prfx(sub_node, key_prfx[len(curr_key):], seen_prfx) else: return BLANK_NODE else: if starts_with(curr_key, key_prfx): # Do not update `seen_prefix` as node has the prefix return node else: return BLANK_NODE
[ "def", "_get_last_node_for_prfx", "(", "self", ",", "node", ",", "key_prfx", ",", "seen_prfx", ")", ":", "node_type", "=", "self", ".", "_get_node_type", "(", "node", ")", "if", "node_type", "==", "NODE_TYPE_BLANK", ":", "return", "BLANK_NODE", "if", "node_type", "==", "NODE_TYPE_BRANCH", ":", "# already reach the expected node", "if", "not", "key_prfx", ":", "return", "node", "sub_node", "=", "self", ".", "_decode_to_node", "(", "node", "[", "key_prfx", "[", "0", "]", "]", ")", "seen_prfx", ".", "append", "(", "key_prfx", "[", "0", "]", ")", "return", "self", ".", "_get_last_node_for_prfx", "(", "sub_node", ",", "key_prfx", "[", "1", ":", "]", ",", "seen_prfx", ")", "# key value node", "curr_key", "=", "key_nibbles_from_key_value_node", "(", "node", ")", "if", "node_type", "==", "NODE_TYPE_LEAF", ":", "# Return this node only if the complete prefix is part of the current key", "if", "starts_with", "(", "curr_key", ",", "key_prfx", ")", ":", "# Do not update `seen_prefix` as node has the prefix", "return", "node", "else", ":", "return", "BLANK_NODE", "if", "node_type", "==", "NODE_TYPE_EXTENSION", ":", "# traverse child nodes", "if", "len", "(", "key_prfx", ")", ">", "len", "(", "curr_key", ")", ":", "if", "starts_with", "(", "key_prfx", ",", "curr_key", ")", ":", "sub_node", "=", "self", ".", "_get_inner_node_from_extension", "(", "node", ")", "seen_prfx", ".", "extend", "(", "curr_key", ")", "return", "self", ".", "_get_last_node_for_prfx", "(", "sub_node", ",", "key_prfx", "[", "len", "(", "curr_key", ")", ":", "]", ",", "seen_prfx", ")", "else", ":", "return", "BLANK_NODE", "else", ":", "if", "starts_with", "(", "curr_key", ",", "key_prfx", ")", ":", "# Do not update `seen_prefix` as node has the prefix", "return", "node", "else", ":", "return", "BLANK_NODE" ]
get last node for the given prefix, also update `seen_prfx` to track the path already traversed :param node: node in form of list, or BLANK_NODE :param key_prfx: prefix to look for :param seen_prfx: prefix already seen, updates with each call :return: BLANK_NODE if does not exist, otherwise value or hash
[ "get", "last", "node", "for", "the", "given", "prefix", "also", "update", "seen_prfx", "to", "track", "the", "path", "already", "traversed" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/state/trie/pruning_trie.py#L410-L459
234,183
hyperledger/indy-plenum
plenum/server/monitor.py
Monitor.metrics
def metrics(self): """ Calculate and return the metrics. """ masterThrp, backupThrp = self.getThroughputs(self.instances.masterId) r = self.instance_throughput_ratio(self.instances.masterId) m = [ ("{} Monitor metrics:".format(self), None), ("Delta", self.Delta), ("Lambda", self.Lambda), ("Omega", self.Omega), ("instances started", self.instances.started), ("ordered request counts", {i: r[0] for i, r in self.numOrderedRequests.items()}), ("ordered request durations", {i: r[1] for i, r in self.numOrderedRequests.items()}), ("master request latencies", self.masterReqLatencies), ("client avg request latencies", {i: self.getLatency(i) for i in self.instances.ids}), ("throughput", {i: self.getThroughput(i) for i in self.instances.ids}), ("master throughput", masterThrp), ("total requests", self.totalRequests), ("avg backup throughput", backupThrp), ("master throughput ratio", r)] return m
python
def metrics(self): """ Calculate and return the metrics. """ masterThrp, backupThrp = self.getThroughputs(self.instances.masterId) r = self.instance_throughput_ratio(self.instances.masterId) m = [ ("{} Monitor metrics:".format(self), None), ("Delta", self.Delta), ("Lambda", self.Lambda), ("Omega", self.Omega), ("instances started", self.instances.started), ("ordered request counts", {i: r[0] for i, r in self.numOrderedRequests.items()}), ("ordered request durations", {i: r[1] for i, r in self.numOrderedRequests.items()}), ("master request latencies", self.masterReqLatencies), ("client avg request latencies", {i: self.getLatency(i) for i in self.instances.ids}), ("throughput", {i: self.getThroughput(i) for i in self.instances.ids}), ("master throughput", masterThrp), ("total requests", self.totalRequests), ("avg backup throughput", backupThrp), ("master throughput ratio", r)] return m
[ "def", "metrics", "(", "self", ")", ":", "masterThrp", ",", "backupThrp", "=", "self", ".", "getThroughputs", "(", "self", ".", "instances", ".", "masterId", ")", "r", "=", "self", ".", "instance_throughput_ratio", "(", "self", ".", "instances", ".", "masterId", ")", "m", "=", "[", "(", "\"{} Monitor metrics:\"", ".", "format", "(", "self", ")", ",", "None", ")", ",", "(", "\"Delta\"", ",", "self", ".", "Delta", ")", ",", "(", "\"Lambda\"", ",", "self", ".", "Lambda", ")", ",", "(", "\"Omega\"", ",", "self", ".", "Omega", ")", ",", "(", "\"instances started\"", ",", "self", ".", "instances", ".", "started", ")", ",", "(", "\"ordered request counts\"", ",", "{", "i", ":", "r", "[", "0", "]", "for", "i", ",", "r", "in", "self", ".", "numOrderedRequests", ".", "items", "(", ")", "}", ")", ",", "(", "\"ordered request durations\"", ",", "{", "i", ":", "r", "[", "1", "]", "for", "i", ",", "r", "in", "self", ".", "numOrderedRequests", ".", "items", "(", ")", "}", ")", ",", "(", "\"master request latencies\"", ",", "self", ".", "masterReqLatencies", ")", ",", "(", "\"client avg request latencies\"", ",", "{", "i", ":", "self", ".", "getLatency", "(", "i", ")", "for", "i", "in", "self", ".", "instances", ".", "ids", "}", ")", ",", "(", "\"throughput\"", ",", "{", "i", ":", "self", ".", "getThroughput", "(", "i", ")", "for", "i", "in", "self", ".", "instances", ".", "ids", "}", ")", ",", "(", "\"master throughput\"", ",", "masterThrp", ")", ",", "(", "\"total requests\"", ",", "self", ".", "totalRequests", ")", ",", "(", "\"avg backup throughput\"", ",", "backupThrp", ")", ",", "(", "\"master throughput ratio\"", ",", "r", ")", "]", "return", "m" ]
Calculate and return the metrics.
[ "Calculate", "and", "return", "the", "metrics", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/monitor.py#L256-L281
234,184
hyperledger/indy-plenum
plenum/server/monitor.py
Monitor.prettymetrics
def prettymetrics(self) -> str: """ Pretty printing for metrics """ rendered = ["{}: {}".format(*m) for m in self.metrics()] return "\n ".join(rendered)
python
def prettymetrics(self) -> str: """ Pretty printing for metrics """ rendered = ["{}: {}".format(*m) for m in self.metrics()] return "\n ".join(rendered)
[ "def", "prettymetrics", "(", "self", ")", "->", "str", ":", "rendered", "=", "[", "\"{}: {}\"", ".", "format", "(", "*", "m", ")", "for", "m", "in", "self", ".", "metrics", "(", ")", "]", "return", "\"\\n \"", ".", "join", "(", "rendered", ")" ]
Pretty printing for metrics
[ "Pretty", "printing", "for", "metrics" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/monitor.py#L284-L289
234,185
hyperledger/indy-plenum
plenum/server/monitor.py
Monitor.reset
def reset(self): """ Reset the monitor. Sets all monitored values to defaults. """ logger.debug("{}'s Monitor being reset".format(self)) instances_ids = self.instances.started.keys() self.numOrderedRequests = {inst_id: (0, 0) for inst_id in instances_ids} self.requestTracker.reset() self.masterReqLatencies = {} self.masterReqLatencyTooHigh = False self.totalViewChanges += 1 self.lastKnownTraffic = self.calculateTraffic() if self.acc_monitor: self.acc_monitor.reset() for i in instances_ids: rm = self.create_throughput_measurement(self.config) self.throughputs[i] = rm lm = self.latency_measurement_cls(self.config) self.clientAvgReqLatencies[i] = lm
python
def reset(self): """ Reset the monitor. Sets all monitored values to defaults. """ logger.debug("{}'s Monitor being reset".format(self)) instances_ids = self.instances.started.keys() self.numOrderedRequests = {inst_id: (0, 0) for inst_id in instances_ids} self.requestTracker.reset() self.masterReqLatencies = {} self.masterReqLatencyTooHigh = False self.totalViewChanges += 1 self.lastKnownTraffic = self.calculateTraffic() if self.acc_monitor: self.acc_monitor.reset() for i in instances_ids: rm = self.create_throughput_measurement(self.config) self.throughputs[i] = rm lm = self.latency_measurement_cls(self.config) self.clientAvgReqLatencies[i] = lm
[ "def", "reset", "(", "self", ")", ":", "logger", ".", "debug", "(", "\"{}'s Monitor being reset\"", ".", "format", "(", "self", ")", ")", "instances_ids", "=", "self", ".", "instances", ".", "started", ".", "keys", "(", ")", "self", ".", "numOrderedRequests", "=", "{", "inst_id", ":", "(", "0", ",", "0", ")", "for", "inst_id", "in", "instances_ids", "}", "self", ".", "requestTracker", ".", "reset", "(", ")", "self", ".", "masterReqLatencies", "=", "{", "}", "self", ".", "masterReqLatencyTooHigh", "=", "False", "self", ".", "totalViewChanges", "+=", "1", "self", ".", "lastKnownTraffic", "=", "self", ".", "calculateTraffic", "(", ")", "if", "self", ".", "acc_monitor", ":", "self", ".", "acc_monitor", ".", "reset", "(", ")", "for", "i", "in", "instances_ids", ":", "rm", "=", "self", ".", "create_throughput_measurement", "(", "self", ".", "config", ")", "self", ".", "throughputs", "[", "i", "]", "=", "rm", "lm", "=", "self", ".", "latency_measurement_cls", "(", "self", ".", "config", ")", "self", ".", "clientAvgReqLatencies", "[", "i", "]", "=", "lm" ]
Reset the monitor. Sets all monitored values to defaults.
[ "Reset", "the", "monitor", ".", "Sets", "all", "monitored", "values", "to", "defaults", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/monitor.py#L308-L326
234,186
hyperledger/indy-plenum
plenum/server/monitor.py
Monitor.addInstance
def addInstance(self, inst_id): """ Add one protocol instance for monitoring. """ self.instances.add(inst_id) self.requestTracker.add_instance(inst_id) self.numOrderedRequests[inst_id] = (0, 0) rm = self.create_throughput_measurement(self.config) self.throughputs[inst_id] = rm lm = self.latency_measurement_cls(self.config) self.clientAvgReqLatencies[inst_id] = lm if self.acc_monitor: self.acc_monitor.add_instance(inst_id)
python
def addInstance(self, inst_id): """ Add one protocol instance for monitoring. """ self.instances.add(inst_id) self.requestTracker.add_instance(inst_id) self.numOrderedRequests[inst_id] = (0, 0) rm = self.create_throughput_measurement(self.config) self.throughputs[inst_id] = rm lm = self.latency_measurement_cls(self.config) self.clientAvgReqLatencies[inst_id] = lm if self.acc_monitor: self.acc_monitor.add_instance(inst_id)
[ "def", "addInstance", "(", "self", ",", "inst_id", ")", ":", "self", ".", "instances", ".", "add", "(", "inst_id", ")", "self", ".", "requestTracker", ".", "add_instance", "(", "inst_id", ")", "self", ".", "numOrderedRequests", "[", "inst_id", "]", "=", "(", "0", ",", "0", ")", "rm", "=", "self", ".", "create_throughput_measurement", "(", "self", ".", "config", ")", "self", ".", "throughputs", "[", "inst_id", "]", "=", "rm", "lm", "=", "self", ".", "latency_measurement_cls", "(", "self", ".", "config", ")", "self", ".", "clientAvgReqLatencies", "[", "inst_id", "]", "=", "lm", "if", "self", ".", "acc_monitor", ":", "self", ".", "acc_monitor", ".", "add_instance", "(", "inst_id", ")" ]
Add one protocol instance for monitoring.
[ "Add", "one", "protocol", "instance", "for", "monitoring", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/monitor.py#L328-L341
234,187
hyperledger/indy-plenum
plenum/server/monitor.py
Monitor.requestOrdered
def requestOrdered(self, reqIdrs: List[str], instId: int, requests, byMaster: bool = False) -> Dict: """ Measure the time taken for ordering of a request and return it. Monitor might have been reset due to view change due to which this method returns None """ now = time.perf_counter() if self.acc_monitor: self.acc_monitor.update_time(now) durations = {} for key in reqIdrs: if key not in self.requestTracker: logger.debug("Got untracked ordered request with digest {}". format(key)) continue if self.acc_monitor: self.acc_monitor.request_ordered(key, instId) if key in self.requestTracker.handled_unordered(): started = self.requestTracker.started(key) logger.info('Consensus for ReqId: {} was achieved by {}:{} in {} seconds.' .format(key, self.name, instId, now - started)) duration = self.requestTracker.order(instId, key, now) self.throughputs[instId].add_request(now) if key in requests: identifier = requests[key].request.identifier self.clientAvgReqLatencies[instId].add_duration(identifier, duration) durations[key] = duration reqs, tm = self.numOrderedRequests[instId] orderedNow = len(durations) self.numOrderedRequests[instId] = (reqs + orderedNow, tm + sum(durations.values())) # TODO: Inefficient, as on every request a minimum of a large list is # calculated if min(r[0] for r in self.numOrderedRequests.values()) == (reqs + orderedNow): # If these requests is ordered by the last instance then increment # total requests, but why is this important, why cant is ordering # by master not enough? self.totalRequests += orderedNow self.postOnReqOrdered() if 0 == reqs: self.postOnNodeStarted(self.started) return durations
python
def requestOrdered(self, reqIdrs: List[str], instId: int, requests, byMaster: bool = False) -> Dict: """ Measure the time taken for ordering of a request and return it. Monitor might have been reset due to view change due to which this method returns None """ now = time.perf_counter() if self.acc_monitor: self.acc_monitor.update_time(now) durations = {} for key in reqIdrs: if key not in self.requestTracker: logger.debug("Got untracked ordered request with digest {}". format(key)) continue if self.acc_monitor: self.acc_monitor.request_ordered(key, instId) if key in self.requestTracker.handled_unordered(): started = self.requestTracker.started(key) logger.info('Consensus for ReqId: {} was achieved by {}:{} in {} seconds.' .format(key, self.name, instId, now - started)) duration = self.requestTracker.order(instId, key, now) self.throughputs[instId].add_request(now) if key in requests: identifier = requests[key].request.identifier self.clientAvgReqLatencies[instId].add_duration(identifier, duration) durations[key] = duration reqs, tm = self.numOrderedRequests[instId] orderedNow = len(durations) self.numOrderedRequests[instId] = (reqs + orderedNow, tm + sum(durations.values())) # TODO: Inefficient, as on every request a minimum of a large list is # calculated if min(r[0] for r in self.numOrderedRequests.values()) == (reqs + orderedNow): # If these requests is ordered by the last instance then increment # total requests, but why is this important, why cant is ordering # by master not enough? self.totalRequests += orderedNow self.postOnReqOrdered() if 0 == reqs: self.postOnNodeStarted(self.started) return durations
[ "def", "requestOrdered", "(", "self", ",", "reqIdrs", ":", "List", "[", "str", "]", ",", "instId", ":", "int", ",", "requests", ",", "byMaster", ":", "bool", "=", "False", ")", "->", "Dict", ":", "now", "=", "time", ".", "perf_counter", "(", ")", "if", "self", ".", "acc_monitor", ":", "self", ".", "acc_monitor", ".", "update_time", "(", "now", ")", "durations", "=", "{", "}", "for", "key", "in", "reqIdrs", ":", "if", "key", "not", "in", "self", ".", "requestTracker", ":", "logger", ".", "debug", "(", "\"Got untracked ordered request with digest {}\"", ".", "format", "(", "key", ")", ")", "continue", "if", "self", ".", "acc_monitor", ":", "self", ".", "acc_monitor", ".", "request_ordered", "(", "key", ",", "instId", ")", "if", "key", "in", "self", ".", "requestTracker", ".", "handled_unordered", "(", ")", ":", "started", "=", "self", ".", "requestTracker", ".", "started", "(", "key", ")", "logger", ".", "info", "(", "'Consensus for ReqId: {} was achieved by {}:{} in {} seconds.'", ".", "format", "(", "key", ",", "self", ".", "name", ",", "instId", ",", "now", "-", "started", ")", ")", "duration", "=", "self", ".", "requestTracker", ".", "order", "(", "instId", ",", "key", ",", "now", ")", "self", ".", "throughputs", "[", "instId", "]", ".", "add_request", "(", "now", ")", "if", "key", "in", "requests", ":", "identifier", "=", "requests", "[", "key", "]", ".", "request", ".", "identifier", "self", ".", "clientAvgReqLatencies", "[", "instId", "]", ".", "add_duration", "(", "identifier", ",", "duration", ")", "durations", "[", "key", "]", "=", "duration", "reqs", ",", "tm", "=", "self", ".", "numOrderedRequests", "[", "instId", "]", "orderedNow", "=", "len", "(", "durations", ")", "self", ".", "numOrderedRequests", "[", "instId", "]", "=", "(", "reqs", "+", "orderedNow", ",", "tm", "+", "sum", "(", "durations", ".", "values", "(", ")", ")", ")", "# TODO: Inefficient, as on every request a minimum of a large list is", "# calculated", "if", "min", "(", "r", "[", "0", "]", "for", "r", "in", "self", ".", "numOrderedRequests", ".", "values", "(", ")", ")", "==", "(", "reqs", "+", "orderedNow", ")", ":", "# If these requests is ordered by the last instance then increment", "# total requests, but why is this important, why cant is ordering", "# by master not enough?", "self", ".", "totalRequests", "+=", "orderedNow", "self", ".", "postOnReqOrdered", "(", ")", "if", "0", "==", "reqs", ":", "self", ".", "postOnNodeStarted", "(", "self", ".", "started", ")", "return", "durations" ]
Measure the time taken for ordering of a request and return it. Monitor might have been reset due to view change due to which this method returns None
[ "Measure", "the", "time", "taken", "for", "ordering", "of", "a", "request", "and", "return", "it", ".", "Monitor", "might", "have", "been", "reset", "due", "to", "view", "change", "due", "to", "which", "this", "method", "returns", "None" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/monitor.py#L353-L400
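A worked example of the per-instance bookkeeping above: each instance keeps an (ordered count, total ordering time) pair, and the node-wide total only advances once the slowest instance has caught up (all figures below are made up).

num_ordered = {0: (10, 4.2), 1: (10, 5.0)}    # inst_id -> (reqs, seconds)
durations = {"req-a": 0.3, "req-b": 0.4}      # newly ordered on instance 0

reqs, tm = num_ordered[0]
num_ordered[0] = (reqs + len(durations), tm + sum(durations.values()))

# instance 1 has not ordered them yet, so the minimum (and hence the
# node-wide total) does not move on this call
print(min(r[0] for r in num_ordered.values()))   # 10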
234,188
hyperledger/indy-plenum
plenum/server/monitor.py
Monitor.requestUnOrdered
def requestUnOrdered(self, key: str): """ Record the time at which request ordering started. """ now = time.perf_counter() if self.acc_monitor: self.acc_monitor.update_time(now) self.acc_monitor.request_received(key) self.requestTracker.start(key, now)
python
def requestUnOrdered(self, key: str): """ Record the time at which request ordering started. """ now = time.perf_counter() if self.acc_monitor: self.acc_monitor.update_time(now) self.acc_monitor.request_received(key) self.requestTracker.start(key, now)
[ "def", "requestUnOrdered", "(", "self", ",", "key", ":", "str", ")", ":", "now", "=", "time", ".", "perf_counter", "(", ")", "if", "self", ".", "acc_monitor", ":", "self", ".", "acc_monitor", ".", "update_time", "(", "now", ")", "self", ".", "acc_monitor", ".", "request_received", "(", "key", ")", "self", ".", "requestTracker", ".", "start", "(", "key", ",", "now", ")" ]
Record the time at which request ordering started.
[ "Record", "the", "time", "at", "which", "request", "ordering", "started", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/monitor.py#L402-L410
234,189
hyperledger/indy-plenum
plenum/server/monitor.py
Monitor.isMasterDegraded
def isMasterDegraded(self): """ Return whether the master instance is slow. """ if self.acc_monitor: self.acc_monitor.update_time(time.perf_counter()) return self.acc_monitor.is_master_degraded() else: return (self.instances.masterId is not None and (self.isMasterThroughputTooLow() or # TODO for now, view_change procedure can take more that 15 minutes # (5 minutes for catchup and 10 minutes for primary's answer). # Therefore, view_change triggering by max latency now is not indicative. # self.isMasterReqLatencyTooHigh() or self.isMasterAvgReqLatencyTooHigh()))
python
def isMasterDegraded(self): """ Return whether the master instance is slow. """ if self.acc_monitor: self.acc_monitor.update_time(time.perf_counter()) return self.acc_monitor.is_master_degraded() else: return (self.instances.masterId is not None and (self.isMasterThroughputTooLow() or # TODO for now, view_change procedure can take more that 15 minutes # (5 minutes for catchup and 10 minutes for primary's answer). # Therefore, view_change triggering by max latency now is not indicative. # self.isMasterReqLatencyTooHigh() or self.isMasterAvgReqLatencyTooHigh()))
[ "def", "isMasterDegraded", "(", "self", ")", ":", "if", "self", ".", "acc_monitor", ":", "self", ".", "acc_monitor", ".", "update_time", "(", "time", ".", "perf_counter", "(", ")", ")", "return", "self", ".", "acc_monitor", ".", "is_master_degraded", "(", ")", "else", ":", "return", "(", "self", ".", "instances", ".", "masterId", "is", "not", "None", "and", "(", "self", ".", "isMasterThroughputTooLow", "(", ")", "or", "# TODO for now, view_change procedure can take more that 15 minutes", "# (5 minutes for catchup and 10 minutes for primary's answer).", "# Therefore, view_change triggering by max latency now is not indicative.", "# self.isMasterReqLatencyTooHigh() or", "self", ".", "isMasterAvgReqLatencyTooHigh", "(", ")", ")", ")" ]
Return whether the master instance is slow.
[ "Return", "whether", "the", "master", "instance", "is", "slow", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/monitor.py#L425-L439
234,190
hyperledger/indy-plenum
plenum/server/monitor.py
Monitor.areBackupsDegraded
def areBackupsDegraded(self): """ Return slow instance. """ slow_instances = [] if self.acc_monitor: for instance in self.instances.backupIds: if self.acc_monitor.is_instance_degraded(instance): slow_instances.append(instance) else: for instance in self.instances.backupIds: if self.is_instance_throughput_too_low(instance): slow_instances.append(instance) return slow_instances
python
def areBackupsDegraded(self): """ Return slow instance. """ slow_instances = [] if self.acc_monitor: for instance in self.instances.backupIds: if self.acc_monitor.is_instance_degraded(instance): slow_instances.append(instance) else: for instance in self.instances.backupIds: if self.is_instance_throughput_too_low(instance): slow_instances.append(instance) return slow_instances
[ "def", "areBackupsDegraded", "(", "self", ")", ":", "slow_instances", "=", "[", "]", "if", "self", ".", "acc_monitor", ":", "for", "instance", "in", "self", ".", "instances", ".", "backupIds", ":", "if", "self", ".", "acc_monitor", ".", "is_instance_degraded", "(", "instance", ")", ":", "slow_instances", ".", "append", "(", "instance", ")", "else", ":", "for", "instance", "in", "self", ".", "instances", ".", "backupIds", ":", "if", "self", ".", "is_instance_throughput_too_low", "(", "instance", ")", ":", "slow_instances", ".", "append", "(", "instance", ")", "return", "slow_instances" ]
Return slow instance.
[ "Return", "slow", "instance", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/monitor.py#L441-L454
234,191
hyperledger/indy-plenum
plenum/server/monitor.py
Monitor.instance_throughput_ratio
def instance_throughput_ratio(self, inst_id): """ The relative throughput of an instance compared to the backup instances. """ inst_thrp, otherThrp = self.getThroughputs(inst_id) # Backup throughput may be 0 so moving ahead only if it is not 0 r = inst_thrp / otherThrp if otherThrp and inst_thrp is not None \ else None return r
python
def instance_throughput_ratio(self, inst_id): """ The relative throughput of an instance compared to the backup instances. """ inst_thrp, otherThrp = self.getThroughputs(inst_id) # Backup throughput may be 0 so moving ahead only if it is not 0 r = inst_thrp / otherThrp if otherThrp and inst_thrp is not None \ else None return r
[ "def", "instance_throughput_ratio", "(", "self", ",", "inst_id", ")", ":", "inst_thrp", ",", "otherThrp", "=", "self", ".", "getThroughputs", "(", "inst_id", ")", "# Backup throughput may be 0 so moving ahead only if it is not 0", "r", "=", "inst_thrp", "/", "otherThrp", "if", "otherThrp", "and", "inst_thrp", "is", "not", "None", "else", "None", "return", "r" ]
The relative throughput of an instance compared to the backup instances.
[ "The", "relative", "throughput", "of", "an", "instance", "compared", "to", "the", "backup", "instances", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/monitor.py#L456-L466
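A worked example of the ratio: an instance ordering 18 req/s against an average of 20 req/s elsewhere scores 0.9; in the checks that follow, a ratio below the configured Delta marks the instance as lagging (numbers are illustrative).

inst_thrp, other_thrp = 18.0, 20.0
ratio = inst_thrp / other_thrp if other_thrp and inst_thrp is not None else None
print(ratio)   # 0.9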
234,192
hyperledger/indy-plenum
plenum/server/monitor.py
Monitor.is_instance_throughput_too_low
def is_instance_throughput_too_low(self, inst_id): """ Return whether the throughput of the master instance is greater than the acceptable threshold """ r = self.instance_throughput_ratio(inst_id) if r is None: logger.debug("{} instance {} throughput is not " "measurable.".format(self, inst_id)) return None too_low = r < self.Delta if too_low: logger.display("{}{} instance {} throughput ratio {} is lower than Delta {}.". format(MONITORING_PREFIX, self, inst_id, r, self.Delta)) else: logger.trace("{} instance {} throughput ratio {} is acceptable.". format(self, inst_id, r)) return too_low
python
def is_instance_throughput_too_low(self, inst_id): """ Return whether the throughput of the master instance is greater than the acceptable threshold """ r = self.instance_throughput_ratio(inst_id) if r is None: logger.debug("{} instance {} throughput is not " "measurable.".format(self, inst_id)) return None too_low = r < self.Delta if too_low: logger.display("{}{} instance {} throughput ratio {} is lower than Delta {}.". format(MONITORING_PREFIX, self, inst_id, r, self.Delta)) else: logger.trace("{} instance {} throughput ratio {} is acceptable.". format(self, inst_id, r)) return too_low
[ "def", "is_instance_throughput_too_low", "(", "self", ",", "inst_id", ")", ":", "r", "=", "self", ".", "instance_throughput_ratio", "(", "inst_id", ")", "if", "r", "is", "None", ":", "logger", ".", "debug", "(", "\"{} instance {} throughput is not \"", "\"measurable.\"", ".", "format", "(", "self", ",", "inst_id", ")", ")", "return", "None", "too_low", "=", "r", "<", "self", ".", "Delta", "if", "too_low", ":", "logger", ".", "display", "(", "\"{}{} instance {} throughput ratio {} is lower than Delta {}.\"", ".", "format", "(", "MONITORING_PREFIX", ",", "self", ",", "inst_id", ",", "r", ",", "self", ".", "Delta", ")", ")", "else", ":", "logger", ".", "trace", "(", "\"{} instance {} throughput ratio {} is acceptable.\"", ".", "format", "(", "self", ",", "inst_id", ",", "r", ")", ")", "return", "too_low" ]
Return whether the throughput of the master instance is greater than the acceptable threshold
[ "Return", "whether", "the", "throughput", "of", "the", "master", "instance", "is", "greater", "than", "the", "acceptable", "threshold" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/monitor.py#L475-L492
234,193
hyperledger/indy-plenum
plenum/server/monitor.py
Monitor.isMasterReqLatencyTooHigh
def isMasterReqLatencyTooHigh(self): """ Return whether the request latency of the master instance is greater than the acceptable threshold """ # TODO for now, view_change procedure can take more that 15 minutes # (5 minutes for catchup and 10 minutes for primary's answer). # Therefore, view_change triggering by max latency is not indicative now. r = self.masterReqLatencyTooHigh or \ next(((key, lat) for key, lat in self.masterReqLatencies.items() if lat > self.Lambda), None) if r: logger.display("{}{} found master's latency {} to be higher than the threshold for request {}.". format(MONITORING_PREFIX, self, r[1], r[0])) else: logger.trace("{} found master's latency to be lower than the " "threshold for all requests.".format(self)) return r
python
def isMasterReqLatencyTooHigh(self): """ Return whether the request latency of the master instance is greater than the acceptable threshold """ # TODO for now, view_change procedure can take more that 15 minutes # (5 minutes for catchup and 10 minutes for primary's answer). # Therefore, view_change triggering by max latency is not indicative now. r = self.masterReqLatencyTooHigh or \ next(((key, lat) for key, lat in self.masterReqLatencies.items() if lat > self.Lambda), None) if r: logger.display("{}{} found master's latency {} to be higher than the threshold for request {}.". format(MONITORING_PREFIX, self, r[1], r[0])) else: logger.trace("{} found master's latency to be lower than the " "threshold for all requests.".format(self)) return r
[ "def", "isMasterReqLatencyTooHigh", "(", "self", ")", ":", "# TODO for now, view_change procedure can take more that 15 minutes", "# (5 minutes for catchup and 10 minutes for primary's answer).", "# Therefore, view_change triggering by max latency is not indicative now.", "r", "=", "self", ".", "masterReqLatencyTooHigh", "or", "next", "(", "(", "(", "key", ",", "lat", ")", "for", "key", ",", "lat", "in", "self", ".", "masterReqLatencies", ".", "items", "(", ")", "if", "lat", ">", "self", ".", "Lambda", ")", ",", "None", ")", "if", "r", ":", "logger", ".", "display", "(", "\"{}{} found master's latency {} to be higher than the threshold for request {}.\"", ".", "format", "(", "MONITORING_PREFIX", ",", "self", ",", "r", "[", "1", "]", ",", "r", "[", "0", "]", ")", ")", "else", ":", "logger", ".", "trace", "(", "\"{} found master's latency to be lower than the \"", "\"threshold for all requests.\"", ".", "format", "(", "self", ")", ")", "return", "r" ]
Return whether the request latency of the master instance is greater than the acceptable threshold
[ "Return", "whether", "the", "request", "latency", "of", "the", "master", "instance", "is", "greater", "than", "the", "acceptable", "threshold" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/monitor.py#L494-L512
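The next(..., None) idiom above in isolation: pick the first (request key, latency) pair over the threshold, or None when every latency is acceptable (values are illustrative).

Lambda = 60.0
latencies = {"req-a": 12.0, "req-b": 75.0}
worst = next(((k, lat) for k, lat in latencies.items() if lat > Lambda), None)
print(worst)   # ('req-b', 75.0)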
234,194
hyperledger/indy-plenum
plenum/server/monitor.py
Monitor.is_instance_avg_req_latency_too_high
def is_instance_avg_req_latency_too_high(self, inst_id): """ Return whether the average request latency of an instance is greater than the acceptable threshold """ avg_lat, avg_lat_others = self.getLatencies() if not avg_lat or not avg_lat_others: return False d = avg_lat - avg_lat_others if d < self.Omega: return False if inst_id == self.instances.masterId: logger.info("{}{} found difference between master's and " "backups's avg latency {} to be higher than the " "threshold".format(MONITORING_PREFIX, self, d)) logger.trace( "{}'s master's avg request latency is {} and backup's " "avg request latency is {}".format(self, avg_lat, avg_lat_others)) return True
python
def is_instance_avg_req_latency_too_high(self, inst_id): """ Return whether the average request latency of an instance is greater than the acceptable threshold """ avg_lat, avg_lat_others = self.getLatencies() if not avg_lat or not avg_lat_others: return False d = avg_lat - avg_lat_others if d < self.Omega: return False if inst_id == self.instances.masterId: logger.info("{}{} found difference between master's and " "backups's avg latency {} to be higher than the " "threshold".format(MONITORING_PREFIX, self, d)) logger.trace( "{}'s master's avg request latency is {} and backup's " "avg request latency is {}".format(self, avg_lat, avg_lat_others)) return True
[ "def", "is_instance_avg_req_latency_too_high", "(", "self", ",", "inst_id", ")", ":", "avg_lat", ",", "avg_lat_others", "=", "self", ".", "getLatencies", "(", ")", "if", "not", "avg_lat", "or", "not", "avg_lat_others", ":", "return", "False", "d", "=", "avg_lat", "-", "avg_lat_others", "if", "d", "<", "self", ".", "Omega", ":", "return", "False", "if", "inst_id", "==", "self", ".", "instances", ".", "masterId", ":", "logger", ".", "info", "(", "\"{}{} found difference between master's and \"", "\"backups's avg latency {} to be higher than the \"", "\"threshold\"", ".", "format", "(", "MONITORING_PREFIX", ",", "self", ",", "d", ")", ")", "logger", ".", "trace", "(", "\"{}'s master's avg request latency is {} and backup's \"", "\"avg request latency is {}\"", ".", "format", "(", "self", ",", "avg_lat", ",", "avg_lat_others", ")", ")", "return", "True" ]
Return whether the average request latency of an instance is greater than the acceptable threshold
[ "Return", "whether", "the", "average", "request", "latency", "of", "an", "instance", "is", "greater", "than", "the", "acceptable", "threshold" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/monitor.py#L521-L541
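A worked check against Omega: the master is flagged only when its average request latency exceeds the backups' average by at least Omega (numbers are illustrative).

Omega = 20.0
avg_lat_master, avg_lat_backups = 95.0, 70.0
too_high = (avg_lat_master - avg_lat_backups) >= Omega
print(too_high)   # True: the difference of 25 is not below Omega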
234,195
hyperledger/indy-plenum
plenum/server/monitor.py
Monitor.getThroughputs
def getThroughputs(self, desired_inst_id: int): """ Return a tuple of the throughput of the given instance and the average throughput of the remaining instances. :param instId: the id of the protocol instance """ instance_thrp = self.getThroughput(desired_inst_id) totalReqs, totalTm = self.getInstanceMetrics(forAllExcept=desired_inst_id) # Average backup replica's throughput if len(self.throughputs) > 1: thrs = [] for inst_id, thr_obj in self.throughputs.items(): if inst_id == desired_inst_id: continue thr = self.getThroughput(inst_id) if thr is not None: thrs.append(thr) if thrs: if desired_inst_id == self.instances.masterId: other_thrp = self.throughput_avg_strategy_cls.get_avg(thrs) else: other_thrp = self.backup_throughput_avg_strategy_cls.get_avg(thrs) else: other_thrp = None else: other_thrp = None if instance_thrp == 0: if self.numOrderedRequests[desired_inst_id] == (0, 0): avgReqsPerInst = (totalReqs or 0) / self.instances.count if avgReqsPerInst <= 1: # too early to tell if we need an instance change instance_thrp = None return instance_thrp, other_thrp
python
def getThroughputs(self, desired_inst_id: int): """ Return a tuple of the throughput of the given instance and the average throughput of the remaining instances. :param instId: the id of the protocol instance """ instance_thrp = self.getThroughput(desired_inst_id) totalReqs, totalTm = self.getInstanceMetrics(forAllExcept=desired_inst_id) # Average backup replica's throughput if len(self.throughputs) > 1: thrs = [] for inst_id, thr_obj in self.throughputs.items(): if inst_id == desired_inst_id: continue thr = self.getThroughput(inst_id) if thr is not None: thrs.append(thr) if thrs: if desired_inst_id == self.instances.masterId: other_thrp = self.throughput_avg_strategy_cls.get_avg(thrs) else: other_thrp = self.backup_throughput_avg_strategy_cls.get_avg(thrs) else: other_thrp = None else: other_thrp = None if instance_thrp == 0: if self.numOrderedRequests[desired_inst_id] == (0, 0): avgReqsPerInst = (totalReqs or 0) / self.instances.count if avgReqsPerInst <= 1: # too early to tell if we need an instance change instance_thrp = None return instance_thrp, other_thrp
[ "def", "getThroughputs", "(", "self", ",", "desired_inst_id", ":", "int", ")", ":", "instance_thrp", "=", "self", ".", "getThroughput", "(", "desired_inst_id", ")", "totalReqs", ",", "totalTm", "=", "self", ".", "getInstanceMetrics", "(", "forAllExcept", "=", "desired_inst_id", ")", "# Average backup replica's throughput", "if", "len", "(", "self", ".", "throughputs", ")", ">", "1", ":", "thrs", "=", "[", "]", "for", "inst_id", ",", "thr_obj", "in", "self", ".", "throughputs", ".", "items", "(", ")", ":", "if", "inst_id", "==", "desired_inst_id", ":", "continue", "thr", "=", "self", ".", "getThroughput", "(", "inst_id", ")", "if", "thr", "is", "not", "None", ":", "thrs", ".", "append", "(", "thr", ")", "if", "thrs", ":", "if", "desired_inst_id", "==", "self", ".", "instances", ".", "masterId", ":", "other_thrp", "=", "self", ".", "throughput_avg_strategy_cls", ".", "get_avg", "(", "thrs", ")", "else", ":", "other_thrp", "=", "self", ".", "backup_throughput_avg_strategy_cls", ".", "get_avg", "(", "thrs", ")", "else", ":", "other_thrp", "=", "None", "else", ":", "other_thrp", "=", "None", "if", "instance_thrp", "==", "0", ":", "if", "self", ".", "numOrderedRequests", "[", "desired_inst_id", "]", "==", "(", "0", ",", "0", ")", ":", "avgReqsPerInst", "=", "(", "totalReqs", "or", "0", ")", "/", "self", ".", "instances", ".", "count", "if", "avgReqsPerInst", "<=", "1", ":", "# too early to tell if we need an instance change", "instance_thrp", "=", "None", "return", "instance_thrp", ",", "other_thrp" ]
Return a tuple of the throughput of the given instance and the average throughput of the remaining instances. :param instId: the id of the protocol instance
[ "Return", "a", "tuple", "of", "the", "throughput", "of", "the", "given", "instance", "and", "the", "average", "throughput", "of", "the", "remaining", "instances", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/monitor.py#L543-L577
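A sketch of the "average everyone else" step, with a plain mean standing in for the pluggable averaging strategy classes used above (throughput figures are made up).

throughputs = {0: 18.0, 1: 21.0, 2: 19.0}     # inst_id -> req/s
desired = 0
others = [t for i, t in throughputs.items() if i != desired and t is not None]
other_avg = sum(others) / len(others) if others else None
print(throughputs[desired], other_avg)        # 18.0 20.0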
234,196
hyperledger/indy-plenum
plenum/server/monitor.py
Monitor.getThroughput
def getThroughput(self, instId: int) -> float: """ Return the throughput of the specified instance. :param instId: the id of the protocol instance """ # We are using the instanceStarted time in the denominator instead of # a time interval. This is alright for now as all the instances on a # node are started at almost the same time. if instId not in self.instances.ids: return None perf_time = time.perf_counter() throughput = self.throughputs[instId].get_throughput(perf_time) return throughput
python
def getThroughput(self, instId: int) -> float: """ Return the throughput of the specified instance. :param instId: the id of the protocol instance """ # We are using the instanceStarted time in the denominator instead of # a time interval. This is alright for now as all the instances on a # node are started at almost the same time. if instId not in self.instances.ids: return None perf_time = time.perf_counter() throughput = self.throughputs[instId].get_throughput(perf_time) return throughput
[ "def", "getThroughput", "(", "self", ",", "instId", ":", "int", ")", "->", "float", ":", "# We are using the instanceStarted time in the denominator instead of", "# a time interval. This is alright for now as all the instances on a", "# node are started at almost the same time.", "if", "instId", "not", "in", "self", ".", "instances", ".", "ids", ":", "return", "None", "perf_time", "=", "time", ".", "perf_counter", "(", ")", "throughput", "=", "self", ".", "throughputs", "[", "instId", "]", ".", "get_throughput", "(", "perf_time", ")", "return", "throughput" ]
Return the throughput of the specified instance. :param instId: the id of the protocol instance
[ "Return", "the", "throughput", "of", "the", "specified", "instance", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/monitor.py#L579-L592
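A minimal usage sketch for getThroughput follows. It is illustrative only: `monitor` is an assumed Monitor instance, and the point is simply that unknown instance ids yield None rather than raising, so callers should guard for it.

# Minimal usage sketch; `monitor` is assumed to be a Monitor instance.
thrp = monitor.getThroughput(0)
if thrp is None:
    print("instance 0 is not tracked by this monitor")
else:
    print("instance 0 throughput: {:.2f} requests/sec".format(thrp))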
234,197
hyperledger/indy-plenum
plenum/server/monitor.py
Monitor.getInstanceMetrics
def getInstanceMetrics(
            self, forAllExcept: int) -> Tuple[Optional[int], Optional[float]]:
        """
        Calculate and return the average throughput of all the instances except
        the one specified as `forAllExcept`.
        """
        m = [(reqs, tm) for i, (reqs, tm) in self.numOrderedRequests.items()
             if i != forAllExcept]
        if m:
            reqs, tm = zip(*m)
            return sum(reqs), sum(tm)
        else:
            return None, None
python
def getInstanceMetrics(
            self, forAllExcept: int) -> Tuple[Optional[int], Optional[float]]:
        """
        Calculate and return the average throughput of all the instances except
        the one specified as `forAllExcept`.
        """
        m = [(reqs, tm) for i, (reqs, tm) in self.numOrderedRequests.items()
             if i != forAllExcept]
        if m:
            reqs, tm = zip(*m)
            return sum(reqs), sum(tm)
        else:
            return None, None
[ "def", "getInstanceMetrics", "(", "self", ",", "forAllExcept", ":", "int", ")", "->", "Tuple", "[", "Optional", "[", "int", "]", ",", "Optional", "[", "float", "]", "]", ":", "m", "=", "[", "(", "reqs", ",", "tm", ")", "for", "i", ",", "(", "reqs", ",", "tm", ")", "in", "self", ".", "numOrderedRequests", ".", "items", "(", ")", "if", "i", "!=", "forAllExcept", "]", "if", "m", ":", "reqs", ",", "tm", "=", "zip", "(", "*", "m", ")", "return", "sum", "(", "reqs", ")", ",", "sum", "(", "tm", ")", "else", ":", "return", "None", ",", "None" ]
Calculate and return the average throughput of all the instances except the one specified as `forAllExcept`.
[ "Calculate", "and", "return", "the", "average", "throughput", "of", "all", "the", "instances", "except", "the", "one", "specified", "as", "forAllExcept", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/monitor.py#L594-L607
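Note that, despite the docstring's mention of an average throughput, the body of getInstanceMetrics returns raw totals: the summed ordered-request count and summed time for every instance other than forAllExcept, or (None, None) when there is nothing to aggregate. The sketch below, which assumes an existing Monitor instance named `monitor`, shows how a caller could derive an average throughput from those totals.

# Sketch assuming `monitor` is a Monitor instance. The method returns raw
# totals (summed ordered requests, summed time), not an average.
total_reqs, total_time = monitor.getInstanceMetrics(
    forAllExcept=monitor.instances.masterId)
if total_reqs is not None and total_time:
    backup_throughput = total_reqs / total_time  # requests per unit of time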
234,198
hyperledger/indy-plenum
plenum/server/monitor.py
Monitor.getLatency
def getLatency(self, instId: int) -> float:
        """
        Return a dict with client identifier as a key and calculated latency as a value
        """
        if len(self.clientAvgReqLatencies) == 0:
            return 0.0
        return self.clientAvgReqLatencies[instId].get_avg_latency()
python
def getLatency(self, instId: int) -> float:
        """
        Return a dict with client identifier as a key and calculated latency as a value
        """
        if len(self.clientAvgReqLatencies) == 0:
            return 0.0
        return self.clientAvgReqLatencies[instId].get_avg_latency()
[ "def", "getLatency", "(", "self", ",", "instId", ":", "int", ")", "->", "float", ":", "if", "len", "(", "self", ".", "clientAvgReqLatencies", ")", "==", "0", ":", "return", "0.0", "return", "self", ".", "clientAvgReqLatencies", "[", "instId", "]", ".", "get_avg_latency", "(", ")" ]
Return a dict with client identifier as a key and calculated latency as a value
[ "Return", "a", "dict", "with", "client", "identifier", "as", "a", "key", "and", "calculated", "latency", "as", "a", "value" ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/monitor.py#L624-L630
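The docstring of getLatency describes a dict keyed by client identifier, but the implementation returns a single float: the average request latency tracked for the given instance, or 0.0 when self.clientAvgReqLatencies is empty. A small usage sketch follows, assuming an existing Monitor instance named `monitor`.

# Usage sketch assuming `monitor` is a Monitor instance. The return value is
# a single float: the instance's average request latency, or 0.0 when no
# latency trackers exist yet.
master_latency = monitor.getLatency(monitor.instances.masterId)
if master_latency == 0.0:
    print("no request latencies recorded yet")
else:
    print("average master latency: {:.3f}".format(master_latency))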
234,199
hyperledger/indy-plenum
plenum/common/batched.py
Batched._enqueue
def _enqueue(self, msg: Any, rid: int, signer: Signer) -> None:
        """
        Enqueue the message into the remote's queue.

        :param msg: the message to enqueue
        :param rid: the id of the remote node
        """
        if rid not in self.outBoxes:
            self.outBoxes[rid] = deque()
        self.outBoxes[rid].append(msg)
python
def _enqueue(self, msg: Any, rid: int, signer: Signer) -> None:
        """
        Enqueue the message into the remote's queue.

        :param msg: the message to enqueue
        :param rid: the id of the remote node
        """
        if rid not in self.outBoxes:
            self.outBoxes[rid] = deque()
        self.outBoxes[rid].append(msg)
[ "def", "_enqueue", "(", "self", ",", "msg", ":", "Any", ",", "rid", ":", "int", ",", "signer", ":", "Signer", ")", "->", "None", ":", "if", "rid", "not", "in", "self", ".", "outBoxes", ":", "self", ".", "outBoxes", "[", "rid", "]", "=", "deque", "(", ")", "self", ".", "outBoxes", "[", "rid", "]", ".", "append", "(", "msg", ")" ]
Enqueue the message into the remote's queue. :param msg: the message to enqueue :param rid: the id of the remote node
[ "Enqueue", "the", "message", "into", "the", "remote", "s", "queue", "." ]
dcd144e238af7f17a869ffc9412f13dc488b7020
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/common/batched.py#L36-L45
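Batched._enqueue lazily creates one deque per remote id and appends the message to it; the signer argument is accepted but not referenced in this body (signing presumably happens when the queued batch is flushed). The self-contained sketch below illustrates only that lazy per-remote outbox pattern; the names are invented for illustration and are not part of the Batched class.

from collections import deque

# Standalone illustration of the lazy per-remote outbox pattern behind
# Batched._enqueue; it does not reproduce the rest of the Batched class
# or any message signing.
out_boxes = {}

def enqueue(msg, rid):
    if rid not in out_boxes:
        out_boxes[rid] = deque()
    out_boxes[rid].append(msg)

enqueue({"op": "PING"}, rid=7)
enqueue({"op": "PONG"}, rid=7)
assert list(out_boxes[7]) == [{"op": "PING"}, {"op": "PONG"}]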