code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64)
|---|---|---|---|---|---|
assert isinstance(data, bytes)
with self.lock:
self.buf += data
|
def add(self, data)
|
Add data to the buffer
| 6.574863
| 5.319955
| 1.235887
|
data = b''
with self.lock:
data, self.buf = self.buf, b''
return data
|
def get(self)
|
Get the content of the buffer
| 7.914587
| 5.551437
| 1.425682
|
while True:
try:
c = self.recv(1)
except socket.error as e:
if e.errno == errno.EWOULDBLOCK:
return
else:
raise
else:
self._do(c)
self.socket.setblocking(True)
self.send(b'A')
self.socket.setblocking(False)
|
def handle_read(self)
|
Handle all the available character commands in the socket
| 3.031657
| 2.804584
| 1.080965
|
x = a + b
z = x - a
y = (a - (x - z)) + (b - z)
return x, y
|
def knuth_sum(a, b)
|
Error-free transformation of the sum of two floating point numbers
according to
D.E. Knuth.
The Art of Computer Programming: Seminumerical Algorithms, volume 2.
Addison Wesley, Reading, Massachusetts, second edition, 1981.
The underlying problem is that the exact sum a+b of two floating point
numbers a and b is not necessarily a floating point number; for example, if
you add a very large and a very small number. It is however known that the
difference between the best floating point approximation of a+b and the
exact a+b is again a floating point number. This routine returns the sum
and the error.
Algorithm 3.1 in <https://doi.org/10.1137/030601818>.
| 4.91548
| 5.115287
| 0.960939
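A minimal usage sketch of this error-free transformation, assuming the function is importable (e.g. as `accupy.knuth_sum`):

```python
import accupy  # assumed packaging; knuth_sum itself is listed above

a, b = 1e16, 1.0
x, y = accupy.knuth_sum(a, b)
# x is the rounded floating point sum (1e16: the 1.0 is absorbed),
# y is the exact rounding error (1.0), so x + y represents the exact a + b.
```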
|
x = a + b
y = b - (x - a) if abs(a) > abs(b) else a - (x - b)
return x, y
|
def decker_sum(a, b)
|
Computationally equivalent to knuth_sum, but formally a bit cheaper.
Only works for floats though (and not arrays), and the branch makes it in
fact less favorable in terms of actual speed.
| 4.074808
| 4.026652
| 1.011959
|
q = p.reshape(p.shape[0], -1)
for _ in range(K):
_accupy.distill(q)
return q.reshape(p.shape)
|
def distill(p, K)
|
Algorithm 4.3. Error-free vector transformation for summation.
The vector p is transformed without changing the sum, and p_n is replaced
by float(sum(p)). Kahan [21] calls this a 'distillation algorithm.'
| 4.913209
| 6.373367
| 0.770897
|
# Don't override the input data.
q = p.copy()
distill(q, K - 1)
return numpy.sum(q[:-1], axis=0) + q[-1]
|
def ksum(p, K=2)
|
From
T. Ogita, S.M. Rump, and S. Oishi.
Accurate Sum and Dot Product,
SIAM J. Sci. Comput., 26(6), 1955–1988 (34 pages).
<https://doi.org/10.1137/030601818>.
Algorithm 4.8. Summation as in K-fold precision by (K−1)-fold error-free
vector transformation.
| 9.938807
| 10.136146
| 0.980531
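A short sketch contrasting plain summation with the K-fold version above (assuming `accupy.ksum` is importable):

```python
import numpy
import accupy  # assumed import; ksum is listed above

p = numpy.array([1e16, 1.0, -1e16])
print(numpy.sum(p))    # 0.0: the 1.0 is lost to cancellation
print(accupy.ksum(p))  # 1.0: the (K-1)-fold error-free transformation recovers it
```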
|
q = p.reshape(p.shape[0], -1)
s = _accupy.kahan(q)
return s.reshape(p.shape[1:])
|
def kahan_sum(p)
|
Kahan summation
<https://en.wikipedia.org/wiki/Kahan_summation_algorithm>.
| 5.378358
| 6.407187
| 0.839426
|
selection = list(selected_shells(command))
if command and command != '*' and selection:
for i in selection:
if i.state != remote_dispatcher.STATE_DEAD and i.enabled != enable:
break
else:
toggle_shells('*', not enable)
for i in selection:
if i.state != remote_dispatcher.STATE_DEAD:
i.set_enabled(enable)
|
def toggle_shells(command, enable)
|
Enable or disable the specified shells. If the command would have
no effect, it changes all other shells to the inverse enable value.
| 6.159487
| 6.20392
| 0.992838
|
if not command or command == '*':
for i in dispatchers.all_instances():
yield i
return
selected = set()
instance_found = False
for pattern in command.split():
found = False
for expanded_pattern in expand_syntax(pattern):
for i in dispatchers.all_instances():
instance_found = True
if fnmatch(i.display_name, expanded_pattern):
found = True
if i not in selected:
selected.add(i)
yield i
if instance_found and not found:
console_output('{} not found\n'.format(pattern).encode())
|
def selected_shells(command)
|
Iterator over the shells with names matching the patterns.
An empty pattern matches all the shells.
| 4.178833
| 4.126455
| 1.012693
|
res = [i.display_name + ' ' for i in dispatchers.all_instances() if
i.display_name.startswith(text) and
predicate(i) and
' ' + i.display_name + ' ' not in line]
return res
|
def complete_shells(line, text, predicate=lambda i: True)
|
Return the shell names to include in the completion
| 5.782547
| 5.217525
| 1.108293
|
xx = x.reshape(-1, x.shape[-1])
yy = y.reshape(y.shape[0], -1)
xx = numpy.ascontiguousarray(xx)
yy = numpy.ascontiguousarray(yy)
r = _accupy.kdot_helper(xx, yy).reshape((-1,) + x.shape[:-1] + y.shape[1:])
return ksum(r, K - 1)
|
def kdot(x, y, K=2)
|
Algorithm 5.10. Dot product algorithm in K-fold working precision,
K >= 3.
| 3.616744
| 3.679536
| 0.982935
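A similar sketch for the compensated dot product (assuming `accupy.kdot`; the naive result depends on the summation order of the underlying BLAS):

```python
import numpy
import accupy  # assumed import; kdot is listed above

x = numpy.array([1e16, 1.0, -1e16])
y = numpy.array([1.0, 1.0, 1.0])
print(numpy.dot(x, y))    # typically 0.0 due to catastrophic cancellation
print(accupy.kdot(x, y))  # 1.0: dot product in K-fold working precision
```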
|
xx = x.reshape(-1, x.shape[-1])
yy = y.reshape(y.shape[0], -1)
xx = numpy.ascontiguousarray(xx)
yy = numpy.ascontiguousarray(yy)
r = _accupy.kdot_helper(xx, yy).reshape((-1,) + x.shape[:-1] + y.shape[1:])
return fsum(r)
|
def fdot(x, y)
|
Algorithm 5.10. Dot product algorithm in K-fold working precision,
K >= 3.
| 3.833109
| 3.899724
| 0.982918
|
for i in dispatchers.all_instances():
try:
os.kill(-i.pid, signal.SIGKILL)
except OSError:
# The process was already dead, no problem
pass
|
def kill_all()
|
When polysh quits, we kill all the remote shells we started
| 5.483855
| 5.292077
| 1.036239
|
locale.setlocale(locale.LC_ALL, '')
atexit.register(kill_all)
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
args = parse_cmdline()
args.command = find_non_interactive_command(args.command)
args.exit_code = 0
args.interactive = (
not args.command
and sys.stdin.isatty()
and sys.stdout.isatty())
if args.interactive:
restore_tty_on_exit()
remote_dispatcher.options = args
hosts = []
for host in args.host_names:
hosts.extend(expand_syntax(host))
dispatchers.create_remote_dispatchers(hosts)
signal.signal(signal.SIGWINCH, lambda signum, frame:
dispatchers.update_terminal_size())
stdin.the_stdin_thread = stdin.StdinThread(args.interactive)
if args.profile:
def safe_loop():
try:
loop(args.interactive)
except BaseException:
pass
_profile(safe_loop)
else:
loop(args.interactive)
|
def run()
|
Launch polysh
| 4.4881
| 4.497483
| 0.997914
|
sentry_dsn = os.environ.get('POLYSH_SENTRY_DSN')
if sentry_dsn:
from raven import Client
client = Client(
dsn=sentry_dsn,
release='.'.join(map(str, VERSION)),
ignore_exceptions=[
KeyboardInterrupt
]
)
try:
run()
except Exception:
client.captureException()
else:
run()
|
def main()
|
Wrapper around run() to set up Sentry
| 3.642267
| 3.112223
| 1.17031
|
new_data = b''
buffer_length = len(self.read_buffer)
try:
while buffer_length < self.MAX_BUFFER_SIZE:
try:
piece = self.recv(4096)
except OSError as e:
if e.errno == errno.EAGAIN:
# End of the available data
break
elif e.errno == errno.EIO and new_data:
# Hopefully we could read an error message before the
# actual termination
break
else:
raise
if not piece:
# A closed connection is indicated by signaling a read
# condition, and having recv() return 0.
break
new_data += piece
buffer_length += len(piece)
finally:
new_data = new_data.replace(b'\r', b'\n')
self.read_buffer += new_data
return new_data
|
def _handle_read_chunk(self)
|
Some data can be read
| 3.950065
| 3.872291
| 1.020085
|
self.write_buffer += buf
if len(self.write_buffer) > self.MAX_BUFFER_SIZE:
console_output('Buffer too big ({:d}) for {}\n'.format(
len(self.write_buffer), str(self)).encode())
raise asyncore.ExitNow(1)
return True
|
def dispatch_write(self, buf)
|
Augment the buffer with stuff to write when possible
| 5.206362
| 4.971889
| 1.04716
|
assert isinstance(buf, bytes)
while True:
try:
os.write(1, buf)
break
except IOError as e:
if e.errno != errno.EINTR:
raise
|
def safe_write(buf)
|
We can get a SIGWINCH when printing, which will cause write to raise
an EINTR. That's not a reason to stop printing.
| 2.607738
| 2.247659
| 1.160202
|
assert isinstance(msg, bytes)
assert isinstance(logging_msg, bytes) or logging_msg is None
from polysh import remote_dispatcher
remote_dispatcher.log(logging_msg or msg)
if remote_dispatcher.options.interactive:
from polysh.stdin import the_stdin_thread
the_stdin_thread.no_raw_input()
global last_status_length
if last_status_length:
safe_write('\r{}\r'.format(
last_status_length * ' ').encode())
last_status_length = 0
safe_write(msg)
|
def console_output(msg, logging_msg=None)
|
Use instead of print, to clear the status information before printing
| 6.120929
| 5.944936
| 1.029604
|
match = syntax_pattern.search(string)
if match:
prefix = string[:match.start()]
suffix = string[match.end():]
intervals = match.group(1).split(',')
for interval in intervals:
interval_match = interval_pattern.match(interval)
if interval_match:
start = interval_match.group(1)
end = (interval_match.group(2) or start).strip('-')
for i in _iter_numbers(start, end):
for expanded in expand_syntax(prefix + i + suffix):
yield expanded
else:
yield string
|
def expand_syntax(string)
|
Iterator over all the strings in the expansion of the argument
| 2.554843
| 2.554253
| 1.000231
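For illustration, assuming polysh's usual `<start-end>` range syntax that this iterator expands (the host pattern below is hypothetical):

```python
# each numeric range is expanded recursively into concrete names
list(expand_syntax('web<1-3>.example.com'))
# -> ['web1.example.com', 'web2.example.com', 'web3.example.com']
```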
|
return sorted([i for i in asyncore.socket_map.values() if
isinstance(i, remote_dispatcher.RemoteDispatcher)],
key=lambda i: i.display_name or '')
|
def all_instances()
|
Iterator over all the remote_dispatcher instances
| 7.465703
| 5.129054
| 1.455571
|
awaited = 0
total = 0
for i in all_instances():
if i.enabled:
total += 1
if i.state is not remote_dispatcher.STATE_IDLE:
awaited += 1
return awaited, total
|
def count_awaited_processes()
|
Return a tuple with the number of awaited processes and the total
number
| 5.639083
| 5.498604
| 1.025548
|
instances_found = False
for i in all_instances():
instances_found = True
if i.state not in (remote_dispatcher.STATE_TERMINATED,
remote_dispatcher.STATE_DEAD):
return False
return instances_found
|
def all_terminated()
|
For each remote shell, determine whether it has terminated
| 5.552853
| 5.226584
| 1.062425
|
w, h = terminal_size()
w = max(w - display_names.max_display_name_length - 2, min(w, 10))
# python bug http://python.org/sf/1112949 on amd64
# from ajaxterm.py
bug = struct.unpack('i', struct.pack('I', termios.TIOCSWINSZ))[0]
packed_size = struct.pack('HHHH', h, w, 0, 0)
term_size = w, h
for i in all_instances():
if i.enabled and i.term_size != term_size:
i.term_size = term_size
fcntl.ioctl(i.fd, bug, packed_size)
|
def update_terminal_size()
|
Propagate the terminal size to the remote shells accounting for the
place taken by the longest name
| 5.994127
| 5.960204
| 1.005692
|
max_lengths = []
if info_list:
nr_columns = len(info_list[0])
else:
nr_columns = 0
for i in range(nr_columns):
max_lengths.append(max([len(info[i]) for info in info_list]))
flattened_info_list = []
for info_id in range(len(info_list)):
info = info_list[info_id]
for str_id in range(len(info) - 1):
# Don't justify the last column (i.e. the last printed line)
# as it can get much longer in some shells than in others
orig_str = info[str_id]
indent = max_lengths[str_id] - len(orig_str)
info[str_id] = orig_str + indent * b' '
flattened_info_list.append(b' '.join(info) + b'\n')
return flattened_info_list
|
def format_info(info_list)
|
Turn a 2-dimension list of bytes into a 1-dimension list of bytes with
correct spacing
| 3.035344
| 2.986282
| 1.016429
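A small illustration of the alignment (shell names are hypothetical):

```python
info_list = [[b'web1', b'enabled', b'idle:', b'$'],
             [b'db-master', b'disabled', b'terminated:', b'exit 0']]
for line in format_info(info_list):
    print(line.decode(), end='')
# Every column except the last is padded to the width of its longest entry,
# so the per-shell fields line up; the last column is printed as-is.
```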
|
# Algorithm 6.1 from
#
# ACCURATE SUM AND DOT PRODUCT,
# TAKESHI OGITA, SIEGFRIED M. RUMP, AND SHIN'ICHI OISHI.
assert n >= 6
n2 = round(n / 2)
x = numpy.zeros(n)
y = numpy.zeros(n)
b = math.log2(c)
# vector of exponents between 0 and b/2:
e = numpy.rint(numpy.random.rand(n2) * b / 2).astype(int)
# make sure exponents b/2 and 0 actually occur in e
# vectors x,y
e[0] = round(b / 2) + 1
e[-1] = 0
# generate first half of vectors x, y
rx, ry = numpy.random.rand(2, n2)
x[:n2] = (2 * rx - 1) * 2 ** e
y[:n2] = (2 * ry - 1) * 2 ** e
def dot_exact(x, y):
mp.dps = dps
# convert to list first, see
# <https://github.com/fredrik-johansson/mpmath/pull/385>
return mp.fdot(x.tolist(), y.tolist())
# for i=n2+1:n and v=1:i,
# generate x_i, y_i such that (*) x(v)’*y(v) ~ 2^e(i-n2)
# generate exponents for second half
e = numpy.rint(numpy.linspace(b / 2, 0, n - n2)).astype(int)
rx, ry = numpy.random.rand(2, n2)
for i in range(n2, n):
# x_i random with generated exponent
x[i] = (2 * rx[i - n2] - 1) * 2 ** e[i - n2]
# y_i according to (*)
y[i] = (
(2 * ry[i - n2] - 1) * 2 ** e[i - n2] - dot_exact(x[: i + 1], y[: i + 1])
) / x[i]
x, y = numpy.random.permutation((x, y))
# the true dot product rounded to nearest floating point
d = dot_exact(x, y)
# the actual condition number
C = 2 * dot_exact(abs(x), abs(y)) / abs(d)
return x, y, d, C
|
def generate_ill_conditioned_dot_product(n, c, dps=100)
|
n ... length of vector
c ... target condition number
| 4.725657
| 4.650469
| 1.016168
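Sketch of a typical call (the achieved condition number C only approximates the target c):

```python
# 100 summands with a target condition number of 1e20
x, y, d, C = generate_ill_conditioned_dot_product(100, 1.0e20)
# x, y: the generated vectors, d: their dot product rounded to nearest float,
# C: the actual condition number of the dot product
```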
|
prev_nr_read = nr_handle_read
asyncore.loop(count=1, timeout=timeout, use_poll=True)
return nr_handle_read - prev_nr_read
|
def main_loop_iteration(timeout=None)
|
Return the number of RemoteDispatcher.handle_read() calls made by this
iteration
| 5.594931
| 4.200292
| 1.332034
|
if options.user:
name = '%s@%s' % (options.user, name)
evaluated = options.ssh % {'host': name, 'port': port}
if evaluated == options.ssh:
evaluated = '%s %s' % (evaluated, name)
os.execlp('/bin/sh', 'sh', '-c', evaluated)
|
def launch_ssh(self, name, port)
|
Launch the ssh command in the child process
| 3.565905
| 3.439424
| 1.036774
|
if state is not self.state:
if self.debug:
self.print_debug(b'state => ' + STATE_NAMES[state].encode())
if self.state is STATE_NOT_STARTED:
self.read_in_state_not_started = b''
self.state = state
|
def change_state(self, state)
|
Change the state of the remote process, logging the change
| 6.311535
| 5.996448
| 1.052546
|
try:
os.kill(-self.pid, signal.SIGKILL)
except OSError:
# The process was already dead, no problem
pass
self.read_buffer = b''
self.write_buffer = b''
self.set_enabled(False)
if self.read_in_state_not_started:
self.print_lines(self.read_in_state_not_started)
self.read_in_state_not_started = b''
if options.abort_error and self.state is STATE_NOT_STARTED:
raise asyncore.ExitNow(1)
self.change_state(STATE_DEAD)
|
def disconnect(self)
|
We are no longer interested in this remote process
| 4.966869
| 4.734533
| 1.049073
|
attr = termios.tcgetattr(self.fd)
attr[1] &= ~termios.ONLCR # oflag
attr[3] &= ~termios.ECHO # lflag
termios.tcsetattr(self.fd, termios.TCSANOW, attr)
# unsetopt zle prevents Zsh from resetting the tty
return b'unsetopt zle 2> /dev/null;stty -echo -onlcr -ctlecho;'
|
def configure_tty(self)
|
We don't want \n to be replaced with \r\n, and we disable the echo
| 8.563383
| 7.582639
| 1.129341
|
# No right prompt
command_line = b'PS2=;RPS1=;RPROMPT=;'
command_line += b'PROMPT_COMMAND=;'
command_line += b'TERM=ansi;'
command_line += b'unset HISTFILE;'
prompt1, prompt2 = callbacks.add(b'prompt', self.seen_prompt_cb, True)
command_line += b'PS1="' + prompt1 + b'""' + prompt2 + b'\n"\n'
return command_line
|
def set_prompt(self)
|
The prompt is important because we detect the readiness of a process
by waiting for its prompt.
| 9.522697
| 9.107814
| 1.045552
|
if self.state is not STATE_RUNNING or callbacks.any_in(data):
# Slow case :-(
return False
last_nl = data.rfind(b'\n')
if last_nl == -1:
# No '\n' in data => slow case
return False
self.read_buffer = data[last_nl + 1:]
self.print_lines(data[:last_nl])
return True
|
def handle_read_fast_case(self, data)
|
If we are in a fast case we'll avoid the long processing of each
line
| 6.332706
| 5.931469
| 1.067646
|
if self.state == STATE_DEAD:
return
global nr_handle_read
nr_handle_read += 1
new_data = self._handle_read_chunk()
if self.debug:
self.print_debug(b'==> ' + new_data)
if self.handle_read_fast_case(self.read_buffer):
return
lf_pos = new_data.find(b'\n')
if lf_pos >= 0:
# Optimization: we knew there were no '\n' in the previous read
# buffer, so we searched only in the new_data and we offset the
# found index by the length of the previous buffer
lf_pos += len(self.read_buffer) - len(new_data)
elif self.state is STATE_NOT_STARTED and \
options.password is not None and \
b'password:' in self.read_buffer.lower():
self.dispatch_write('{}\n'.format(options.password).encode())
self.read_buffer = b''
return
while lf_pos >= 0:
# For each line in the buffer
line = self.read_buffer[:lf_pos + 1]
if callbacks.process(line):
pass
elif self.state in (STATE_IDLE, STATE_RUNNING):
self.print_lines(line)
elif self.state is STATE_NOT_STARTED:
self.read_in_state_not_started += line
if b'The authenticity of host' in line:
msg = line.strip(b'\n') + b' Closing connection.'
self.disconnect()
elif b'REMOTE HOST IDENTIFICATION HAS CHANGED' in line:
msg = b'Remote host identification has changed.'
else:
msg = None
if msg:
self.print_lines(msg + b' Consider manually connecting or '
b'using ssh-keyscan.')
# Go to the next line in the buffer
self.read_buffer = self.read_buffer[lf_pos + 1:]
if self.handle_read_fast_case(self.read_buffer):
return
lf_pos = self.read_buffer.find(b'\n')
if self.state is STATE_NOT_STARTED and not self.init_string_sent:
self.dispatch_write(self.init_string)
self.init_string_sent = True
|
def handle_read(self)
|
We got some output from a remote shell; this drives the state machine
| 4.097639
| 4.053332
| 1.010931
|
if self.state is STATE_RUNNING:
if not callbacks.process(self.read_buffer):
self.print_lines(self.read_buffer)
self.read_buffer = b''
|
def print_unfinished_line(self)
|
The unfinished line stayed long enough in the buffer to be printed
| 8.13887
| 6.884743
| 1.18216
|
num_sent = self.send(self.write_buffer)
if self.debug:
if self.state is not STATE_NOT_STARTED or options.password is None:
self.print_debug(b'<== ' + self.write_buffer[:num_sent])
self.write_buffer = self.write_buffer[num_sent:]
|
def handle_write(self)
|
Let's write as much as we can
| 5.583064
| 5.282134
| 1.056971
|
assert isinstance(msg, bytes)
state = STATE_NAMES[self.state].encode()
console_output(b'[dbg] ' + self.display_name.encode() + b'[' + state +
b']: ' + msg + b'\n')
|
def print_debug(self, msg)
|
Log some debugging information to the console
| 7.769361
| 7.523017
| 1.032745
|
return [self.display_name.encode(),
self.enabled and b'enabled' or b'disabled',
STATE_NAMES[self.state].encode() + b':',
self.last_printed_line.strip()]
|
def get_info(self)
|
Return a list with all information available about this process
| 10.751369
| 9.654488
| 1.113614
|
if self.state != STATE_DEAD and self.enabled:
super().dispatch_write(buf)
return True
return False
|
def dispatch_write(self, buf)
|
There is new stuff to write when possible
| 7.050703
| 7.015524
| 1.005014
|
if not new_name:
name = self.hostname
else:
name = new_name.decode()
self.display_name = display_names.change(
self.display_name, name)
|
def change_name(self, new_name)
|
Change the name of the shell, possibly updating the maximum name
length
| 6.004745
| 6.127155
| 0.980022
|
if name:
# debug callback add?
rename1, rename2 = callbacks.add(
b'rename', self.change_name, False)
self.dispatch_command(b'/bin/echo "' + rename1 + b'""' + rename2 +
b'"' + name + b'\n')
else:
self.change_name(self.hostname.encode())
|
def rename(self, name)
|
Send the remote shell its new name, to be shell-expanded
| 13.087386
| 10.907677
| 1.199833
|
# decide on *some* terminal size
cr = _ioctl_GWINSZ(0) or _ioctl_GWINSZ(1) or _ioctl_GWINSZ(2)  # try open fds
if not cr:  # ...then ctty
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = _ioctl_GWINSZ(fd)
os.close(fd)
except BaseException:
pass
if not cr:  # env vars or finally defaults
try:
cr = os.environ['LINES'], os.environ['COLUMNS']
except BaseException:
cr = 25, 80
return int(cr[1]), int(cr[0])
|
def terminal_size()
|
Return (lines, columns).
| 2.994983
| 3.10357
| 0.965012
|
global completion_results
if state == 0:
line = readline.get_line_buffer()
if line.startswith(':'):
# Control command completion
completion_results = complete_control_command(line, text)
else:
if line.startswith('!') and text and line.startswith(text):
dropped_exclam = True
text = text[1:]
else:
dropped_exclam = False
completion_results = []
# Complete local paths
completion_results += complete_local_path(text)
# Complete from history
l = len(text)
completion_results += [w + ' ' for w in history_words if
len(w) > l and w.startswith(text)]
if readline.get_begidx() == 0:
# Completing first word from $PATH
completion_results += [w + ' ' for w in user_commands_in_path
if len(w) > l and w.startswith(text)]
completion_results = remove_dupes(completion_results)
if dropped_exclam:
completion_results = ['!' + r for r in completion_results]
if state < len(completion_results):
return completion_results[state]
completion_results = None
return None
|
def complete(text, state)
|
On tab press, return the next possible completion
| 3.678234
| 3.713008
| 0.990635
|
read_line = None
if 'read_line' in kwargs:
read_line = kwargs['read_line']
del kwargs['read_line']
p = subprocess.Popen(*args, **kwargs)
wait_stdout = None
wait_stderr = None
if p.stdout:
wait_stdout = sys.stdout.attach(p.stdout, read_line=read_line)
if p.stderr:
wait_stderr = sys.stderr.attach(p.stderr)
original_wait = p.wait
def wait():
original_wait()
if wait_stdout:
wait_stdout()
if wait_stderr:
wait_stderr()
p.wait = wait
return p
|
def Popen(*args, **kwargs)
|
Executes a command using subprocess.Popen and redirects output to AETROS and stdout.
Parses stdout as well for stdout API calls.
Use read_line argument to read stdout of command's stdout line by line.
Use returned process stdin to communicate with the command.
:return: subprocess.Popen
| 2.230954
| 2.21375
| 1.007772
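A hedged usage sketch: the command is hypothetical, and it assumes this wrapper runs inside an AETROS job where sys.stdout/sys.stderr are the GeneralLogger proxies providing attach():

```python
import subprocess

# read_line is called for every line read from the command's stdout
p = Popen(['python', 'train.py'],
          stdout=subprocess.PIPE, stderr=subprocess.PIPE,
          read_line=lambda line: line)
p.wait()  # waits for the process and for both attached output readers
```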
|
job = JobBackend()
offline = False
if '1' == os.getenv('AETROS_OFFLINE', ''):
offline = True
if os.getenv('AETROS_JOB_ID'):
job.load(os.getenv('AETROS_JOB_ID'))
if not offline:
job.connect()
else:
job.create()
if not offline:
job.connect()
job.start(offline=offline)
return job
|
def context()
|
Returns a new JobBackend instance which connects to AETROS Trainer
based on "model" in aetros.yml or (internal: env:AETROS_MODEL_NAME environment variable).
internal: If env:AETROS_JOB_ID is not defined, it creates a new job.
Job is ended either by calling JobBackend.done(), JobBackend.fail() or JobBackend.abort().
If the script ends without calling one of the methods above, JobBackend.stop() is called and exit code defines
whether it is a fail() or done() result.
:return: JobBackend
| 4.016358
| 3.027535
| 1.32661
|
if self.stop_requested or self.stop_requested_force:
# signal has already been sent or we force a shutdown.
# handles the keystroke 2x CTRL+C to force an exit.
self.stop_requested_force = True
self.logger.warning('Force stopped: ' + str(sig))
# just kill the process, we don't care about the results
self.on_force_exit()
os._exit(1)
# with force_exit we really close the process, killing it in unknown state
# self.fail('Force stopped', force_exit=True)
# return
if self.is_master_process():
self.logger.warning('Received signal '+str(sig)+'. Send again to force stop. Stopping ...')
else:
self.logger.debug("Got child signal " + str(sig))
self.stop_requested = True
# the default SIGINT handle in python is not always installed, so we can't rely on the
# KeyboardInterrupt exception to be thrown.
# thread.interrupt_main would call sigint again.
# the shutdown listener will do the rest like committing rest memory files into Git and closing connections.
sys.exit(0 if self.in_early_stop else 1)
|
def on_sigint(self, sig, frame)
|
We got SIGINT signal.
| 10.769229
| 10.382028
| 1.037295
|
self.ended = True
self.running = False
# When the server sends an abort signal, we really have to close immediately,
# since for example the job has been already deleted.
# without touching the git and client any further
os._exit(1)
|
def external_aborted(self, params)
|
Immediately abort the job by server.
This runs in the Client:read() thread.
| 23.644022
| 21.589571
| 1.095159
|
# only the master process handles the regular stop signal from the server, sending a SIGINT to
# all its children (that is, to us, the non-master processes)
if not self.is_master_process():
if force:
# make sure even the subprocess dies really on force
os._exit(1)
return
self.logger.warning("Received stop signal by server.")
if not self.stop_requested_force:
self.stop_requested_force = force
raise_sigint()
|
def external_stop(self, force)
|
Stop signal by server.
| 12.82651
| 11.832162
| 1.084038
|
self.lock.acquire()
try:
time_diff = time.time() - self.last_step_time
if self.last_step > step:
# it restarted
self.last_step = 0
made_steps_since_last_call = step - self.last_step
self.last_step = step
self.made_steps_since_last_sync += made_steps_since_last_call
self.made_steps_size_since_last_sync += made_steps_since_last_call * size
if time_diff >= 1 or step == total: # only each second or last batch
self.set_system_info('step', step, True)
self.set_system_info('steps', total, True)
steps_per_second = self.made_steps_since_last_sync / time_diff
samples_per_second = self.made_steps_size_since_last_sync / time_diff
self.last_step_time = time.time()
if size:
self.report_speed(samples_per_second)
epochs_per_second = steps_per_second / total # all batches
self.set_system_info('epochsPerSecond', epochs_per_second, True)
current_epochs = self.current_epoch if self.current_epoch else 1
total_epochs = self.total_epochs if self.total_epochs else 1
self.made_steps_since_last_sync = 0
self.made_steps_size_since_last_sync = 0
eta = 0
if step < total:
# time to end this epoch
if steps_per_second != 0:
eta = (total - step) / steps_per_second
# time until all epochs are done
if total_epochs - current_epochs > 0:
if epochs_per_second != 0:
eta += (total_epochs - (current_epochs)) / epochs_per_second
self.git.store_file('aetros/job/times/eta.json', simplejson.dumps(eta))
if label and self.step_label != label:
self.set_system_info('stepLabel', label, True)
self.step_label = label
if speed_label and self.step_speed_label != speed_label:
self.set_system_info('stepSpeedLabel', speed_label, True)
self.step_speed_label = speed_label
finally:
self.lock.release()
|
def step(self, step, total, label='STEP', speed_label='STEPS/S', size=1)
|
Increase the step indicator, which is a sub progress circle of the actual
main progress circle (epoch, progress() method).
| 2.657925
| 2.650645
| 1.002747
|
return JobLossChannel(self, name, xaxis, yaxis, layout)
|
def create_loss_channel(self, name='loss', xaxis=None, yaxis=None, layout=None)
|
:param name: string
:return: JobLossChannel
| 6.888734
| 4.543021
| 1.516333
|
return JobChannel(self, name, traces, main, kpi, kpiTrace, max_optimization, type, xaxis, yaxis, layout)
|
def create_channel(self, name, traces=None,
main=False, kpi=False, kpiTrace=0, max_optimization=True,
type=JobChannel.NUMBER,
xaxis=None, yaxis=None, layout=None)
|
:param name: str
:param traces: None|list : by default creates a trace based on "name".
:param main: bool : whether this channel is visible in the job list as column for better comparison.
:param kpi: bool : whether this channel is the KPI (key performance indicator).
Used for hyperparameter optimization. Only one channel can be a kpi. Only first trace used.
:param kpiTrace: bool : if you have multiple traces, define which is the KPI. 0 based index.
:param max_optimization: bool : whether the optimization maximizes or minimizes the kpi. Use max_optimization=False to
tell the optimization algorithm that this channel minimizes a kpi, for instance the loss of a model.
:param type: str : One of JobChannel.NUMBER, JobChannel.TEXT
:param xaxis: dict
:param yaxis: dict
:param layout: dict
| 1.906277
| 2.503854
| 0.761337
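For example, a minimal sketch assuming `job` is a connected JobBackend instance:

```python
# a channel shown as a comparison column in the job list and used as the KPI,
# where smaller values are better (e.g. a validation loss)
val_loss = job.create_channel('val_loss', main=True, kpi=True, max_optimization=False)
```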
|
self.in_shutdown = True
self.logger.debug('on_shutdown, stopped=%s, ended=%s, early_stop=%s, stop_requested=%s'
% (str(self.stopped), str(self.ended), str(self.in_early_stop), str(self.stop_requested)))
if self.stopped or self.ended:
# make really sure, ssh connection closed
self.client.close()
return
if self.in_early_stop:
self.done()
return
if self.stop_requested:
# when SIGINT has been triggered
if self.stop_requested_force:
if not self.is_master_process():
# if not master process, we just stop everything. status/progress is set by master
self.stop(force_exit=True)
else:
# master process
self.fail('Force stopped.', force_exit=True)
else:
if not self.is_master_process():
# if not master process, we just stop everything. status/progress is set by master
self.stop()
else:
# master process
self.abort()
return
if hasattr(sys, 'last_value'):
# sys.last_value contains an exception when there was an uncaught one
if isinstance(sys.last_value, KeyboardInterrupt):
# can only happen when KeyboardInterrupt has been raised manually
# since the one from the default sigint handler will never reach here
# since we catch the sigint signal and sys.exit() before the default sigint handler
# is able to raise KeyboardInterrupt
self.abort()
else:
self.fail(type(sys.last_value).__name__ + ': ' + str(sys.last_value))
elif self.running:
self.done()
|
def on_shutdown(self)
|
Shutdown routine. Sets the last progress (done, aborted, failed) and tries to send last logs and git commits.
Also makes sure the ssh connection is closed (thus, the job marked as offline).
Is triggered by atexit.register().
| 4.361254
| 4.131452
| 1.055623
|
global last_exit_code
if not last_exit_code:
last_exit_code = 1
with self.git.batch_commit('FAILED'):
self.set_status('FAILED', add_section=False)
self.git.commit_json_file('FAIL_MESSAGE', 'aetros/job/crash/error', str(message) if message else '')
if isinstance(sys.stderr, GeneralLogger):
self.git.commit_json_file('FAIL_MESSAGE_LAST_LOG', 'aetros/job/crash/last_message', sys.stderr.last_messages)
self.logger.debug('Crash report stored in commit ' + self.git.get_head_commit())
self.stop(JOB_STATUS.PROGRESS_STATUS_FAILED, force_exit=force_exit)
|
def fail(self, message=None, force_exit=False)
|
Marks the job as failed, saves the given error message and force-exits the process when force_exit=True.
| 7.024551
| 6.606541
| 1.063272
|
if self.stream_log and not self.ended:
# points to the Git stream write
self.stream_log.write(message)
return True
|
def write_log(self, message)
|
Proxy method for GeneralLogger.
| 15.452847
| 14.829576
| 1.042029
|
status = str(status)
if add_section:
self.section(status)
self.job_add_status('status', status)
|
def set_status(self, status, add_section=True)
|
Set an arbitrary status, visible in the big wheel of the job view.
| 7.142827
| 5.370971
| 1.329895
|
if not create_info:
create_info = {
'server': server,
'config': {
'insights': insights,
'command': ' '.join(sys.argv)
}
}
config = find_config(self.config_path, logger=self.logger)
if not config['model']:
raise Exception('AETROS config file (aetros.yml) not found.')
# first transform simple format in the full definition with parameter types
# (string, number, group, choice_group, etc)
full_hyperparameters = lose_parameters_to_full(config['parameters'])
# now extract hyperparameters from full definition, and overwrite stuff using
# incoming_hyperparameter if available
hyperparameter = extract_parameters(full_hyperparameters, hyperparameter)
create_info['config']['parameters'] = hyperparameter
self.job = create_info
if 'server' not in self.job and server:
# setting this disables server assignment
self.job['server'] = server
self.job['optimization'] = None
self.job['type'] = 'custom'
if 'parameters' not in self.job['config']:
self.job['config']['parameters'] = {}
if 'insights' not in self.job['config']:
self.job['config']['insights'] = insights
self.job['created'] = time.time()
self.git.create_job_id(self.job)
self.logger.debug("Job created with Git ref " + self.git.ref_head)
return self.job_id
|
def create(self, create_info=None, hyperparameter=None, server='local', insights=False)
|
Creates a new job in git and pushes it.
:param create_info: from the api.create_job_info(id). Contains the config and job info (type, server)
:param hyperparameter: simple nested dict with key->value, which overwrites stuff from aetros.yml
:param server: if None, the job will be assigned to a server.
:param insights: whether you want to activate insights (for simple models)
| 5.651333
| 5.140796
| 1.099311
|
value = read_parameter_by_path(self.job['config']['parameters'], path, return_group)
if value is None:
return default
return value
|
def get_parameter(self, path, default=None, return_group=False)
|
Reads a hyperparameter from the job configuration. If nothing is found, the given default is used.
:param path: str
:param default: *
:param return_group: If true and path is a choice_group, we return the dict instead of the group name.
:return: *
| 6.727244
| 7.330933
| 0.917652
|
self.git.read_job(job_id, checkout=self.is_master_process())
self.load_job_from_ref()
|
def load(self, job_id)
|
Loads the job into the index and work-tree, restarts its ref and sets it as current.
:param job_id: int
| 13.244355
| 12.720363
| 1.041193
|
if not self.job_id:
raise Exception('Job not loaded yet. Use load(id) first.')
if not os.path.exists(self.git.work_tree + '/aetros/job.json'):
raise Exception('Could not load aetros/job.json from git repository. Make sure you have created the job correctly.')
with open(self.git.work_tree + '/aetros/job.json') as f:
self.job = simplejson.loads(f.read(), object_pairs_hook=collections.OrderedDict)
if not self.job:
raise Exception('Could not parse aetros/job.json from git repository. Make sure you have created the job correctly.')
self.logger.debug('job: ' + str(self.job))
|
def load_job_from_ref(self)
|
Loads the job.json into self.job
| 2.837799
| 2.65736
| 1.067901
|
if not self.job:
raise Exception('Job not loaded yet. Use load(id) first.')
return JobModel(self.job_id, self.job, self.home_config['storage_dir'])
|
def get_job_model(self)
|
Returns a new JobModel instance with current loaded job data attached.
:return: JobModel
| 8.3842
| 7.798246
| 1.075139
|
blacklist = ['.git', 'aetros']
working_tree = self.git.work_tree
def recursive(path='.'):
if os.path.basename(path) in blacklist:
return []  # nothing to list for blacklisted paths
if os.path.isdir(path):
files = []
for file in os.listdir(path):
if path and path != '.':
file = path + '/' + file
added_files = recursive(file)
files += added_files
return files
else:
if path.endswith('.pyc'):
return []
if is_ignored(path, self.config['ignore']):
return []
return [os.path.relpath(path, working_tree)]
return recursive(working_tree)
|
def file_list(self)
|
Lists all files in the working directory.
| 3.964181
| 3.800057
| 1.04319
|
blacklist = ['.git']
def add_resursiv(path = '.', report=report):
if os.path.basename(path) in blacklist:
return 0, 0
if working_tree + '/aetros' == path:
# ignore in work_tree the folder ./aetros/, as it could be
# that we checked out a job and start it again.
return 0, 0
if os.path.isdir(path):
files = 0
size = 0
for file in os.listdir(path):
if path and path != '.':
file = path + '/' + file
added_files, added_size = add_resursiv(file)
files += added_files
size += added_size
return files, size
else:
if path.endswith('.pyc'):
return 0, 0
relative_path = os.path.relpath(path, working_tree)
if is_ignored(relative_path, self.config['ignore']):
return 0, 0
self.logger.debug("added file to job " + relative_path)
if report:
print("Added job file: " + relative_path)
self.git.add_file_path_in_work_tree(path, working_tree, verbose=False)
return 1, os.path.getsize(path)
return add_resursiv(working_tree, report=report)
|
def add_files(self, working_tree, report=False)
|
Commits all files as limited in aetros.yml: `files` is a whitelist, `exclude_files` is a blacklist.
If both are empty, we commit all files smaller than 10MB.
:return:
| 3.782106
| 3.768248
| 1.003678
|
if path.endswith('.txt'):
if not os.path.exists(path):
raise Exception("Given word2vec file does not exist: " + path)
f = open(path, 'r')
if not header_with_dimensions and not dimensions:
raise Exception('Either the word2vec file should contain the dimensions as header or it needs to be '
'specified manually using dimensions=[x,y] argument.')
if header_with_dimensions:
line = f.readline()
if ' ' not in line:
raise Exception('Given word2vec file should have in first line the dimensions, e.g.: 1000 200')
dimensions = np.fromstring(line, dtype=np.uint, sep=' ').tolist()
labels = ''
vectors = ''
line_pos = 1 if header_with_dimensions else 0
if len(dimensions) != 2:
raise Exception('dimensions invalid shape. e.g. [200, 32] => 200 rows, 32 cols.')
for line in iter(f.readline, ''):
line_pos += 1
space_pos = line.find(' ')
if -1 == space_pos:
message = 'Given word2vec does not have correct format in line ' + str(line_pos)
message += '\nGot: ' + str(line)
raise Exception(message)
labels += line[:space_pos] + '\n'
vectors += line[space_pos+1:] + ' '
vectors = np.fromstring(vectors, dtype=np.float32, sep=' ').tobytes()
else:
raise Exception("Given word2vec is not a .txt file. Other file formats are not supported.")
info = {
'dimensions': dimensions
}
name = os.path.basename(path)
self._ensure_insight(x)
remote_path = 'aetros/job/insight/'+str(x)+'/embedding/'
with self.git.batch_commit('INSIGHT_EMBEDDING ' + str(x)):
self.git.commit_file('WORD2VEC', remote_path + name + '/tensor.bytes', vectors)
self.git.commit_file('WORD2VEC', remote_path + name + '/metadata.tsv', labels)
self.git.commit_file('WORD2VEC', remote_path + name + '/info.json', simplejson.dumps(info))
|
def add_embedding_word2vec(self, x, path, dimensions=None, header_with_dimensions=True)
|
Parse the word2vec file and extracts vectors as bytes and labels as TSV file.
The format is simple: It's a UTF-8 encoded file, each word + vectors separated by new line.
Vector is space separated.
At the very first line might be dimensions, given as space separated value.
Line 1: 2 4\n
Line 2: word 200.3 4004.4 34.2 22.3\n
Line 3: word2 20.0 4.4 4.2 0.022\n
and so on
For performance reasons, you should prefer add_embedding_path().
| 3.475192
| 3.385641
| 1.02645
|
if not os.path.exists(vectors_path):
raise Exception("Given embedding vectors file does not exist: " + vectors_path)
if metadata and not os.path.exists(metadata):
raise Exception("Given embedding metadata file does not exist: " + metadata)
name = os.path.basename(vectors_path)
self._ensure_insight(x)
remote_path = 'aetros/job/insight/'+str(x)+'/embedding/'
info = {
'dimensions': dimensions,
'image_shape': image_shape,
'image': os.path.basename(image) if image else None,
}
with self.git.lock_write():
self.git.add_file_path(remote_path + name + '/tensor.bytes', vectors_path)
self.git.add_file_path(remote_path + name + '/metadata.tsv', metadata)
self.git.add_file(remote_path + name + '/info.json', simplejson.dumps(info))
if image:
self.git.add_file(remote_path + name + '/' + os.path.basename(image), image)
self.git.commit_index('INSIGHT_EMBEDDING ' + str(x))
|
def add_embedding_path(self, x, dimensions, vectors_path, metadata=None, image_shape=None, image=None)
|
Adds a new embedding with optional metadata.
Example how to generate vectors based on 2D numpy array:
# 4 vectors, each size of 3
vectors = [
[2.3, 4.0, 33],
[2.4, 4.2, 44],
[2.5, 3.9, 34],
[5.5, 200.2, 66]
]
metadata = [
# header, only necessary when more than one column
# can be anything.
['label', 'count'],
# for each vector from above an entry.
['red', '123'],
['white', '143'],
['yellow', '344'],
['house', '24'],
]
numpy.array(vectors, dtype=numpy.float32).tofile('vectors.bytes')
numpy.savetxt('metadata.tsv', numpy.array(metadata), delimiter='\t', fmt='%s')
job.add_embedding_path([4, 3], 'vectors.bytes', 'metadata.tsv')
Metadata format example:
Label\tCount\n
red\t4\n
yellow\t6\n
:param x: The x axis of the insights.
:param dimensions: 2D List of dimension, e.g [200, 20], means 200 vectors and each vector has size of 20.
:param vectors_path: A path to a floats64 bytes file, no separators, sum(dimensions)*floats64 long.
Example: If dimensions [200, 20] then the tensor file has 200*20 float32 bytes in it
:param metadata: A TSV file. If only one column long (=no tab separator per line), then there's no need for a header.
If you have more than one column, use the first line as header.
:param image_shape: Size of the image of each vector.
:param image: Path to an image sprite.
:return:
| 2.984046
| 2.742932
| 1.087904
|
current_year = datetime.datetime.now().strftime('%Y')
files = []
for line in fileinfo:
parts = re.split(
r'^([\-dbclps])' + # Directory flag [1]
r'([\-rwxs]{9})\s+' + # Permissions [2]
r'(\d+)\s+' + # Number of items [3]
r'([a-zA-Z0-9_-]+)\s+' + # File owner [4]
r'([a-zA-Z0-9_-]+)\s+' + # File group [5]
r'(\d+)\s+' + # File size in bytes [6]
r'(\w{3}\s+\d{1,2})\s+' + # 3-char month and 1/2-char day of the month [7]
r'(\d{1,2}:\d{1,2}|\d{4})\s+' + # Time or year (need to check conditions) [+= 7]
r'(.+)$', # File/directory name [8]
line
)
date = parts[7]
time = parts[8] if ':' in parts[8] else '00:00'
year = parts[8] if ':' not in parts[8] else current_year
dt_obj = parser.parse("%s %s %s" % (date, year, time))
files.append({
'directory': parts[1],
'perms': parts[2],
'items': parts[3],
'owner': parts[4],
'group': parts[5],
'size': int(parts[6]),
'date': date,
'time': time,
'year': year,
'name': parts[9],
'datetime': dt_obj
})
return files
|
def split_file_info(fileinfo)
|
Parse sane directory output, usually from `ls -l`
Adapted from https://gist.github.com/tobiasoberrauch/2942716
| 3.04085
| 2.946586
| 1.031991
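Illustration with a hypothetical `ls -l` style line of the kind collected via `conn.dir()`:

```python
lines = ['-rw-r--r--   1 deploy   www      4096 Mar  3 12:34 index.html']
entry = split_file_info(lines)[0]
# entry['name'] == 'index.html', entry['size'] == 4096,
# entry['perms'] == 'rw-r--r--', entry['datetime'] is a parsed datetime
```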
|
if isinstance(local, file_type): # open file, leave open
local_file = local
elif local is None: # return string
local_file = buffer_type()
else: # path to file, open, write/close return None
local_file = open(local, 'wb')
self.conn.retrbinary("RETR %s" % remote, local_file.write)
if isinstance(local, file_type):
pass
elif local is None:
contents = local_file.getvalue()
local_file.close()
return contents
else:
local_file.close()
return None
|
def get(self, remote, local=None)
|
Gets the file from FTP server
local can be:
a file: opened for writing, left open
a string: path to output file
None: contents are returned
| 3.484536
| 3.112899
| 1.119386
|
remote_dir = os.path.dirname(remote)
remote_file = os.path.basename(local)\
if remote.endswith('/') else os.path.basename(remote)
if contents:
# local is ignored if contents is set
local_file = buffer_type(contents)
elif isinstance(local, file_type):
local_file = local
else:
local_file = open(local, 'rb')
current = self.conn.pwd()
self.descend(remote_dir, force=True)
size = 0
try:
self.conn.storbinary('STOR %s' % remote_file, local_file)
size = self.conn.size(remote_file)
except:
if not quiet:
raise
finally:
local_file.close()
self.conn.cwd(current)
return size
|
def put(self, local, remote, contents=None, quiet=False)
|
Puts a local file (or contents) onto the FTP server
local can be:
a string: path to input file
a file: opened for reading
None: contents are pushed
| 3.175283
| 3.169061
| 1.001963
|
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
try:
self.conn.mkd(dst)
except error_perm:
pass
errors = []
for name in names:
if name in ignored_names:
continue
src_name = os.path.join(src, name)
dst_name = os.path.join(dst, name)
try:
if os.path.islink(src_name):
pass
elif os.path.isdir(src_name):
self.upload_tree(src_name, dst_name, ignore)
else:
# Will raise a SpecialFileError for unsupported file types
self.put(src_name, dst_name)
except Exception as why:
errors.append((src_name, dst_name, str(why)))
return dst
|
def upload_tree(self, src, dst, ignore=None)
|
Recursively upload a directory tree.
Although similar to shutil.copytree we don't follow symlinks.
| 2.015482
| 1.86457
| 1.080937
|
if extra:
self.tmp_output = []
self.conn.dir(remote, self._collector)
directory_list = split_file_info(self.tmp_output)
else:
directory_list = self.conn.nlst(remote)
if remove_relative_paths:
return list(filter(self.is_not_relative_path, directory_list))
return directory_list
|
def list(self, remote='.', extra=False, remove_relative_paths=False)
|
Return directory list
| 4.57726
| 4.094445
| 1.117919
|
remote_dirs = remote.split('/')
for directory in remote_dirs:
try:
self.conn.cwd(directory)
except Exception:
if force:
self.conn.mkd(directory)
self.conn.cwd(directory)
return self.conn.pwd()
|
def descend(self, remote, force=False)
|
Descend, possibly creating directories as needed
| 3.232918
| 2.910121
| 1.110922
|
try:
self.conn.delete(remote)
except Exception:
return False
else:
return True
|
def delete(self, remote)
|
Delete a file from server
| 4.042645
| 3.876176
| 1.042946
|
try:
self.conn.cwd(remote)
except Exception:
return False
else:
return self.pwd()
|
def cd(self, remote)
|
Change working directory on server
| 4.827207
| 4.838479
| 0.99767
|
owner, name, id = unpack_full_job_id(full_id)
if isinstance(sys.stdout, GeneralLogger):
# we don't want to have stuff written to stdout before in job's log
sys.stdout.clear_buffer()
job_backend = JobBackend(model_name=owner + '/' + name)
if fetch:
job_backend.fetch(id)
job_backend.restart(id)
job_backend.start(collect_system=False, offline=offline)
job_backend.set_status('PREPARE', add_section=False)
job = job_backend.get_job_model()
if not cpus:
cpus = job.get_cpu()
if not memory:
memory = job.get_memory()
if not gpu_devices and job.get_gpu():
# if requested 2 GPUs and we have 3 GPUs with id [0,1,2], gpus should be [0,1]
gpu_devices = []
for i in range(0, job.get_gpu()):
gpu_devices.append(i)
start_command(logger, job_backend, env, volumes, cpus=cpus, memory=memory, gpu_devices=gpu_devices, offline=offline)
|
def start(logger, full_id, fetch=True, env=None, volumes=None, cpus=None, memory=None, gpu_devices=None, offline=False)
|
Starts the job with all logging of a job_id
| 4.352179
| 4.249815
| 1.024087
|
if not Image.isImageType(im):
raise TypeError("Input is not a PIL image.")
if mode is not None:
if mode != im.mode:
im = im.convert(mode)
elif im.mode == 'P':
# Mode 'P' means there is an indexed "palette". If we leave the mode
# as 'P', then when we do `a = array(im)` below, `a` will be a 2-D
# containing the indices into the palette, and not a 3-D array
# containing the RGB or RGBA values.
if 'transparency' in im.info:
im = im.convert('RGBA')
else:
im = im.convert('RGB')
if flatten:
im = im.convert('F')
elif im.mode == '1':
# Workaround for crash in PIL. When im is 1-bit, the call array(im)
# can cause a seg. fault, or generate garbage. See
# https://github.com/scipy/scipy/issues/2138 and
# https://github.com/python-pillow/Pillow/issues/350.
#
# This converts im from a 1-bit image to an 8-bit image.
im = im.convert('L')
a = array(im)
return a
|
def fromimage(im, flatten=False, mode=None)
|
Return a copy of a PIL image as a numpy array.
Parameters
----------
im : PIL image
Input image.
flatten : bool
If true, convert the output to grey-scale.
mode : str, optional
Mode to convert image to, e.g. ``'RGB'``. See the Notes of the
`imread` docstring for more details.
Returns
-------
fromimage : ndarray
The different colour bands/channels are stored in the
third dimension, such that a grey-image is MxN, an
RGB-image MxNx3 and an RGBA-image MxNx4.
| 3.703662
| 3.821529
| 0.969157
|
if data.dtype == uint8:
return data
if high > 255:
raise ValueError("`high` should be less than or equal to 255.")
if low < 0:
raise ValueError("`low` should be greater than or equal to 0.")
if high < low:
raise ValueError("`high` should be greater than or equal to `low`.")
if cmin is None:
cmin = data.min()
if cmax is None:
cmax = data.max()
cscale = cmax - cmin
if cscale < 0:
raise ValueError("`cmax` should be larger than `cmin`.")
elif cscale == 0:
cscale = 1
scale = float(high - low) / cscale
bytedata = (data - cmin) * scale + low
return (bytedata.clip(low, high) + 0.5).astype(uint8)
|
def bytescale(data, cmin=None, cmax=None, high=255, low=0)
|
Byte scales an array (image).
Byte scaling means converting the input image to uint8 dtype and scaling
the range to ``(low, high)`` (default 0-255).
If the input image already has dtype uint8, no scaling is done.
Parameters
----------
data : ndarray
PIL image data array.
cmin : scalar, optional
Bias scaling of small values. Default is ``data.min()``.
cmax : scalar, optional
Bias scaling of large values. Default is ``data.max()``.
high : scalar, optional
Scale max value to `high`. Default is 255.
low : scalar, optional
Scale min value to `low`. Default is 0.
Returns
-------
img_array : uint8 ndarray
The byte-scaled array.
Examples
--------
>>> from scipy.misc import bytescale
>>> img = np.array([[ 91.06794177, 3.39058326, 84.4221549 ],
... [ 73.88003259, 80.91433048, 4.88878881],
... [ 51.53875334, 34.45808177, 27.5873488 ]])
>>> bytescale(img)
array([[255, 0, 236],
[205, 225, 4],
[140, 90, 70]], dtype=uint8)
>>> bytescale(img, high=200, low=100)
array([[200, 100, 192],
[180, 188, 102],
[155, 135, 128]], dtype=uint8)
>>> bytescale(img, cmin=0, cmax=255)
array([[91, 3, 84],
[74, 81, 5],
[52, 34, 28]], dtype=uint8)
| 1.569642
| 1.743761
| 0.900147
|
im = toimage(arr, mode=mode)
ts = type(size)
if issubdtype(ts, int):
percent = size / 100.0
size = tuple((array(im.size)*percent).astype(int))
elif issubdtype(type(size), float):
size = tuple((array(im.size)*size).astype(int))
else:
size = (size[1], size[0])
func = {'nearest': 0, 'lanczos': 1, 'bilinear': 2, 'bicubic': 3, 'cubic': 3}
imnew = im.resize(size, resample=func[interp])
return fromimage(imnew)
|
def imresize(arr, size, interp='bilinear', mode=None)
|
Resize an image.
Parameters
----------
arr : ndarray
The array of image to be resized.
size : int, float or tuple
* int - Percentage of current size.
* float - Fraction of current size.
* tuple - Size of the output image.
interp : str, optional
Interpolation to use for re-sizing ('nearest', 'lanczos', 'bilinear', 'bicubic'
or 'cubic').
mode : str, optional
The PIL image mode ('P', 'L', etc.) to convert `arr` before resizing.
Returns
-------
imresize : ndarray
The resized array of image.
See Also
--------
toimage : Implicitly used to convert `arr` according to `mode`.
scipy.ndimage.zoom : More generic implementation that does not use PIL.
| 2.693618
| 2.981876
| 0.90333
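The three accepted `size` forms as a short sketch (random test image; assumes the imresize/toimage/fromimage helpers above are in scope and Pillow is installed):

```python
import numpy as np

img = np.random.randint(0, 256, (64, 64), dtype=np.uint8)
half    = imresize(img, 50)         # int: percentage of the current size -> 32x32
quarter = imresize(img, 0.25)       # float: fraction of the current size -> 16x16
fixed   = imresize(img, (120, 80))  # tuple: explicit (rows, cols) of the output
```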
|
self.stop_on_empty_queue[channel] = True
# by joining it we wait until its loop finishes.
# it won't loop forever since we've set self.stop_on_empty_queue=True
write_thread = self.thread_write_instances[channel]
thread_join_non_blocking(write_thread)
|
def _end_channel(self, channel)
|
Soft end of ssh channel. End the writing thread as soon as the message queue is empty.
| 13.622161
| 11.034434
| 1.234514
|
if self.active and self.online is not False:
self.logger.debug("client sends last %s messages ..."
% ([str(i) + ':' + str(len(x)) for i, x in six.iteritems(self.queues)],))
for channel, messages in six.iteritems(self.queues):
for idx, message in enumerate(messages):
self.logger.debug("[%s] %d: %s" % (channel, idx, str(message)[0:120]))
# send all missing messages
# by joining we wait until its loop finishes.
# it won't loop forever since we've set self.stop_on_empty_queue=True
for channel in six.iterkeys(self.ssh_channel):
if channel != '':
self._end_channel(channel)
# last is control channel
self._end_channel('')
|
def wait_sending_last_messages(self)
|
Requests all channels to close and waits for it.
| 7.795324
| 7.304128
| 1.067249
|
state = {'message': ''}
self.logger.debug("wait_until_queue_empty: report=%s %s"
% (str(report), str([channel+':'+str(len(self.queues[channel])) for channel in channels]), ))
queues = []
for channel in channels:
queues += self.queues[channel][:]
def print_progress():
if report:
self.logger.debug("all_empty=%s" % (str(all_empty),))
sys.__stderr__.write('\b' * len(state['message']))
sys.__stderr__.write("\033[K")
state['message'] = "%.2f kB/s // %.2fkB of %.2fkB // %.2f%%" \
% (self.bytes_speed / 1024, self.bytes_sent / 1024, self.bytes_total / 1024,
(self.bytes_sent / self.bytes_total * 100) if self.bytes_total else 0)
sys.__stderr__.write(state['message'])
sys.__stderr__.flush()
while True:
all_empty = all(m['_sent'] for m in queues)
print_progress()
if all_empty:
break
time.sleep(0.2)
print_progress()
if report and clear_end:
sys.__stderr__.write('\b' * len(state['message']))
sys.__stderr__.write("\033[K")
sys.__stderr__.flush()
|
def wait_until_queue_empty(self, channels, report=True, clear_end=True)
|
Waits until all queues of channels are empty.
| 3.081735
| 3.042445
| 1.012914
|
if not self.is_connected(channel):
return False
message['_sending'] = True
if '_data' in message:
data = message['_data']
else:
data = msgpack.packb(message, default=invalid_json_values)
self.bytes_total += len(data)
message['_bytes_sent'] = 0
message['_id'] = -1
if is_debug2():
sys.__stderr__.write("[%s] send message: %s\n" % (channel, str(msgpack.unpackb(data))[0:180]))
try:
while data:
start = time.time()
bytes_sent = self.ssh_channel[channel].send(data)
data = data[bytes_sent:]
message['_bytes_sent'] += bytes_sent
self.bytes_sent += bytes_sent
end = time.time()
self.write_speeds.append(bytes_sent / (end-start))
speeds_len = len(self.write_speeds)
if speeds_len:
self.bytes_speed = sum(self.write_speeds) / speeds_len
if speeds_len > 10:
self.write_speeds = self.write_speeds[5:]
message['_sent'] = True
return True
except (KeyboardInterrupt, SystemExit):
if message['_sent']:
return message['_bytes_sent']
return False
except Exception as error:
self.connection_error(channel, error)
return False
|
def send_message(self, message, channel)
|
Internal. Sends the actual message from a queue entry.
| 3.244554
| 3.201363
| 1.013492
|
unpacker = msgpack.Unpacker(encoding='utf-8')
while True:
try:
start = time.time()
chunk = self.ssh_channel[channel].recv(1024)
end = time.time()
self.read_speeds.append( len(chunk) / (end-start) )
if len(self.read_speeds) > 20:
self.read_speeds = self.read_speeds[10:]
if chunk == b'':
# happens only when connection broke. If nothing is to be received, it hangs instead.
self.connection_error(channel, 'Connection broken w')
return False
except Exception as error:
self.connection_error(channel, error)
raise
unpacker.feed(chunk)
messages = [m for m in unpacker]
if messages:
return messages
|
def wait_for_at_least_one_message(self, channel)
|
Reads until we receive at least one message we can unpack. Return all found messages.
| 4.133142
| 4.054899
| 1.019296
|
if not self.ssh_channel[channel].recv_ready():
return
try:
start = time.time()
chunk = self.ssh_channel[channel].recv(1024)
end = time.time()
self.read_speeds.append(len(chunk) / (end-start))
if len(self.read_speeds) > 20:
self.read_speeds = self.read_speeds[10:]
except Exception as error:
self.connection_error(channel, error)
raise
if chunk == b'':
# socket connection broken
self.connection_error(channel, 'Connection broken')
return None
# self.read_buffer.seek(0, 2) #make sure we write at the end
self.read_unpacker.feed(chunk)
# self.read_buffer.seek(0)
messages = [m for m in self.read_unpacker]
return messages if messages else None
|
def read(self, channel)
|
Reads from the socket and tries to unpack the message. If successful (because msgpack was able to unpack)
then we return that message. Else None. Keep calling .read() when new data is available so we try it
again.
| 3.144205
| 3.06041
| 1.02738
|
if hasattr(signal, 'CTRL_C_EVENT'):
# windows. Need CTRL_C_EVENT to raise the signal in the whole process group
os.kill(os.getpid(), signal.CTRL_C_EVENT)
else:
# unix.
pgid = os.getpgid(os.getpid())
if pgid == 1:
os.kill(os.getpid(), signal.SIGINT)
else:
os.killpg(os.getpgid(os.getpid()), signal.SIGINT)
|
def raise_sigint()
|
Raising the SIGINT signal in the current process and all sub-processes.
os.kill() only issues a signal in the current process (without subprocesses).
CTRL+C on the console sends the signal to the process group (which we need).
| 2.547083
| 2.308829
| 1.103192
|
if size_bytes == 1:
# because I really hate unnecessary plurals
return "1 byte"
suffixes_table = [('bytes',0),('KB',0),('MB',1),('GB',2),('TB',2), ('PB',2)]
num = float(size_bytes)
for suffix, precision in suffixes_table:
if num < 1024.0:
break
num /= 1024.0
if precision == 0:
formatted_size = "%d" % num
else:
formatted_size = str(round(num, ndigits=precision))
return "%s %s" % (formatted_size, suffix)
|
def human_size(size_bytes, precision=0)
|
Format a size in bytes into a 'human' file size, e.g. bytes, KB, MB, GB, TB, PB
Note that bytes/KB will be reported in whole numbers but MB and above will have greater precision
e.g. 1 byte, 43 bytes, 443 KB, 4.3 MB, 4.43 GB, etc
| 2.097553
| 2.197204
| 0.954647
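A few sample values of the formatting described above (assuming `human_size` is in scope):

```python
human_size(1)              # '1 byte'
human_size(512)            # '512 bytes'
human_size(2048)           # '2 KB'    (whole numbers up to KB)
human_size(5 * 1024 ** 2)  # '5.0 MB'  (one decimal from MB upwards)
human_size(3 * 1024 ** 4)  # '3.0 TB'
```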
|
from PIL import Image
if x.ndim != 3:
raise Exception('Unsupported shape : ', str(x.shape), '. Need (channels, width, height)')
if scale:
x += max(-np.min(x), 0)
x /= np.max(x)
x *= 255
if x.shape[0] == 3:
# RGB
if x.dtype != 'uint8':
x = x.astype('uint8')
return Image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[0] == 1:
# grayscale
if x.dtype != 'uint8':
x = x.astype('uint8')
return Image.fromarray(x.reshape(x.shape[1], x.shape[2]), 'L')
else:
raise Exception('Unsupported channel number: ', x.shape[0])
|
def array_to_img(x, scale=True)
|
x should be shape (channels, width, height)
| 2.181331
| 2.039351
| 1.06962
|
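A hedged usage sketch for array_to_img: a random channels-first float array is scaled into 0..255 and rendered through the RGB branch; the output path is illustrative:

import numpy as np

rgb = np.random.rand(3, 64, 64).astype('float32')   # (channels, width, height)
img = array_to_img(rgb)                             # scale=True maps the floats into 0..255
img.save('/tmp/preview.png')                        # illustrative output path

gray = np.random.rand(1, 64, 64).astype('float32')
array_to_img(gray)                                  # grayscale branch returns an 'L' image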
self.nb_val_samples = number
diff_to_batch = number % self.get_batch_size()
if diff_to_batch > 0:
self.nb_val_samples += self.get_batch_size() - diff_to_batch
import keras
if '1' != keras.__version__[0]:
self.nb_val_samples = self.nb_val_samples // self.get_batch_size()
|
def set_generator_validation_nb(self, number)
|
Sets self.nb_val_samples, which is used in model.fit when the input is a generator. The value is rounded up to a whole number of batches (and converted to a step count for Keras >= 2).
:param number:
:return:
| 3.298131
| 2.879462
| 1.145398
|
self.samples_per_epoch = number
diff_to_batch = number % self.get_batch_size()
if diff_to_batch > 0:
self.samples_per_epoch += self.get_batch_size() - diff_to_batch
|
def set_generator_training_nb(self, number)
|
Sets self.samples_per_epoch, which is used in model.fit when the input is a generator. The value is rounded up to a whole number of batches. (A worked example of this rounding follows this row.)
:param number:
:return:
| 3.352218
| 2.72343
| 1.230881
|
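A worked trace of the rounding done by both setters above, assuming a batch size of 100 and 1050 requested samples:

number = 1050
batch_size = 100
diff_to_batch = number % batch_size              # 50
samples = number + (batch_size - diff_to_batch)  # 1100, a whole number of batches
steps = samples // batch_size                    # 11 -- the step count Keras >= 2 expects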
bid = id(buffer)
self.attach_last_messages[bid] = b''
def reader():
current_line = b''
def handle_line(buf):
if buf == b'':
return
if read_line and callable(read_line):
res = read_line(buf)
if res is False:
return False
elif res is not None:
buf = res
if hasattr(buf, 'encode'):
buf = buf.encode('utf-8')
self.attach_last_messages[bid] += buf
if len(self.attach_last_messages[bid]) > 21 * 1024:
self.attach_last_messages[bid] = self.attach_last_messages[bid][-20 * 1024:]
self.write(buf)
flush_char = b'\n'
while True:
try:
# needs to be 1 so we fetch data in near real-time
chunk = buffer.read(1)
if chunk == b'':
if current_line:
handle_line(current_line)
return
current_line += chunk
while flush_char in current_line:
pos = current_line.find(flush_char)
line = current_line[:pos+1]
current_line = current_line[pos+1:]
handle_line(line)
# todo, periodically flush by '\r' only (progress bars for example)
# and make sure only necessary data is sent (by applying \r and \b control characters)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
# we need to make sure, we continue to read otherwise the process of this buffer
# will block and we have a stuck process.
sys.__stderr__.write(traceback.format_exc() + '\n')
sys.__stderr__.flush()
thread = Thread(target=reader)
thread.daemon = True
thread.start()
def wait():
thread_join_non_blocking(thread)
self.send_buffer()
return wait
|
def attach(self, buffer, read_line=None)
|
Reads the buffer until EOF (read() returns b'') and sends it to self.logger and self.job_backend.
:param buffer: a buffer instance with a blocking read() or readline() method
:param read_line: callable or True to read line by line. If a callable is given, it is executed per line,
and the line is not redirected to stdout/logger when the callable returns False. (A wiring sketch follows this row.)
| 4.25204
| 4.109287
| 1.034739
|
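A hedged wiring sketch for attach: the command, the job_backend object and the filter are illustrative, but the shape of the call matches the code above - attach() consumes the pipe in a daemon thread and the returned wait() blocks until the pipe reaches EOF:

import subprocess

process = subprocess.Popen(['python', 'train.py'],      # hypothetical command
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE)

wait_stdout = job_backend.attach(process.stdout)                  # job_backend is assumed here
wait_stderr = job_backend.attach(process.stderr,
                                 read_line=lambda line: line)     # pass-through filter

process.wait()
wait_stdout()
wait_stderr()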
if not isinstance(image, np.ndarray):
raise ValueError('Expected ndarray')
if ratio < 1:
raise ValueError('Ratio must be greater than 1 (ratio=%f)' % ratio)
width = int(math.floor(image.shape[1] * ratio))
height = int(math.floor(image.shape[0] * ratio))
channels = image.shape[2]
out = np.ndarray((height, width, channels), dtype=np.uint8)
for x, y in np.ndindex((width, height)):
out[y, x] = image[int(math.floor(y / ratio)), int(math.floor(x / ratio))]
return out
|
def upscale(image, ratio)
|
return upscaled image array
Arguments:
image -- a (H,W,C) numpy.ndarray
ratio -- scaling factor (>1)
| 2.085211
| 2.21364
| 0.941983
|
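The per-pixel loop above is easy to follow but slow on large images. A hedged, behaviour-equivalent alternative builds the nearest-neighbour index arrays once with NumPy (the function name is ours, not part of the original API):

import math
import numpy as np

def upscale_fast(image, ratio):
    # same floor-of-source-coordinate rule as upscale(), applied via fancy indexing;
    # for uint8 (H, W, C) inputs this should reproduce upscale()'s output
    height = int(math.floor(image.shape[0] * ratio))
    width = int(math.floor(image.shape[1] * ratio))
    ys = (np.arange(height) / ratio).astype(int)
    xs = (np.arange(width) / ratio).astype(int)
    return image[ys[:, None], xs[None, :]]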
if image is None:
return None
elif isinstance(image, PIL.Image.Image):
pass
elif isinstance(image, np.ndarray):
image = PIL.Image.fromarray(image)
else:
raise ValueError('image must be a PIL.Image or a np.ndarray')
# Read format from the image
fmt = image.format
if not fmt:
# default to JPEG
fmt = 'jpeg'
else:
fmt = fmt.lower()
import base64
from io import BytesIO
string_buf = BytesIO()
image.save(string_buf, format=fmt)
data = base64.b64encode(string_buf.getvalue()).decode('ascii')
return 'data:image/%s;base64,%s' % (fmt, data)
|
def embed_image_html(image)
|
Returns an image embedded in HTML base64 format
(Based on Caffe's web_demo)
Arguments:
image -- a PIL.Image or np.ndarray
| 2.426758
| 2.325336
| 1.043616
|
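A short hedged usage sketch: the data URI returned by embed_image_html can be dropped straight into an <img> tag; the array and alt text are illustrative:

import numpy as np

thumbnail = (np.random.rand(32, 32, 3) * 255).astype('uint8')
uri = embed_image_html(thumbnail)
html = '<img src="%s" alt="preview"/>' % uri   # e.g. data:image/jpeg;base64,...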
def expanded_bbox(bbox, n):
l = min(bbox[0][0], bbox[1][0])
r = max(bbox[0][0], bbox[1][0])
t = min(bbox[0][1], bbox[1][1])
b = max(bbox[0][1], bbox[1][1])
return ((l - n, t - n), (r + n, b + n))
from PIL import Image, ImageDraw
draw = ImageDraw.Draw(image)
for bbox in bboxes:
for n in range(width):
draw.rectangle(expanded_bbox(bbox, n), outline=color)
return image
|
def add_bboxes_to_image(image, bboxes, color='red', width=1)
|
Draw rectangles on the image for the bounding boxes
Returns a PIL.Image
Arguments:
image -- input image
bboxes -- bounding boxes in the [((l, t), (r, b)), ...] format
Keyword arguments:
color -- color to draw the rectangles
width -- line width of the rectangles
Example:
image = Image.open(filename)
add_bboxes_to_image(image, bboxes[filename], width=2, color='#FF7700')
image.show()
| 2.116793
| 1.994944
| 1.061079
|
if channel_order not in ['RGB', 'BGR']:
raise ValueError('Unsupported channel_order %s' % channel_order)
if data.ndim == 1:
# interpret as 1x1 grayscale images
# (N, 1, 1)
data = data[:, np.newaxis, np.newaxis]
elif data.ndim == 2:
# interpret as 1x1 grayscale images
# (N, 1, 1)
data = data.reshape((data.shape[0] * data.shape[1], 1, 1))
elif data.ndim == 3:
if data.shape[0] == 3:
# interpret as a color image
# (1, H, W,3)
if channel_order == 'BGR':
data = data[[2, 1, 0], ...] # BGR to RGB (see issue #59)
data = data.transpose(1, 2, 0)
data = data[np.newaxis, ...]
else:
# interpret as grayscale images
# (N, H, W)
pass
elif data.ndim == 4:
if data.shape[0] == 3:
# interpret as HxW color images
# (N, H, W, 3)
data = data.transpose(1, 2, 3, 0)
if channel_order == 'BGR':
data = data[:, :, :, [2, 1, 0]] # BGR to RGB (see issue #59)
elif data.shape[1] == 3:
# interpret as HxW color images
# (N, H, W, 3)
data = data.transpose(0, 2, 3, 1)
if channel_order == 'BGR':
data = data[:, :, :, [2, 1, 0]] # BGR to RGB (see issue #59)
else:
# interpret as HxW grayscale images
# (N, H, W)
data = data.reshape((data.shape[0] * data.shape[1], data.shape[2], data.shape[3]))
else:
raise RuntimeError('unrecognized data shape: %s' % (data.shape,))
return get_layer_vis_square_raw(data,
allow_heatmap,
normalize,
min_img_dim,
max_width,
colormap,
)
|
def get_layer_vis_square(data,
allow_heatmap=True,
normalize=True,
min_img_dim=100,
max_width=1200,
channel_order='RGB',
colormap='jet',
)
|
Returns a vis_square for the given layer data
Arguments:
data -- a np.ndarray
Keyword arguments:
allow_heatmap -- if True, convert single channel images to heatmaps
normalize -- whether to normalize the data when visualizing
min_img_dim -- minimum image dimension, forwarded to get_layer_vis_square_raw
max_width -- maximum width for the vis_square
channel_order -- 'RGB' or 'BGR', channel ordering of color input data
colormap -- colormap name, forwarded to get_layer_vis_square_raw
(A usage sketch follows this row.)
| 1.757838
| 1.795011
| 0.979291
|
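A hedged usage sketch, assuming the companion get_layer_vis_square_raw referenced above is importable; the layer shape is arbitrary:

import numpy as np

# 6 filters of a hypothetical conv layer, channels-first and in BGR order
activations = np.random.rand(6, 3, 28, 28).astype('float32')
vis = get_layer_vis_square(activations, channel_order='BGR', max_width=800)
print(vis.shape)   # a single tiled image assembled by get_layer_vis_square_raw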
if os.getenv('AETROS_GIT_INDEX_FILE'):
self.index_path = os.getenv('AETROS_GIT_INDEX_FILE')
return
import tempfile
h, path = tempfile.mkstemp('aetros-git', '', self.temp_path)
self.index_path = path
# we give git a unique file path for that index. However, git expects it to be non-existent for empty indexes;
# an empty file would lead to "fatal: index file smaller than expected"
os.close(h)
os.unlink(self.index_path)
self.logger.debug('GIT_INDEX_FILE created at ' + self.index_path)
|
def prepare_index_file(self)
|
Makes sure that the Git index file we use per job (set via the GIT_INDEX_FILE environment variable)
is not locked and starts out empty. Git.fetch_job uses `git read-tree` to update this index. For new jobs we start
with an empty index - that's why we delete the file every time. (A conceptual sketch follows this row.)
| 6.221183
| 5.424818
| 1.1468
|
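A conceptual sketch of why the private index path matters: pointing GIT_INDEX_FILE at a per-job file lets several jobs stage trees in the same repository without fighting over .git/index. Assumes it is run inside a Git repository; the prefix is illustrative:

import os
import subprocess
import tempfile

env = dict(os.environ)
handle, index_path = tempfile.mkstemp(prefix='aetros-git')
os.close(handle)
os.unlink(index_path)                     # git wants the file absent, not empty
env['GIT_INDEX_FILE'] = index_path

# any plumbing command run with this environment now stages into the private index
subprocess.check_call(['git', 'read-tree', '--empty'], env=env)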
self.job_id = job_id
self.logger.debug("Git fetch job reference %s" % (self.ref_head, ))
out, code, err = self.command_exec(['ls-remote', 'origin', self.ref_head])
if code:
self.logger.error('Could not find the job ' + job_id + ' on the server. Are you online and does the job exist?')
sys.exit(1)
try:
self.command_exec(['fetch', '-f', '-n', 'origin', self.ref_head+':'+self.ref_head])
except Exception:
self.logger.error("Could not load job information for " + job_id + '. You need to be online to start pre-configured jobs.')
raise
self.read_job(job_id, checkout)
|
def fetch_job(self, job_id, checkout=False)
|
Fetches the job reference (refs/aetros/job/<id>) from origin and, when checkout=True, reads its tree into
the current Git index and checks it out into the working directory.
| 4.98996
| 4.501575
| 1.108492
|
self.job_id = job_id
commit = self.get_head_commit()
self.logger.debug('Job ref points to ' + commit)
self.command_exec(['read-tree', self.ref_head])
if checkout:
self.logger.debug('Working directory in ' + self.work_tree)
# make sure we have checked out all files we have added until now. Important for simple models,
# so we have the actual model.py and dataset scripts.
if os.path.exists(self.work_tree):
shutil.rmtree(self.work_tree)
os.makedirs(self.work_tree)
# make the working tree reflect exactly the tree of ref_head.
# since we removed the dir before, we have exactly the tree of the reference
# '--', '.' is important to not update HEAD
self.command_exec(['--work-tree', self.work_tree, 'checkout', self.ref_head, '--', '.'])
|
def read_job(self, job_id, checkout=False)
|
Reads the head commit, loads its tree into the index,
and checks out the work-tree when checkout=True.
This does not fetch the job from the server; it must already exist in the local Git repository.
| 7.698386
| 7.080559
| 1.087257
|
self.add_file('aetros/job.json', simplejson.dumps(data, indent=4))
tree_id = self.write_tree()
self.job_id = self.command_exec(['commit-tree', '-m', "JOB_CREATED", tree_id])[0].decode('utf-8').strip()
out, code, err = self.command_exec(['show-ref', self.ref_head], allowed_to_fail=True)
if not code:
self.logger.warning("Generated job id already exists, because exact same experiment values given. Ref " + self.ref_head)
self.command_exec(['update-ref', self.ref_head, self.job_id])
# make sure we have checked out all files we have added until now. Important for simple models, so we have the
# actual model.py and dataset scripts.
if not os.path.exists(self.work_tree):
os.makedirs(self.work_tree)
# updates index and working tree
# '--', '.' is important to not update HEAD
self.command_exec(['--work-tree', self.work_tree, 'checkout', self.ref_head, '--', '.'])
# every caller needs to make sure to call git.push
return self.job_id
|
def create_job_id(self, data)
|
Creates a new job id and reference (refs/aetros/job/<id>) by committing a root commit (no parent) whose tree
contains the initial aetros/job.json. That root commit id is the actual job id. The reference points to the newest
(head) commit of this history and is updated whenever a new commit is added. (A plumbing-level sketch follows this row.)
| 7.883118
| 6.900421
| 1.142411
|
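A plumbing-level sketch of the same idea with plain subprocess calls (command_exec above wraps this); the blob content, path and ref layout are illustrative, and it must be run inside a Git repository:

import subprocess

def git(args, input=None):
    return subprocess.check_output(['git'] + args, input=input).decode('utf-8').strip()

blob_id = git(['hash-object', '-w', '--stdin'], input=b'{"lr": 0.01}')       # illustrative job.json
git(['update-index', '--add', '--cacheinfo', '100644', blob_id, 'aetros/job.json'])
tree_id = git(['write-tree'])
job_id = git(['commit-tree', '-m', 'JOB_CREATED', tree_id])                  # root commit = job id
git(['update-ref', 'refs/aetros/job/' + job_id, job_id])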
self.active_thread = True
self.active_push = True
self.thread_push_instance = Thread(target=self.thread_push)
self.thread_push_instance.daemon = True
self.thread_push_instance.start()
|
def start_push_sync(self)
|
Starts the background thread (thread_push) that detects and pushes unsynced Git data.
| 3.107989
| 2.89864
| 1.072223
|
self.active_thread = False
if self.thread_push_instance and self.thread_push_instance.is_alive():
self.thread_push_instance.join()
with self.batch_commit('STREAM_END'):
for path, handle in six.iteritems(self.streamed_files.copy()):
# open again and read full content
full_path = os.path.normpath(self.temp_path + '/stream-blob/' + self.job_id + '/' + path)
self.logger.debug('Git stream end for file: ' + full_path)
del self.streamed_files[path]
# make sure its written to the disk
try:
self.stream_files_lock.acquire()
if not handle.closed:
handle.flush()
handle.close()
finally:
self.stream_files_lock.release()
with open(full_path, 'r') as f:
self.commit_file(path, path, f.read())
if not self.keep_stream_files:
os.unlink(full_path)
with self.batch_commit('STORE_END'):
for path, bar in six.iteritems(self.store_files.copy()):
full_path = os.path.normpath(self.temp_path + '/store-blob/' + self.job_id + '/' + path)
self.logger.debug('Git store end for file: ' + full_path)
del self.store_files[path]
try:
self.stream_files_lock.acquire()
with open(full_path, 'r') as f:
self.commit_file(path, path, f.read())
finally:
self.stream_files_lock.release()
if not self.keep_stream_files:
os.unlink(full_path)
|
def stop(self)
|
Stops the `git push` thread and commits all streamed files (Git.store_file and Git.stream_file), followed
by a final git push.
You cannot start the process again.
| 2.914707
| 2.682006
| 1.086764
|
class controlled_execution:
def __init__(self, git, message):
self.git = git
self.message = message
def __enter__(self):
self.git.git_batch_commit = True
if self.git.job_id:
# make sure we're always on the tip tree
self.git.read_tree(self.git.ref_head)
def __exit__(self, type, value, traceback):
self.git.git_batch_commit = False
# if nothing committed, we return early
if not self.git.git_batch_commit_messages: return
commit_message = self.message
if self.git.git_batch_commit_messages:
commit_message = commit_message + "\n\n" + "\n".join(self.git.git_batch_commit_messages)
self.git.git_batch_commit_messages = []
self.git.commit_index(commit_message)
return controlled_execution(self, message)
|
def batch_commit(self, message)
|
Instead of committing a lot of small commits you can batch it together using this controller.
Example:
with git.batch_commit('BATCHED'):
git.commit_file('my commit 1', 'path/to/file', 'content from file')
git.commit_json_file('[1, 2, 3]', 'path/to/file2', 'json array')
Within the `with` block you can group the method calls of `commit_file` and `commit_json_file`, and of every other
method that calls these two.
:type message: str
:return: with controller to be used with Python's `with git.batch_commit():`
| 2.962157
| 2.923125
| 1.013353
|