code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
self.coverage_lines.append('DA:{},0'.format(rule.source_line + offset)) | def record_coverage_zero(self, rule, offset) | Add entry to coverage saying this selector was parsed | 17.54384 | 18.437784 | 0.951516 |
log(DEBUG, u'Rule ({}): {}'.format(*rule).encode('utf-8'))
self.coverage_lines.append('DA:{},1'.format(rule[0])) | def record_coverage(self, rule) | Add entry to coverage saying this selector was matched | 12.303026 | 11.87431 | 1.036105 |
actions = self.state[self.state['current_step']]['actions']
if len(actions) > 0 and actions[-1][0] == 'target':
actions.pop()
actions.append(('target', Target(element.etree_element,
pseudo, element.parent.etree_element
))) | def push_target_elem(self, element, pseudo=None) | Place target element onto action stack. | 5.247975 | 4.608509 | 1.138758 |
self.push_target_elem(element, pseudo)
elem = etree.Element('div')
actions = self.state[self.state['current_step']]['actions']
actions.append(('move', elem))
actions.append(('target', Target(elem))) | def push_pending_elem(self, element, pseudo) | Create and place pending target element onto stack. | 7.469721 | 6.582369 | 1.134807 |
actions = self.state[self.state['current_step']]['actions']
elem = self.current_target().tree
if actions[-1][0] == ('target') and actions[-1][1].tree == elem:
actions.pop()
actions.pop()
actions.pop() | def pop_pending_if_empty(self, element) | Remove empty wrapper element. | 8.258729 | 7.80142 | 1.058619 |
actions = self.state[self.state['current_step']]['actions']
for action, value in reversed(actions):
if action == 'target':
return value | def current_target(self) | Return current target. | 7.38761 | 6.97427 | 1.059266 |
name = decl.name
method = None
try:
method = getattr(self, u'do_{}'.format(
(name).replace('-', '_')))
except AttributeError:
if name.startswith('data-'):
method = getattr(self, 'do_data_any')
elif name.startswith('attr-'):
method = getattr(self, 'do_attr_any')
else:
log(WARN, u'Missing method {}'.format(
(name).replace('-', '_')).encode('utf-8'))
if method:
self.record_coverage_line(decl.source_line)
return method
else:
return lambda x, y, z: None | def find_method(self, decl) | Find class method to call for declaration based on name. | 4.343029 | 4.090934 | 1.061623 |
nullvals = {'strings': '', 'counters': 0, 'pending': (None, None)}
nullval = nullvals[vtype]
vstyle = None
if vtype == 'counters':
if len(vname) > 1:
vname, vstyle = vname
else:
vname = vname[0]
if target_id is not None:
try:
state = self.state[vtype][target_id]
steps = self.state[vtype][target_id].keys()
except KeyError:
log(WARN, u'Bad ID target lookup {}'.format(
target_id).encode('utf-8'))
return nullval
else:
state = self.state
steps = self.state['scope']
for step in steps:
if vname in state[step][vtype]:
if vtype == 'pending':
return(state[step][vtype][vname], step)
else:
val = state[step][vtype][vname]
if vstyle is not None:
return self.counter_style(val, vstyle)
return val
else:
return nullval | def lookup(self, vtype, vname, target_id=None) | Return value of vname from the variable store vtype.
Valid vtypes are `strings` 'counters', and `pending`. If the value
is not found in the current steps store, earlier steps will be
checked. If not found, '', 0, or (None, None) is returned. | 3.572495 | 3.11536 | 1.146736 |
if style == 'decimal-leading-zero':
if val < 10:
valstr = "0{}".format(val)
else:
valstr = str(val)
elif style == 'lower-roman':
valstr = _to_roman(val).lower()
elif style == 'upper-roman':
valstr = _to_roman(val)
elif style == 'lower-latin' or style == 'lower-alpha':
if 1 <= val <= 26:
valstr = chr(val + 96)
else:
log(WARN, 'Counter out of range for latin (must be 1...26)')
valstr = str(val)
elif style == 'upper-latin' or style == 'upper-alpha':
if 1 <= val <= 26:
valstr = chr(val + 64)
else:
log(WARN, 'Counter out of range for latin (must be 1...26)')
valstr = str(val)
elif style == 'decimal':
valstr = str(val)
else:
log(WARN, u"ERROR: Counter numbering not supported for"
u" list type {}. Using decimal.".format(
style).encode('utf-8'))
valstr = str(val)
return valstr | def counter_style(self, val, style) | Return counter value in given style. | 2.34311 | 2.273964 | 1.030408 |
strval = ''
vals = []
for term in value:
if type(term) is ast.WhitespaceToken:
pass
elif type(term) is ast.StringToken:
strval += term.value
elif type(term) is ast.IdentToken:
log(DEBUG, u"IdentToken as string: {}".format(
term.value).encode('utf-8'))
strval += term.value
elif type(term) is ast.LiteralToken:
log(DEBUG, u"LiteralToken as string: {}".format(
term.value).encode('utf-8'))
strval += term.value
elif type(term) is ast.FunctionBlock:
if term.name == 'string':
str_args = split(term.arguments, ',')
str_name = self.eval_string_value(element,
str_args[0])[0]
val = self.lookup('strings', str_name)
if val == '':
if len(str_args) > 1:
val = self.eval_string_value(element,
str_args[1])[0]
else:
log(WARN, u"{} blank string"
.format(str_name).encode('utf-8'))
strval += val
elif term.name == u'attr':
att_args = split(term.arguments, ',')
att_name = self.eval_string_value(element,
att_args[0])[0]
att_def = ''
if len(att_args) > 1:
att_def = self.eval_string_value(element,
att_args[1])[0]
if '|' in att_name:
ns, att = att_name.split('|')
try:
ns = self.css_namespaces[ns]
except KeyError:
log(WARN, u"Undefined namespace prefix {}"
.format(ns).encode('utf-8'))
continue
att_name = etree.QName(ns, att)
strval += element.etree_element.get(att_name, att_def)
elif term.name == u'uuid':
strval += self.generate_id()
elif term.name == u'content':
strval += etree.tostring(element.etree_element,
encoding='unicode',
method='text',
with_tail=False)
elif term.name.startswith('target-'):
if strval:
vals.append(strval)
strval = ''
target_args = split(term.arguments, ',')
vref = self.eval_string_value(element,
target_args[0])[0]
vname = self.eval_string_value(element,
target_args[1])[0]
vtype = term.name[7:]+'s'
vals.append(TargetVal(self, vref[1:], vname, vtype))
elif term.name == u'first-letter':
tmpstr = self.eval_string_value(element, term.arguments)
if tmpstr:
if isinstance(tmpstr[0], basestring):
strval += tmpstr[0][0]
else:
log(WARN, u"Bad string value:"
u" nested target-* not allowed. "
u"{}".format(
serialize(value)).encode(
'utf-8'))
# FIXME can we do delayed first-letter
elif term.name == 'counter':
counterargs = [serialize(t).strip(" \'")
for t in split(term.arguments, ',')]
count = self.lookup('counters', counterargs)
strval += str(count)
elif term.name == u'pending':
log(WARN, u"Bad string value: pending() not allowed. "
u"{}".format(serialize(value)).encode(
'utf-8'))
else:
log(WARN, u"Bad string value: unknown function: {}. "
u"{}".format(term.name, serialize(value)).encode(
'utf-8'))
if strval:
vals.append(strval)
return vals | def eval_string_value(self, element, value) | Evaluate parsed string.
Returns a list of current and delayed values. | 2.768438 | 2.742868 | 1.009322 |
args = serialize(decl.value)
step = self.state[self.state['current_step']]
strval = ''
strname = None
for term in decl.value:
if type(term) is ast.WhitespaceToken:
continue
elif type(term) is ast.StringToken:
if strname is not None:
strval += term.value
else:
log(WARN, u"Bad string-set: {}".format(
args).encode('utf-8'))
elif type(term) is ast.IdentToken:
if strname is not None:
log(WARN, u"Bad string-set: {}".format(
args).encode('utf-8'))
else:
strname = term.value
elif type(term) is ast.LiteralToken:
if strname is None:
log(WARN, u"Bad string-set: {}".format(
args).encode('utf-8'))
else:
step['strings'][strname] = strval
strval = ''
strname = None
elif type(term) is ast.FunctionBlock:
if term.name == 'string':
str_args = split(term.arguments, ',')
str_name = self.eval_string_value(element,
str_args[0])[0]
val = self.lookup('strings', str_name)
if val == '':
if len(str_args) > 1:
val = self.eval_string_value(element,
str_args[1])[0]
else:
log(WARN, u"{} blank string"
.format(str_name).encode('utf-8'))
if strname is not None:
strval += val
else:
log(WARN, u"Bad string-set: {}".format(
args).encode('utf-8'))
elif term.name == 'counter':
counterargs = [serialize(t).strip(" \'")
for t in split(term.arguments, ',')]
count = self.lookup('counters', counterargs)
strval += str(count)
elif term.name == u'attr':
if strname is not None:
att_args = split(term.arguments, ',')
att_name = self.eval_string_value(element,
att_args[0])[0]
att_def = ''
if len(att_args) > 1:
att_def = self.eval_string_value(element,
att_args[1])[0]
if '|' in att_name:
ns, att = att_name.split('|')
try:
ns = self.css_namespaces[ns]
except KeyError:
log(WARN, u"Undefined namespace prefix {}"
.format(ns).encode('utf-8'))
continue
att_name = etree.QName(ns, att)
strval += element.etree_element.get(att_name, att_def)
else:
log(WARN, u"Bad string-set: {}".format(
args).encode('utf-8'))
elif term.name == u'content':
if strname is not None:
strval += etree.tostring(element.etree_element,
encoding='unicode',
method='text',
with_tail=False)
else:
log(WARN, u"Bad string-set: {}".format(
args).encode('utf-8'))
elif term.name == u'first-letter':
tmpstr = self.eval_string_value(element, term.arguments)
if tmpstr:
if isinstance(tmpstr[0], basestring):
strval += tmpstr[0][0]
else:
log(WARN, u"Bad string value:"
u" nested target-* not allowed. "
u"{}".format(serialize(
args)).encode('utf-8'))
elif term.name == u'pending':
log(WARN, u"Bad string-set:pending() not allowed. {}"
.format(args).encode('utf-8'))
if strname is not None:
step['strings'][strname] = strval | def do_string_set(self, element, decl, pseudo) | Implement string-set declaration. | 2.478369 | 2.460137 | 1.007411 |
step = self.state[self.state['current_step']]
counter_name = ''
for term in decl.value:
if type(term) is ast.WhitespaceToken:
continue
elif type(term) is ast.IdentToken:
if counter_name:
step['counters'][counter_name] = 0
counter_name = term.value
elif type(term) is ast.LiteralToken:
if counter_name:
step['counters'][counter_name] = 0
counter_name = ''
elif type(term) is ast.NumberToken:
if counter_name:
step['counters'][counter_name] = int(term.value)
counter_name = ''
else:
log(WARN, u"Unrecognized counter-reset term {}"
.format(type(term)).encode('utf-8'))
if counter_name:
step['counters'][counter_name] = 0 | def do_counter_reset(self, element, decl, pseudo) | Clear specified counters. | 2.599385 | 2.49345 | 1.042485 |
target = serialize(decl.value).strip()
step = self.state[self.state['current_step']]
elem = self.current_target().tree
_, valstep = self.lookup('pending', target)
if not valstep:
step['pending'][target] = [('nodeset', elem)]
else:
self.state[valstep]['pending'][target] = [('nodeset', elem)] | def do_node_set(self, element, decl, pseudo) | Implement node-set declaration. | 8.717621 | 8.288582 | 1.051763 |
target = serialize(decl.value).strip()
step = self.state[self.state['current_step']]
elem = self.current_target().tree
# Find if the current node already has a move, and remove it.
actions = step['actions']
for pos, action in enumerate(reversed(actions)):
if action[0] == 'move' and action[1] == elem:
target_index = - pos - 1
actions[target_index:] = actions[target_index+1:]
break
_, valstep = self.lookup('pending', target)
if not valstep:
step['pending'][target] = [('move', elem)]
else:
self.state[valstep]['pending'][target].append(('move', elem)) | def do_move_to(self, element, decl, pseudo) | Implement move-to declaration. | 6.232069 | 6.109485 | 1.020065 |
value = serialize(decl.value).strip()
if '|' in value:
namespace, tag = value.split('|', 1)
try:
namespace = self.css_namespaces[namespace]
except KeyError:
log(WARN, u'undefined namespace prefix: {}'.format(
namespace).encode('utf-8'))
value = tag
else:
value = etree.QName(namespace, tag)
step = self.state[self.state['current_step']]
actions = step['actions']
actions.append(('tag', value)) | def do_container(self, element, decl, pseudo) | Implement setting tag for new wrapper element. | 5.46315 | 5.304971 | 1.029817 |
step = self.state[self.state['current_step']]
actions = step['actions']
strval = self.eval_string_value(element, decl.value)
actions.append(('attrib', ('class', strval))) | def do_class(self, element, decl, pseudo) | Implement class declaration - pre-match. | 8.744608 | 7.94013 | 1.101318 |
step = self.state[self.state['current_step']]
actions = step['actions']
strval = self.eval_string_value(element, decl.value)
actions.append(('attrib', (decl.name[5:], strval))) | def do_attr_any(self, element, decl, pseudo) | Implement generic attribute setting. | 8.501488 | 7.324996 | 1.160613 |
sort_css = groupby_css = flags = ''
if ',' in decl.value:
if decl.value.count(',') == 2:
sort_css, groupby_css, flags = \
map(serialize, split(decl.value, ','))
else:
sort_css, groupby_css = map(serialize, split(decl.value, ','))
else:
sort_css = serialize(decl.value)
if groupby_css.strip() == 'nocase':
flags = groupby_css
groupby_css = ''
sort = css_to_func(sort_css, flags,
self.css_namespaces, self.state['lang'])
groupby = css_to_func(groupby_css, flags,
self.css_namespaces, self.state['lang'])
step = self.state[self.state['current_step']]
target = self.current_target()
target.sort = sort
target.lang = self.state['lang']
target.isgroup = True
target.groupby = groupby
# Find current target, set its sort/grouping as well
for pos, action in \
enumerate(reversed(step['actions'])):
if action[0] == 'target' and \
action[1].tree == element.etree_element:
action[1].sort = sort
action[1].isgroup = True
action[1].groupby = groupby
break | def do_group_by(self, element, decl, pseudo) | Implement group-by declaration - pre-match. | 4.009039 | 4.001901 | 1.001784 |
if ',' in decl.value:
css, flags = split(decl.value, ',')
else:
css = decl.value
flags = None
sort = css_to_func(serialize(css), serialize(flags or ''),
self.css_namespaces, self.state['lang'])
step = self.state[self.state['current_step']]
target = self.current_target()
target.sort = sort
target.lang = self.state['lang']
target.isgroup = False
target.groupby = None
# Find current target, set its sort as well
for pos, action in \
enumerate(reversed(step['actions'])):
if action[0] == 'target' and \
action[1].tree == element.etree_element:
action[1].sort = sort
action[1].isgroup = False
action[1].groupby = None
break | def do_sort_by(self, element, decl, pseudo) | Implement sort-by declaration - pre-match. | 6.237548 | 6.062825 | 1.028819 |
log(WARN, u"Old-style pass as declaration not allowed.{}"
.format(decl.value).encpde('utf-8')) | def do_pass(self, element, decl, pseudo) | No longer valid way to set processing pass. | 53.840004 | 44.872074 | 1.199855 |
cache = dict()
lock = RLock()
@wraps(func)
def func_wrapper(host: str, username: str, *args, **kwargs):
key = "{h}-{u}".format(h=host, u=username)
if key in cache:
# connection exists, check if it is still valid before
# returning it.
conn = cache[key]
if conn and conn.is_active() and conn.is_authenticated():
return conn
else:
# try to close a bad connection and remove it from
# the cache.
if conn:
try_close(conn)
del cache[key]
# key is not in the cache, so try to recreate it
# it may have been removed just above.
if key not in cache:
conn = func(host, username, *args, **kwargs)
if conn is not None:
cache[key] = conn
return conn
# not sure how to reach this point, but just in case.
return None
def get_cache() -> dict:
return cache
def purge(key: str=None):
with lock:
if key is None:
conns = [(k, v) for k, v in cache.items()]
elif key in cache:
conns = ((key, cache[key]), )
else:
conns = list()
for k, v in conns:
try_close(v)
del cache[k]
func_wrapper.get_cache = get_cache
func_wrapper.purge = purge
return func_wrapper | def connection_cache(func: callable) | Connection cache for SSH sessions. This is to prevent opening a
new, expensive connection on every command run. | 2.833171 | 2.745648 | 1.031877 |
if not username:
username = shakedown.cli.ssh_user
if not key_path:
key_path = shakedown.cli.ssh_key_file
key = validate_key(key_path)
transport = get_transport(host, username, key)
if transport:
transport = start_transport(transport, username, key)
if transport.is_authenticated():
return transport
else:
print("error: unable to authenticate {}@{} with key {}".format(username, host, key_path))
else:
print("error: unable to connect to {}".format(host))
return None | def _get_connection(host, username: str, key_path: str) \
-> paramiko.Transport or None | Return an authenticated SSH connection.
:param host: host or IP of the machine
:type host: str
:param username: SSH username
:type username: str
:param key_path: path to the SSH private key for SSH auth
:type key_path: str
:return: SSH connection
:rtype: paramiko.Transport or None | 2.995142 | 3.148138 | 0.951401 |
with HostSession(host, username, key_path, noisy) as s:
if noisy:
print("\n{}{} $ {}\n".format(shakedown.fchr('>>'), host, command))
s.run(command)
ec, output = s.get_result()
return ec == 0, output | def run_command(
host,
command,
username=None,
key_path=None,
noisy=True
) | Run a command via SSH, proxied through the mesos master
:param host: host or IP of the machine to execute the command on
:type host: str
:param command: the command to execute
:type command: str
:param username: SSH username
:type username: str
:param key_path: path to the SSH private key to use for SSH authentication
:type key_path: str
:return: True if successful, False otherwise
:rtype: bool
:return: Output of command
:rtype: string | 8.130809 | 11.566982 | 0.702933 |
return run_command(shakedown.master_ip(), command, username, key_path, noisy) | def run_command_on_master(
command,
username=None,
key_path=None,
noisy=True
) | Run a command on the Mesos master | 5.597495 | 5.707703 | 0.980691 |
return run_command(shakedown.master_leader_ip(), command, username, key_path, noisy) | def run_command_on_leader(
command,
username=None,
key_path=None,
noisy=True
) | Run a command on the Mesos leader. Important for Multi-Master. | 6.858814 | 7.108848 | 0.964828 |
return run_command(shakedown.marathon_leader_ip(), command, username, key_path, noisy) | def run_command_on_marathon_leader(
command,
username=None,
key_path=None,
noisy=True
) | Run a command on the Marathon leader | 4.709929 | 5.008065 | 0.940469 |
return run_command(host, command, username, key_path, noisy) | def run_command_on_agent(
host,
command,
username=None,
key_path=None,
noisy=True
) | Run a command on a Mesos agent, proxied through the master | 3.387763 | 4.915148 | 0.689249 |
call = shlex.split(command)
call.insert(0, 'dcos')
print("\n{}{}\n".format(shakedown.fchr('>>'), ' '.join(call)))
proc = subprocess.Popen(call, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = proc.communicate()
return_code = proc.wait()
stdout = output.decode('utf-8')
stderr = error.decode('utf-8')
if print_output:
print(stdout, stderr, return_code)
if return_code != 0 and raise_on_error:
raise DCOSException(
'Got error code {} when running command "dcos {}":\nstdout: "{}"\nstderr: "{}"'.format(
return_code, command, stdout, stderr))
return stdout, stderr, return_code | def run_dcos_command(command, raise_on_error=False, print_output=True) | Run `dcos {command}` via DC/OS CLI
:param command: the command to execute
:type command: str
:param raise_on_error: whether to raise a DCOSException if the return code is nonzero
:type raise_on_error: bool
:param print_output: whether to print the resulting stdout/stderr from running the command
:type print_output: bool
:return: (stdout, stderr, return_code)
:rtype: tuple | 2.792755 | 2.778799 | 1.005022 |
while True:
time.sleep(0.2)
if self.session.recv_ready() or self.session.closed:
return | def _wait_for_recv(self) | After executing a command, wait for results.
Because `recv_ready()` can return False, but still have a
valid, open connection, it is not enough to ensure output
from a command execution is properly captured.
:return: None | 4.754651 | 5.075216 | 0.936837 |
with shakedown.stdchannel_redirected(sys.stderr, os.devnull):
clusters = [c.dict() for c in dcos.cluster.get_clusters()]
for c in clusters:
if url == c['url']:
try:
dcos.cluster.set_attached(dcos.cluster.get_cluster(c['name']).get_cluster_path())
return True
except:
return False
return False | def attach_cluster(url) | Attach to an already set-up cluster
:return: True if successful, else False | 4.911439 | 4.812373 | 1.020586 |
url = _gen_url('dcos-metadata/dcos-version.json')
response = dcos.http.request('get', url)
if response.status_code == 200:
return response.json()['version']
else:
return None | def dcos_version() | Return the version of the running cluster.
:return: DC/OS cluster version as a string | 3.507412 | 3.955061 | 0.886816 |
url = _gen_url('acs/api/v1/auth/login')
creds = {
'uid': username,
'password': password
}
response = dcos.http.request('post', url, json=creds)
if response.status_code == 200:
return response.json()['token']
else:
return None | def authenticate(username, password) | Authenticate with a DC/OS cluster and return an ACS token.
return: ACS token | 3.569048 | 3.336891 | 1.069573 |
url = _gen_url('acs/api/v1/auth/login')
creds = {
'token': oauth_token
}
response = dcos.http.request('post', url, json=creds)
if response.status_code == 200:
return response.json()['token']
else:
return None | def authenticate_oauth(oauth_token) | Authenticate by checking for a valid OAuth token.
return: ACS token | 3.975642 | 3.921599 | 1.013781 |
from six.moves import urllib
return urllib.parse.urljoin(dcos_url(), url_path) | def _gen_url(url_path) | Return an absolute URL by combining DC/OS URL and url_path.
:param url_path: path to append to DC/OS URL
:type url_path: str
:return: absolute URL
:rtype: str | 6.460515 | 5.171762 | 1.24919 |
echo('Partitioning master. Incoming:{} | Outgoing:{}'.format(incoming, outgoing))
network.save_iptables(shakedown.master_ip())
network.flush_all_rules(shakedown.master_ip())
network.allow_all_traffic(shakedown.master_ip())
if incoming and outgoing:
network.run_iptables(shakedown.master_ip(), DISABLE_MASTER_INCOMING)
network.run_iptables(shakedown.master_ip(), DISABLE_MASTER_OUTGOING)
elif incoming:
network.run_iptables(shakedown.master_ip(), DISABLE_MASTER_INCOMING)
elif outgoing:
network.run_iptables(shakedown.master_ip(), DISABLE_MASTER_OUTGOING)
else:
pass | def partition_master(incoming=True, outgoing=True) | Partition master's port alone. To keep DC/OS cluster running.
:param incoming: Partition incoming traffic to master process. Default True.
:param outgoing: Partition outgoing traffic from master process. Default True. | 2.642918 | 2.651555 | 0.996743 |
masters = []
for master in __master_zk_nodes_keys():
master_zk_str = get_zk_node_data(master)['str']
masters.append(json.loads(master_zk_str))
return masters | def get_all_masters() | Returns the json object that represents each of the masters. | 5.349031 | 5.059746 | 1.057174 |
public_ip_list = []
agents = get_public_agents()
for agent in agents:
status, public_ip = shakedown.run_command_on_agent(agent, "/opt/mesosphere/bin/detect_ip_public")
public_ip_list.append(public_ip)
return public_ip_list | def get_public_agents_public_ip() | Provides a list public IPs for public agents in the cluster | 3.169429 | 3.016459 | 1.050712 |
agent_list = []
agents = __get_all_agents()
for agent in agents:
for reservation in agent["reserved_resources"]:
if "slave_public" in reservation:
agent_list.append(agent["hostname"])
return agent_list | def get_public_agents() | Provides a list of hostnames / private IPs that are public agents in the cluster | 4.54357 | 3.816317 | 1.190564 |
agent_list = []
agents = __get_all_agents()
for agent in agents:
if(len(agent["reserved_resources"]) == 0):
agent_list.append(agent["hostname"])
else:
private = True
for reservation in agent["reserved_resources"]:
if("slave_public" in reservation):
private = False
if(private):
agent_list.append(agent["hostname"])
return agent_list | def get_private_agents() | Provides a list of hostnames / IPs that are private agents in the cluster | 3.124599 | 2.837039 | 1.101359 |
agent_list = []
agents = __get_all_agents()
for agent in agents:
agent_list.append(agent["hostname"])
return agent_list | def get_agents() | Provides a list of hostnames / IPs of all agents in the cluster | 4.07109 | 2.952685 | 1.378776 |
network.save_iptables(host)
network.flush_all_rules(host)
network.allow_all_traffic(host)
network.run_iptables(host, ALLOW_SSH)
network.run_iptables(host, ALLOW_PING)
network.run_iptables(host, DISALLOW_MESOS)
network.run_iptables(host, DISALLOW_INPUT) | def partition_agent(host) | Partition a node from all network traffic except for SSH and loopback
:param hostname: host or IP of the machine to partition from the cluster | 4.077212 | 4.600371 | 0.886279 |
status, stdout = run_command_on_agent(hostname, "ps aux | grep -v grep | grep '{}'".format(pattern))
pids = [p.strip().split()[1] for p in stdout.splitlines()]
for pid in pids:
status, stdout = run_command_on_agent(hostname, "sudo kill -9 {}".format(pid))
if status:
print("Killed pid: {}".format(pid))
else:
print("Unable to killed pid: {}".format(pid)) | def kill_process_on_host(
hostname,
pattern
) | Kill the process matching pattern at ip
:param hostname: the hostname or ip address of the host on which the process will be killed
:param pattern: a regular expression matching the name of the process to kill | 2.572583 | 2.82271 | 0.911388 |
status, pid = run_command_on_agent(hostname, 'cat {}'.format(pid_file))
status, stdout = run_command_on_agent(hostname, "sudo kill -9 {}".format(pid))
if status:
print("Killed pid: {}".format(pid))
run_command_on_agent(hostname, 'rm {}'.format(pid_file))
else:
print("Unable to killed pid: {}".format(pid)) | def kill_process_from_pid_file_on_host(hostname, pid_file='app.pid') | Retrieves the PID of a process from a pid file on host and kills it.
:param hostname: the hostname or ip address of the host on which the process will be killed
:param pid_file: pid file to use holding the pid number to kill | 2.629039 | 2.965863 | 0.886433 |
count = 0
start_time = time_module.time()
timeout = Deadline.create_deadline(timeout_seconds)
while True:
try:
result = predicate()
except Exception as e:
if ignore_exceptions:
if noisy:
logger.exception("Ignoring error during wait.")
else:
count = 0
raise # preserve original stack
else:
if (not inverse_predicate and result) or (inverse_predicate and not result):
count = count + 1
if count >= required_consecutive_success_count:
return result
if timeout.is_expired():
funname = __stringify_predicate(predicate)
raise TimeoutExpired(timeout_seconds, funname)
if noisy:
header = '{}[{}/{}]'.format(
shakedown.cli.helpers.fchr('>>'),
pretty_duration(time_module.time() - start_time),
pretty_duration(timeout_seconds)
)
if required_consecutive_success_count > 1:
header = '{} [{} of {} times]'.format(
header,
count,
required_consecutive_success_count)
print('{} spinning...'.format(header))
time_module.sleep(sleep_seconds) | def wait_for(
predicate,
timeout_seconds=120,
sleep_seconds=1,
ignore_exceptions=True,
inverse_predicate=False,
noisy=False,
required_consecutive_success_count=1) | waits or spins for a predicate, returning the result.
Predicate is a function that returns a truthy or falsy value.
An exception in the function will be returned.
A timeout will throw a TimeoutExpired Exception. | 3.72861 | 3.606665 | 1.033811 |
funname = getsource(predicate).strip().split(' ')[2].rstrip(',')
params = 'None'
# if args dig in the stack
if '()' not in funname:
stack = getouterframes(currentframe())
for frame in range(0, len(stack)):
if funname in str(stack[frame]):
_, _, _, params = getargvalues(stack[frame][0])
return "function: {} params: {}".format(funname, params) | def __stringify_predicate(predicate) | Reflection of function name and parameters of the predicate being used. | 7.672884 | 6.579829 | 1.166122 |
start = time_module.time()
wait_for(predicate, timeout_seconds, sleep_seconds, ignore_exceptions, inverse_predicate, noisy, required_consecutive_success_count)
return elapse_time(start) | def time_wait(
predicate,
timeout_seconds=120,
sleep_seconds=1,
ignore_exceptions=True,
inverse_predicate=False,
noisy=True,
required_consecutive_success_count=1) | waits or spins for a predicate and returns the time of the wait.
An exception in the function will be returned.
A timeout will throw a TimeoutExpired Exception. | 3.522165 | 3.10068 | 1.135933 |
start_time = time_module.time()
timeout = Deadline.create_deadline(timeout_seconds)
while True:
try:
result = predicate()
return result
except Exception as e:
if noisy:
logger.exception("Ignoring error during wait.")
if timeout.is_expired():
funname = __stringify_predicate(predicate)
raise TimeoutExpired(timeout_seconds, funname)
if noisy:
header = '{}[{}/{}]'.format(
shakedown.cli.helpers.fchr('>>'),
pretty_duration(time_module.time() - start_time),
pretty_duration(timeout_seconds)
)
print('{} spinning...'.format(header))
time_module.sleep(sleep_seconds) | def wait_while_exceptions(
predicate,
timeout_seconds=120,
sleep_seconds=1,
noisy=False) | waits for a predicate, ignoring exceptions, returning the result.
Predicate is a function.
Exceptions will trigger the sleep and retry; any non-exception result
will be returned.
A timeout will throw a TimeoutExpired Exception. | 5.225746 | 4.985447 | 1.0482 |
if end is None:
end = time_module.time()
return round(end-start, precision) | def elapse_time(start, end=None, precision=3) | Simple time calculation utility. Given a start time, it will provide an elapse time. | 3.741666 | 3.251898 | 1.15061 |
if seconds is None:
return ''
ret = ''
if seconds >= 86400:
ret += '{:.0f}d'.format(int(seconds / 86400))
seconds = seconds % 86400
if seconds >= 3600:
ret += '{:.0f}h'.format(int(seconds / 3600))
seconds = seconds % 3600
if seconds >= 60:
ret += '{:.0f}m'.format(int(seconds / 60))
seconds = seconds % 60
if seconds > 0:
ret += '{:.1f}s'.format(seconds)
return ret | def pretty_duration(seconds) | Returns a user-friendly representation of the provided duration in seconds.
For example: 62.8 => "1m2.8s", or 129837.8 => "2d12h4m57.8s" | 1.404245 | 1.367745 | 1.026686 |
configfile = os.path.expanduser('~/.shakedown')
if os.path.isfile(configfile):
with open(configfile, 'r') as f:
config = toml.loads(f.read())
for key in config:
param = key.replace('-', '_')
if not param in args or args[param] in [False, None]:
args[param] = config[key]
return args | def read_config(args) | Read configuration options from ~/.shakedown (if exists)
:param args: a dict of arguments
:type args: dict
:return: a dict of arguments
:rtype: dict | 2.81322 | 2.449681 | 1.148403 |
defaults = {
'fail': 'fast',
'stdout': 'fail'
}
for key in defaults:
if not args[key]:
args[key] = defaults[key]
return args | def set_config_defaults(args) | Set configuration defaults
:param args: a dict of arguments
:type args: dict
:return: a dict of arguments
:rtype: dict | 4.812538 | 6.045343 | 0.796074 |
return {
'step-maj': click.style("\n" + '> ' + text, fg='yellow', bold=True),
'step-min': click.style(' - ' + text + ' ', bold=True),
'item-maj': click.style(' - ' + text + ' '),
'item-min': click.style(' - ' + text + ' '),
'quote-head-fail': click.style("\n" + chr(9485) + (chr(9480)*2) + ' ' + text, fg='red'),
'quote-head-pass': click.style("\n" + chr(9485) + (chr(9480)*2) + ' ' + text, fg='green'),
'quote-head-skip': click.style("\n" + chr(9485) + (chr(9480)*2) + ' ' + text, fg='yellow'),
'quote-fail': re.sub('^', click.style(chr(9482) + ' ', fg='red'), text, flags=re.M),
'quote-pass': re.sub('^', click.style(chr(9482) + ' ', fg='green'), text, flags=re.M),
'quote-skip': re.sub('^', click.style(chr(9482) + ' ', fg='yellow'), text, flags=re.M),
'fail': click.style(text + ' ', fg='red'),
'pass': click.style(text + ' ', fg='green'),
'skip': click.style(text + ' ', fg='yellow')
}.get(style, '') | def decorate(text, style) | Console decoration style definitions
:param text: the text string to decorate
:type text: str
:param style: the style used to decorate the string
:type style: str
:return: a decorated string
:rtype: str | 2.135902 | 2.154384 | 0.991421 |
if shakedown.cli.quiet:
return
if not 'n' in kwargs:
kwargs['n'] = True
if 'd' in kwargs:
text = decorate(text, kwargs['d'])
if 'TERM' in os.environ and os.environ['TERM'] == 'velocity':
if text:
print(text, end="", flush=True)
if kwargs.get('n'):
print()
else:
click.echo(text, nl=kwargs.get('n')) | def echo(text, **kwargs) | Print results to the console
:param text: the text string to print
:type text: str
:return: a string
:rtype: str | 4.175744 | 4.340423 | 0.962059 |
try:
desc = uid if desc is None else desc
user_object = {"description": desc, "password": password}
acl_url = urljoin(_acl_url(), 'users/{}'.format(uid))
r = http.put(acl_url, json=user_object)
assert r.status_code == 201
except DCOSHTTPException as e:
# already exists
if e.response.status_code != 409:
raise | def add_user(uid, password, desc=None) | Adds user to the DCOS Enterprise. If not description
is provided the uid will be used for the description.
:param uid: user id
:type uid: str
:param password: password
:type password: str
:param desc: description of user
:type desc: str | 3.851601 | 3.636096 | 1.059268 |
try:
acl_url = urljoin(_acl_url(), 'users/{}'.format(uid))
r = http.get(acl_url)
return r.json()
# assert r.status_code == 201
except DCOSHTTPException as e:
if e.response.status_code == 400:
return None
else:
raise | def get_user(uid) | Returns a user from the DCOS Enterprise. It returns None if none exists.
:param uid: user id
:type uid: str
:return: User
:rtype: dict | 3.686959 | 3.713836 | 0.992763 |
try:
acl_url = urljoin(_acl_url(), 'users/{}'.format(uid))
r = http.delete(acl_url)
assert r.status_code == 204
except DCOSHTTPException as e:
# doesn't exist
if e.response.status_code != 400:
raise | def remove_user(uid) | Removes a user from the DCOS Enterprise.
:param uid: user id
:type uid: str | 3.844565 | 4.05669 | 0.94771 |
try:
acl_url = urljoin(_acl_url(), 'acls/{}'.format(rid))
r = http.put(acl_url, json={'description': 'jope'})
assert r.status_code == 201
except DCOSHTTPException as e:
if e.response.status_code != 409:
raise | def ensure_resource(rid) | Creates or confirms that a resource is added into the DCOS Enterprise System.
Example: dcos:service:marathon:marathon:services:/example-secure
:param rid: resource ID
:type rid: str | 4.592653 | 4.717948 | 0.973443 |
rid = rid.replace('/', '%252F')
# Create ACL if it does not yet exist.
ensure_resource(rid)
# Set the permission triplet.
try:
acl_url = urljoin(_acl_url(), 'acls/{}/users/{}/{}'.format(rid, uid, action))
r = http.put(acl_url)
assert r.status_code == 204
except DCOSHTTPException as e:
if e.response.status_code != 409:
raise | def set_user_permission(rid, uid, action='full') | Sets users permission on a given resource. The resource will be created
if it doesn't exist. Actions are: read, write, update, delete, full.
:param uid: user id
:type uid: str
:param rid: resource ID
:type rid: str
:param action: read, write, update, delete or full
:type action: str | 4.661942 | 5.252592 | 0.887551 |
rid = rid.replace('/', '%252F')
try:
acl_url = urljoin(_acl_url(), 'acls/{}/users/{}/{}'.format(rid, uid, action))
r = http.delete(acl_url)
assert r.status_code == 204
except DCOSHTTPException as e:
if e.response.status_code != 400:
raise | def remove_user_permission(rid, uid, action='full') | Removes user permission on a given resource.
:param uid: user id
:type uid: str
:param rid: resource ID
:type rid: str
:param action: read, write, update, delete or full
:type action: str | 3.670142 | 4.420679 | 0.830221 |
o_token = dcos_acs_token()
dcos.config.set_val('core.dcos_acs_token', '')
yield
dcos.config.set_val('core.dcos_acs_token', o_token) | def no_user() | Provides a context with no logged in user. | 4.351773 | 4.333772 | 1.004154 |
o_token = dcos_acs_token()
shakedown.add_user(user_id, password, user_id)
token = shakedown.authenticate(user_id, password)
dcos.config.set_val('core.dcos_acs_token', token)
yield
dcos.config.set_val('core.dcos_acs_token', o_token)
shakedown.remove_user(user_id) | def new_dcos_user(user_id, password) | Provides a context with a newly created user. | 3.191517 | 3.167892 | 1.007458 |
o_token = dcos_acs_token()
token = shakedown.authenticate(user_id, password)
dcos.config.set_val('core.dcos_acs_token', token)
yield
dcos.config.set_val('core.dcos_acs_token', o_token) | def dcos_user(user_id, password) | Provides a context with user otherthan super | 4.120798 | 4.214866 | 0.977682 |
if not description:
description = id
data = {
'description': description
}
acl_url = urljoin(_acl_url(), 'groups/{}'.format(id))
try:
r = http.put(acl_url, json=data)
assert r.status_code == 201
except DCOSHTTPException as e:
if e.response.status_code != 409:
raise | def add_group(id, description=None) | Adds group to the DCOS Enterprise. If not description
is provided the id will be used for the description.
:param id: group id
:type id: str
:param desc: description of user
:type desc: str | 3.286414 | 3.231758 | 1.016912 |
acl_url = urljoin(_acl_url(), 'groups/{}'.format(id))
try:
r = http.get(acl_url)
return r.json()
except DCOSHTTPException as e:
if e.response.status_code != 400:
raise | def get_group(id) | Returns a group from the DCOS Enterprise. It returns None if none exists.
:param id: group id
:type id: str
:return: Group
:rtype: dict | 3.5346 | 3.861705 | 0.915295 |
acl_url = urljoin(_acl_url(), 'groups/{}'.format(id))
try:
r = http.delete(acl_url)
print(r.status_code)
except DCOSHTTPException as e:
if e.response.status_code != 400:
raise | def remove_group(id) | Removes a group from the DCOS Enterprise. The group is
removed regardless of associated users.
:param id: group id
:type id: str | 3.754571 | 4.411589 | 0.85107 |
acl_url = urljoin(_acl_url(), 'groups/{}/users/{}'.format(gid, uid))
try:
r = http.put(acl_url)
assert r.status_code == 204
except DCOSHTTPException as e:
if e.response.status_code == 409 and exist_ok:
pass
else:
raise | def add_user_to_group(uid, gid, exist_ok=True) | Adds a user to a group within DCOS Enterprise. The group and
user must exist.
:param uid: user id
:type uid: str
:param gid: group id
:type gid: str
:param exist_ok: True if it is ok for the relationship to pre-exist.
:type exist_ok: bool | 2.957715 | 3.227407 | 0.916437 |
acl_url = urljoin(_acl_url(), 'groups/{}/users/{}'.format(gid, uid))
try:
r = http.delete(acl_url)
assert r.status_code == 204
except dcos.errors.DCOSBadRequest:
pass | def remove_user_from_group(uid, gid) | Removes a user from a group within DCOS Enterprise.
:param uid: user id
:type uid: str
:param gid: group id
:type gid: str | 4.343451 | 5.179684 | 0.838555 |
html_doc = etree.parse(html_in)
oven = Oven(css_in, use_repeatable_ids)
oven.bake(html_doc, last_step)
# serialize out HTML
print(etree.tostring(html_doc, method="xml").decode('utf-8'),
file=html_out)
# generate CSS coverage_file file
if coverage_file:
print('SF:{}'.format(css_in.name), file=coverage_file)
print(oven.get_coverage_report(), file=coverage_file)
print('end_of_record', file=coverage_file) | def easybake(css_in, html_in=sys.stdin, html_out=sys.stdout, last_step=None,
coverage_file=None, use_repeatable_ids=False) | Process the given HTML file stream with the css stream. | 3.744205 | 3.883217 | 0.964202 |
parser = argparse.ArgumentParser(description="Process raw HTML to baked"
" (embedded numbering and"
" collation)")
parser.add_argument('-v', '--version', action="version",
version=__version__, help='Report the library version')
parser.add_argument("css_rules",
type=argparse.FileType('rb'),
help="CSS3 ruleset stylesheet recipe")
parser.add_argument("html_in", nargs="?",
type=argparse.FileType('r'),
help="raw HTML file to bake (default stdin)",
default=sys.stdin)
parser.add_argument("html_out", nargs="?",
type=argparse.FileType('w'),
help="baked HTML file output (default stdout)",
default=sys.stdout)
parser.add_argument('-s', '--stop-at', action='store', metavar='<pass>',
help='Stop baking just before given pass name')
parser.add_argument('-d', '--debug', action='store_true',
help='Send debugging info to stderr')
parser.add_argument('-q', '--quiet', action='store_true',
help="Quiet all on stderr except errors")
parser.add_argument('-c', '--coverage-file', metavar='coverage.lcov',
type=FileTypeExt('w'),
help="output coverage file (lcov format). If "
"filename starts with '+', append coverage info.")
parser.add_argument('--use-repeatable-ids', action='store_true',
help="use repeatable id attributes instead of uuids "
"which is useful for diffing")
args = parser.parse_args(argv)
formatter = logging.Formatter('%(name)s %(levelname)s %(message)s')
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(formatter)
logger.addHandler(handler)
use_quiet_log = (args.quiet and logging.ERROR)
use_debug_log = (args.debug and logging.DEBUG)
# Debug option takes higher priority than quiet warnings option
logger.setLevel(use_debug_log or use_quiet_log or logging.WARNING)
try:
easybake(args.css_rules, args.html_in, args.html_out, args.stop_at,
args.coverage_file, args.use_repeatable_ids)
finally:
if args.css_rules:
args.css_rules.close()
if args.html_in:
args.html_in.close()
if args.html_out:
args.html_out.close()
if args.coverage_file:
args.coverage_file.close() | def main(argv=None) | Commandline script wrapping Baker. | 3.272071 | 3.213978 | 1.018075 |
if service_available_predicate(name):
with marathon_on_marathon(name):
return marathon_version()
else:
# We can either skip the corresponding test by returning False
# or raise an exception.
print('WARN: {} MoM not found. Version is None'.format(name))
return None | def mom_version(name='marathon-user') | Returns the version of marathon on marathon. | 10.688791 | 10.062827 | 1.062206 |
if service_available_predicate(name):
return mom_version() < LooseVersion(version)
else:
# We can either skip the corresponding test by returning False
# or raise an exception.
print('WARN: {} MoM not found. mom_version_less_than({}) is False'.format(name, version))
return False | def mom_version_less_than(version, name='marathon-user') | Returns True if MoM with the given {name} exists and has a version less
than {version}. Note that if MoM does not exist False is returned.
:param version: required version
:type: string
:param name: MoM name, default is 'marathon-user'
:type: string
:return: True if version < MoM version
:rtype: bool | 7.989393 | 9.010473 | 0.886679 |
toml_config_o = config.get_config()
dcos_url = config.get_config_val('core.dcos_url', toml_config_o)
service_name = 'service/{}/'.format(name)
marathon_url = urllib.parse.urljoin(dcos_url, service_name)
config.set_val('marathon.url', marathon_url)
try:
yield
finally:
# return config to previous state
config.save(toml_config_o) | def marathon_on_marathon(name='marathon-user') | Context manager for altering the marathon client for MoM
:param name: service name of MoM to use
:type name: str | 4.030641 | 3.872389 | 1.040867 |
if host == shakedown.master_ip():
transport = paramiko.Transport(host)
else:
transport_master = paramiko.Transport(shakedown.master_ip())
transport_master = start_transport(transport_master, username, key)
if not transport_master.is_authenticated():
print("error: unable to authenticate {}@{} with key {}".format(username, shakedown.master_ip(), key))
return False
try:
channel = transport_master.open_channel('direct-tcpip', (host, 22), ('127.0.0.1', 0))
except paramiko.SSHException:
print("error: unable to connect to {}".format(host))
return False
transport = paramiko.Transport(channel)
return transport | def get_transport(host, username, key) | Create a transport object
:param host: the hostname to connect to
:type host: str
:param username: SSH username
:type username: str
:param key: key object used for authentication
:type key: paramiko.RSAKey
:return: a transport object
:rtype: paramiko.Transport | 2.549093 | 2.653889 | 0.960513 |
transport.start_client()
agent = paramiko.agent.Agent()
keys = itertools.chain((key,) if key else (), agent.get_keys())
for test_key in keys:
try:
transport.auth_publickey(username, test_key)
break
except paramiko.AuthenticationException as e:
pass
else:
raise ValueError('No valid key supplied')
return transport | def start_transport(transport, username, key) | Begin a transport client and authenticate it
:param transport: the transport object to start
:type transport: paramiko.Transport
:param username: SSH username
:type username: str
:param key: key object used for authentication
:type key: paramiko.RSAKey
:return: the transport object passed
:rtype: paramiko.Transport | 4.05484 | 3.835248 | 1.057256 |
key_path = os.path.expanduser(key_path)
if not os.path.isfile(key_path):
return False
return paramiko.RSAKey.from_private_key_file(key_path) | def validate_key(key_path) | Validate a key
:param key_path: path to a key to use for authentication
:type key_path: str
:return: key object used for authentication
:rtype: paramiko.RSAKey | 2.333311 | 2.436495 | 0.95765 |
services = mesos.get_master().frameworks(inactive=inactive, completed=completed)
for service in services:
if service['name'] == service_name:
return service
return None | def get_service(
service_name,
inactive=False,
completed=False
) | Get a dictionary describing a service
:param service_name: the service name
:type service_name: str
:param inactive: whether to include inactive services
:type inactive: bool
:param completed: whether to include completed services
:type completed: bool
:return: a dict describing a service
:rtype: dict, or None | 4.306139 | 5.995755 | 0.718198 |
service = get_service(service_name, inactive, completed)
if service is not None and service['id']:
return service['id']
return None | def get_service_framework_id(
service_name,
inactive=False,
completed=False
) | Get the framework ID for a service
:param service_name: the service name
:type service_name: str
:param inactive: whether to include inactive services
:type inactive: bool
:param completed: whether to include completed services
:type completed: bool
:return: a framework id
:rtype: str, or None | 3.274238 | 4.794412 | 0.682928 |
service = get_service(service_name, inactive, completed)
if service is not None and service['tasks']:
return service['tasks']
return [] | def get_service_tasks(
service_name,
inactive=False,
completed=False
) | Get a list of tasks associated with a service
:param service_name: the service name
:type service_name: str
:param inactive: whether to include inactive services
:type inactive: bool
:param completed: whether to include completed services
:type completed: bool
:return: a list of task objects
:rtye: [dict], or None | 3.653129 | 5.051552 | 0.72317 |
tasks = get_service_tasks(service_name, inactive, completed)
if task_predicate:
return [t['id'] for t in tasks if task_predicate(t)]
else:
return [t['id'] for t in tasks] | def get_service_task_ids(
service_name,
task_predicate=None,
inactive=False,
completed=False
) | Get a list of task IDs associated with a service
:param service_name: the service name
:type service_name: str
:param task_predicate: filter function which accepts a task object and returns a boolean
:type task_predicate: function, or None
:param inactive: whether to include inactive services
:type inactive: bool
:param completed: whether to include completed services
:type completed: bool
:return: a list of task ids
:rtye: [str], or None | 1.981973 | 2.465252 | 0.803963 |
service = get_service_tasks(service_name, inactive, completed)
if service is not None:
for task in service:
if task['name'] == task_name:
return task
return None | def get_service_task(
service_name,
task_name,
inactive=False,
completed=False
) | Get a dictionary describing a service task, or None
:param service_name: the service name
:type service_name: str
:param task_name: the task name
:type task_name: str
:param inactive: whether to include inactive services
:type inactive: bool
:param completed: whether to include completed services
:type completed: bool
:return: a dictionary describing the service
:rtye: dict, or None | 2.500741 | 3.539208 | 0.706582 |
return get_service_task('marathon', task_name, inactive, completed) | def get_marathon_task(
task_name,
inactive=False,
completed=False
) | Get a dictionary describing a named marathon task | 5.996627 | 6.696618 | 0.895471 |
tasks = get_mesos_tasks()
if tasks is not None:
for task in tasks:
if task['name'] == task_name:
return task
return None | def get_mesos_task(task_name) | Get a mesos task with a specific task name | 2.516747 | 2.415813 | 1.04178 |
service_tasks = get_service_tasks(service_name, inactive, completed)
ips = set([])
for task in service_tasks:
if task_name is None or task['name'] == task_name:
for status in task['statuses']:
# Only the TASK_RUNNING status will have correct IP information.
if status["state"] != "TASK_RUNNING":
continue
for ip in status['container_status']['network_infos'][0]['ip_addresses']:
ips.add(ip['ip_address'])
return ips | def get_service_ips(
service_name,
task_name=None,
inactive=False,
completed=False
) | Get a set of the IPs associated with a service
:param service_name: the service name
:type service_name: str
:param task_name: the task name
:type task_name: str
:param inactive: wehther to include inactive services
:type inactive: bool
:param completed: whether to include completed services
:type completed: bool
:return: a list of IP addresses
:rtype: [str] | 3.054128 | 3.407115 | 0.896397 |
marathon_client = marathon.create_client()
apps = marathon_client.get_apps_for_framework(service_name)
if apps:
for app in apps:
if (app_id is not None) and (app['id'] != "/{}".format(str(app_id))):
continue
if (app['tasksHealthy']) \
and (app['tasksRunning']) \
and (not app['tasksStaged']) \
and (not app['tasksUnhealthy']):
return True
return False | def service_healthy(service_name, app_id=None) | Check whether a named service is healthy
:param service_name: the service name
:type service_name: str
:param app_id: app_id to filter
:type app_id: str
:return: True if healthy, False otherwise
:rtype: bool | 3.086154 | 3.212252 | 0.960745 |
if role:
destroy_volumes(role)
unreserve_resources(role)
if zk_node:
delete_zk_node(zk_node) | def delete_persistent_data(role, zk_node) | Deletes any persistent data associated with the specified role, and zk node.
:param role: the mesos role to delete, or None to omit this
:type role: str
:param zk_node: the zookeeper node to be deleted, or None to skip this deletion
:type zk_node: str | 6.39242 | 7.536697 | 0.848173 |
state = dcos_agents_state()
if not state or 'slaves' not in state.keys():
return False
all_success = True
for agent in state['slaves']:
if not destroy_volume(agent, role):
all_success = False
return all_success | def destroy_volumes(role) | Destroys all volumes on all the slaves in the cluster for the role. | 4.095484 | 3.755913 | 1.09041 |
volumes = []
agent_id = agent['id']
reserved_resources_full = agent.get('reserved_resources_full', None)
if not reserved_resources_full:
# doesn't exist
return True
reserved_resources = reserved_resources_full.get(role, None)
if not reserved_resources:
# doesn't exist
return True
for reserved_resource in reserved_resources:
name = reserved_resource.get('name', None)
disk = reserved_resource.get('disk', None)
if name == 'disk' and disk is not None and 'persistence' in disk:
volumes.append(reserved_resource)
req_url = urljoin(master_url(), 'destroy-volumes')
data = {
'slaveId': agent_id,
'volumes': json.dumps(volumes)
}
success = False
try:
response = http.post(req_url, data=data)
success = 200 <= response.status_code < 300
if response.status_code == 409:
# thoughts on what to do here? throw exception
# i would rather not print
print('''###\nIs a framework using these resources still installed?\n###''')
except DCOSHTTPException as e:
print("HTTP {}: Unabled to delete volume based on: {}".format(
e.response.status_code,
e.response.text))
return success | def destroy_volume(agent, role) | Deletes the volumes on the specific agent for the role | 4.273227 | 4.219153 | 1.012816 |
state = dcos_agents_state()
if not state or 'slaves' not in state.keys():
return False
all_success = True
for agent in state['slaves']:
if not unreserve_resource(agent, role):
all_success = False
return all_success | def unreserve_resources(role) | Unreserves all the resources for all the slaves for the role. | 4.09352 | 3.768588 | 1.086221 |
master_count = len(get_all_masters())
return time_wait(lambda: service_available_predicate(service_name),
timeout_seconds=timeout_sec,
required_consecutive_success_count=master_count) | def wait_for_service_endpoint(service_name, timeout_sec=120) | Checks the service url if available it returns true, on expiration
it returns false | 7.816927 | 7.68334 | 1.017387 |
try:
tasks = get_service_tasks(service_name)
except (DCOSConnectionError, DCOSHTTPException):
tasks = []
matching_tasks = []
other_tasks = []
for t in tasks:
name = t.get('name', 'UNKNOWN_NAME')
state = t.get('state', None)
if state and state in expected_task_states:
matching_tasks.append(name)
else:
other_tasks.append('{}={}'.format(name, state))
print('expected {} tasks in {}:\n- {} in expected {}: {}\n- {} in other states: {}'.format(
expected_task_count, ', '.join(expected_task_states),
len(matching_tasks), ', '.join(expected_task_states), ', '.join(matching_tasks),
len(other_tasks), ', '.join(other_tasks)))
return len(matching_tasks) >= expected_task_count | def task_states_predicate(service_name, expected_task_count, expected_task_states) | Returns whether the provided service_names's tasks have expected_task_count tasks
in any of expected_task_states. For example, if service 'foo' has 5 tasks which are
TASK_STAGING or TASK_RUNNING.
:param service_name: the service name
:type service_name: str
:param expected_task_count: the number of tasks which should have an expected state
:type expected_task_count: int
:param expected_task_states: the list states to search for among the service's tasks
:type expected_task_states: [str]
:return: True if expected_task_count tasks have any of expected_task_states, False otherwise
:rtype: bool | 2.557434 | 2.567832 | 0.995951 |
return time_wait(
lambda: task_states_predicate(service_name, expected_task_count, expected_task_states),
timeout_seconds=timeout_sec) | def wait_for_service_tasks_state(
service_name,
expected_task_count,
expected_task_states,
timeout_sec=120
) | Returns once the service has at least N tasks in one of the specified state(s)
:param service_name: the service name
:type service_name: str
:param expected_task_count: the expected number of tasks in the specified state(s)
:type expected_task_count: int
:param expected_task_states: the expected state(s) for tasks to be in, e.g. 'TASK_RUNNING'
:type expected_task_states: [str]
:param timeout_sec: duration to wait
:type timeout_sec: int
:return: the duration waited in seconds
:rtype: int | 4.744294 | 5.178762 | 0.916106 |
try:
task_ids = get_service_task_ids(service_name, task_predicate)
except DCOSHTTPException:
print('failed to get task ids for service {}'.format(service_name))
task_ids = []
print('waiting for all task ids in "{}" to change:\n- old tasks: {}\n- current tasks: {}'.format(
service_name, old_task_ids, task_ids))
for id in task_ids:
if id in old_task_ids:
return False # old task still present
if len(task_ids) < len(old_task_ids): # new tasks haven't fully replaced old tasks
return False
return True | def tasks_all_replaced_predicate(
service_name,
old_task_ids,
task_predicate=None
) | Returns whether ALL of old_task_ids have been replaced with new tasks
:param service_name: the service name
:type service_name: str
:param old_task_ids: list of original task ids as returned by get_service_task_ids
:type old_task_ids: [str]
:param task_predicate: filter to use when searching for tasks
:type task_predicate: func
:return: True if none of old_task_ids are still present in the service
:rtype: bool | 3.482612 | 3.278526 | 1.062249 |
try:
task_ids = get_service_task_ids(service_name, task_predicate)
except DCOSHTTPException:
print('failed to get task ids for service {}'.format(service_name))
task_ids = []
print('checking whether old tasks in "{}" are missing:\n- old tasks: {}\n- current tasks: {}'.format(
service_name, old_task_ids, task_ids))
for id in old_task_ids:
if id not in task_ids:
return True # an old task was not present
return False | def tasks_missing_predicate(
service_name,
old_task_ids,
task_predicate=None
) | Returns whether any of old_task_ids are no longer present
:param service_name: the service name
:type service_name: str
:param old_task_ids: list of original task ids as returned by get_service_task_ids
:type old_task_ids: [str]
:param task_predicate: filter to use when searching for tasks
:type task_predicate: func
:return: True if any of old_task_ids are no longer present in the service
:rtype: bool | 3.374454 | 3.204986 | 1.052876 |
return time_wait(
lambda: tasks_all_replaced_predicate(service_name, old_task_ids, task_predicate),
timeout_seconds=timeout_sec) | def wait_for_service_tasks_all_changed(
service_name,
old_task_ids,
task_predicate=None,
timeout_sec=120
) | Returns once ALL of old_task_ids have been replaced with new tasks
:param service_name: the service name
:type service_name: str
:param old_task_ids: list of original task ids as returned by get_service_task_ids
:type old_task_ids: [str]
:param task_predicate: filter to use when searching for tasks
:type task_predicate: func
:param timeout_sec: duration to wait
:type timeout_sec: int
:return: the duration waited in seconds
:rtype: int | 5.594495 | 5.456535 | 1.025283 |
try:
time_wait(
lambda: tasks_missing_predicate(service_name, old_task_ids, task_predicate),
timeout_seconds=timeout_sec)
# shouldn't have exited successfully: raise below
except TimeoutExpired:
return timeout_sec # no changes occurred within timeout, as expected
raise DCOSException("One or more of the following tasks were no longer found: {}".format(old_task_ids)) | def wait_for_service_tasks_all_unchanged(
service_name,
old_task_ids,
task_predicate=None,
timeout_sec=30
) | Returns after verifying that NONE of old_task_ids have been removed or replaced from the service
:param service_name: the service name
:type service_name: str
:param old_task_ids: list of original task ids as returned by get_service_task_ids
:type old_task_ids: [str]
:param task_predicate: filter to use when searching for tasks
:type task_predicate: func
:param timeout_sec: duration to wait until assuming tasks are unchanged
:type timeout_sec: int
:return: the duration waited in seconds (the timeout value)
:rtype: int | 8.629027 | 8.268252 | 1.043634 |
if component.lower() == 'client':
component = 'Client'
else:
component = 'Server'
# sudo is required for non-coreOS installs
command = 'sudo docker version -f {{.{}.Version}}'.format(component)
if host is None:
success, output = shakedown.run_command_on_master(command, None, None, False)
else:
success, output = shakedown.run_command_on_host(host, command, None, None, False)
if success:
return output
else:
return 'unknown' | def docker_version(host=None, component='server') | Return the version of Docker [Server]
:param host: host or IP of the machine Docker is running on
:type host: str
:param component: Docker component
:type component: str
:return: Docker version
:rtype: str | 3.848986 | 4.061497 | 0.947677 |
import base64
auth_hash = base64.b64encode(
'{}:{}'.format(username, password).encode()).decode()
config_json = {
"auths": {
"https://index.docker.io/v1/": {"auth": auth_hash}
}
}
config_json_filename = 'config.json'
# Write config.json to file
with open(config_json_filename, 'w') as f:
json.dump(config_json, f, indent=4)
try:
# Create a docker.tar.gz
import tarfile
with tarfile.open(file_name, 'w:gz') as tar:
tar.add(config_json_filename, arcname='.docker/config.json')
tar.close()
except Exception as e:
print('Failed to create a docker credentils file {}'.format(e))
raise e
finally:
os.remove(config_json_filename) | def create_docker_credentials_file(
username,
password,
file_name='docker.tar.gz') | Create a docker credentials file.
Docker username and password are used to create a `{file_name}`
with `.docker/config.json` containing the credentials.
:param username: docker username
:type username: str
:param password: docker password
:type password: str
:param file_name: credentials file name `docker.tar.gz` by default
:type file_name: str | 2.142003 | 2.144653 | 0.998764 |
create_docker_credentials_file(username, password, file_name)
try:
__distribute_docker_credentials_file()
finally:
os.remove(file_name) | def distribute_docker_credentials_to_private_agents(
username,
password,
file_name='docker.tar.gz') | Create and distributes a docker credentials file to all private agents
:param username: docker username
:type username: str
:param password: docker password
:type password: str
:param file_name: credentials file name `docker.tar.gz` by default
:type file_name: str | 3.340485 | 5.707412 | 0.585289 |
agents = len(shakedown.get_private_agents())
app = {
"id": "/prefetch",
"instances": agents,
"container": {
"type": "DOCKER",
"docker": {"image": image}
},
"cpus": 0.1,
"mem": 128
}
client = marathon.create_client()
client.add_app(app)
shakedown.deployment_wait(timeout)
shakedown.delete_all_apps()
shakedown.deployment_wait(timeout) | def prefetch_docker_image_on_private_agents(
image,
timeout=timedelta(minutes=5).total_seconds()) | Given a docker image. An app with the image is scale across the private
agents to ensure that the image is prefetched to all nodes.
:param image: docker image name
:type image: str
:param timeout: timeout for deployment wait in secs (default: 5m)
:type password: int | 3.558028 | 3.516488 | 1.011813 |
if options_file is not None:
with open(options_file, 'r') as opt_file:
options = json.loads(opt_file.read())
else:
options = {}
return options | def _get_options(options_file=None) | Read in options_file as JSON.
:param options_file: filename to return
:type options_file: str
:return: options as dictionary
:rtype: dict | 2.224803 | 2.417382 | 0.920336 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.