sentence1
stringlengths 52
3.87M
| sentence2
stringlengths 1
47.2k
| label
stringclasses 1
value |
|---|---|---|
def gid(self):
    """Return the group id that the daemon will run with.

    The value is cached on first access. If a group is configured for
    the daemon, its gid is looked up via grp; otherwise the current
    process group id is used.

    :rtype: int
    """
    if not self._gid:
        # Bug fix: the original tested self.controller.config.daemon.group
        # but then read self.config.daemon.group -- use self.config
        # consistently, matching uid().
        if self.config.daemon.group:
            self._gid = grp.getgrnam(self.config.daemon.group).gr_gid
        else:
            self._gid = os.getgid()
    return self._gid
|
Return the group id that the daemon will run with
:rtype: int
|
entailment
|
def uid(self):
    """Return the user id that the process will run as.

    Cached after the first lookup; falls back to the current uid when
    no daemon user is configured.

    :rtype: int
    """
    if self._uid:
        return self._uid
    user = self.config.daemon.user
    self._uid = pwd.getpwnam(user).pw_uid if user else os.getuid()
    return self._uid
|
Return the user id that the process will run as
:rtype: int
|
entailment
|
def _daemonize(self):
    """Fork into a background process and setup the process, copied in part
    from http://www.jejik.com/files/examples/daemon3x.py

    Performs the classic double-fork, drops privileges to the configured
    uid/gid, detaches from the controlling terminal, redirects the
    standard streams to /dev/null and writes the pidfile.

    :raises OSError: if either fork fails
    """
    LOGGER.info('Forking %s into the background', sys.argv[0])

    # Pre-create the pidfile with the final owner/permissions while we
    # still have the privileges to chown it.
    if os.getuid() != self.uid:
        fd = open(self.pidfile_path, 'w')
        os.fchmod(fd.fileno(), 0o644)
        os.fchown(fd.fileno(), self.uid, self.gid)
        fd.close()

    # First fork: detach from the parent process.
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)
    except OSError as error:
        # Bug fix: the message was passed printf-style as a second OSError
        # argument ('%s', error), which is never interpolated -- format it
        # explicitly instead.
        raise OSError('Could not fork off parent: %s' % error)

    # Set the user id, then the group id.
    # NOTE(review): setgid typically fails once setuid has dropped root,
    # which is presumably why a setgid failure is only logged here --
    # consider swapping the order so the group is set first.
    if self.uid != os.getuid():
        os.setuid(self.uid)
    if self.gid != os.getgid():
        try:
            os.setgid(self.gid)
        except OSError as error:
            LOGGER.error('Could not set group: %s', error)

    # Decouple from parent environment
    os.chdir('/')
    os.setsid()
    os.umask(0o022)

    # Second fork: give up session leadership so the daemon can never
    # reacquire a controlling terminal.
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)
    except OSError as error:
        # Same formatting fix as the first fork above.
        raise OSError('Could not fork child: %s' % error)

    # Redirect standard file descriptors to /dev/null.
    sys.stdout.flush()
    sys.stderr.flush()
    si = open(os.devnull, 'r')
    so = open(os.devnull, 'a+')
    se = open(os.devnull, 'a+')
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())

    # Automatically call self._remove_pidfile when the app exits
    atexit.register(self._remove_pidfile)
    self._write_pidfile()
|
Fork into a background process and setup the process, copied in part
from http://www.jejik.com/files/examples/daemon3x.py
|
entailment
|
def _get_exception_log_path():
"""Return the normalized path for the connection log, raising an
exception if it can not written to.
:return: str
"""
app = sys.argv[0].split('/')[-1]
for exception_log in ['/var/log/%s.errors' % app,
'/var/tmp/%s.errors' % app,
'/tmp/%s.errors' % app]:
if os.access(path.dirname(exception_log), os.W_OK):
return exception_log
return None
|
Return the normalized path for the connection log, raising an
exception if it cannot be written to.
:return: str
|
entailment
|
def _get_pidfile_path(self):
"""Return the normalized path for the pidfile, raising an
exception if it can not written to.
:return: str
:raises: ValueError
:raises: OSError
"""
if self.config.daemon.pidfile:
pidfile = path.abspath(self.config.daemon.pidfile)
if not os.access(path.dirname(pidfile), os.W_OK):
raise ValueError('Cannot write to specified pid file path'
' %s' % pidfile)
return pidfile
app = sys.argv[0].split('/')[-1]
for pidfile in ['%s/pids/%s.pid' % (os.getcwd(), app),
'/var/run/%s.pid' % app,
'/var/run/%s/%s.pid' % (app, app),
'/var/tmp/%s.pid' % app,
'/tmp/%s.pid' % app,
'%s.pid' % app]:
if os.access(path.dirname(pidfile), os.W_OK):
return pidfile
raise OSError('Could not find an appropriate place for a pid file')
|
Return the normalized path for the pidfile, raising an
exception if it cannot be written to.
:return: str
:raises: ValueError
:raises: OSError
|
entailment
|
def _is_already_running(self):
    """Check to see if the process is running, first looking for a pidfile,
    then shelling out in either case, removing a pidfile if it exists but
    the process is not running.

    :rtype: bool
    """
    # Look for the pidfile, if exists determine if the process is alive
    pidfile = self._get_pidfile_path()
    if os.path.exists(pidfile):
        pid = open(pidfile).read().strip()
        try:
            # Signal 0 only probes for existence; OSError means the pid
            # is dead (or not signalable by us).
            os.kill(int(pid), 0)
            sys.stderr.write('Process already running as pid # %s\n' % pid)
            return True
        except OSError as error:
            # Stale pidfile: the recorded process is gone, clean it up.
            LOGGER.debug('Found pidfile, no process # %s', error)
            os.unlink(pidfile)
    # Check the os for a process that is not this one that looks the same
    pattern = ' '.join(sys.argv)
    # The '[x]yz' bracket trick keeps the grep process itself from
    # matching its own command line in the ps output.
    pattern = '[%s]%s' % (pattern[0], pattern[1:])
    try:
        output = subprocess.check_output('ps a | grep "%s"' % pattern,
                                         shell=True)
    except AttributeError:
        # Python 2.6
        stdin, stdout, stderr = os.popen3('ps a | grep "%s"' % pattern)
        output = stdout.read()
    except subprocess.CalledProcessError:
        # grep exited non-zero: no matching process found.
        return False
    # NOTE(review): without re.MULTILINE the '^' anchor only matches the
    # start of the whole output, so at most the first line's pid can be
    # captured -- confirm whether multiline matching was intended.
    pids = [int(pid) for pid in (re.findall(r'^([0-9]+)\s',
                                            output.decode('latin-1')))]
    if os.getpid() in pids:
        pids.remove(os.getpid())
    if not pids:
        return False
    if len(pids) == 1:
        pids = pids[0]
    sys.stderr.write('Process already running as pid # %s\n' % pids)
    return True
|
Check to see if the process is running, first looking for a pidfile,
then shelling out in either case, removing a pidfile if it exists but
the process is not running.
|
entailment
|
def _remove_pidfile(self):
    """Delete the daemon's pid file, ignoring the error when it is
    already gone or cannot be removed."""
    LOGGER.debug('Removing pidfile: %s', self.pidfile_path)
    try:
        os.unlink(self.pidfile_path)
    except OSError:
        # Best effort -- the pidfile may never have been written.
        pass
|
Remove the pid file from the filesystem
|
entailment
|
def _write_pidfile(self):
    """Record the current process id in the daemon's pid file."""
    LOGGER.debug('Writing pidfile: %s', self.pidfile_path)
    pid_text = str(os.getpid())
    with open(self.pidfile_path, "w") as pidfile:
        pidfile.write(pid_text)
|
Write the pid file out with the process number in the pid file
|
entailment
|
def to_camel_case(snake_case_string):
    """
    Convert a string from snake case to camel case. For example, "some_var" would become "someVar".
    :param snake_case_string: Snake-cased string to convert to camel case.
    :returns: Camel-cased version of snake_case_string.
    """
    # Leading underscores are dropped; the first piece keeps its casing.
    pieces = snake_case_string.lstrip('_').split('_')
    head = pieces[0]
    tail = ''.join(piece.title() for piece in pieces[1:])
    return head + tail
|
Convert a string from snake case to camel case. For example, "some_var" would become "someVar".
:param snake_case_string: Snake-cased string to convert to camel case.
:returns: Camel-cased version of snake_case_string.
|
entailment
|
def to_capitalized_camel_case(snake_case_string):
    """
    Convert a string from snake case to camel case with the first letter capitalized. For example, "some_var"
    would become "SomeVar".
    :param snake_case_string: Snake-cased string to convert to camel case.
    :returns: Camel-cased version of snake_case_string.
    """
    return ''.join(part.title() for part in snake_case_string.split('_'))
|
Convert a string from snake case to camel case with the first letter capitalized. For example, "some_var"
would become "SomeVar".
:param snake_case_string: Snake-cased string to convert to camel case.
:returns: Camel-cased version of snake_case_string.
|
entailment
|
def to_snake_case(camel_case_string):
    """
    Convert a string from camel case to snake case. For example, "someVar" would become "some_var".
    :param camel_case_string: Camel-cased string to convert to snake case.
    :return: Snake-cased version of camel_case_string.
    """
    # Two passes: the module-level regexes insert underscores at the
    # case boundaries, then everything is lowercased.
    underscored = _first_camel_case_regex.sub(r'\1_\2', camel_case_string)
    underscored = _second_camel_case_regex.sub(r'\1_\2', underscored)
    return underscored.lower()
|
Convert a string from camel case to snake case. From example, "someVar" would become "some_var".
:param camel_case_string: Camel-cased string to convert to snake case.
:return: Snake-cased version of camel_case_string.
|
entailment
|
def keys_to_snake_case(camel_case_dict):
    """
    Make a copy of a dictionary with all keys converted to snake case. This just calls to_snake_case on
    each of the keys in the dictionary and returns a new dictionary.
    :param camel_case_dict: Dictionary with the keys to convert.
    :type camel_case_dict: Dictionary.
    :return: Dictionary with the keys converted to snake case.
    """
    return {to_snake_case(key): value
            for key, value in camel_case_dict.items()}
|
Make a copy of a dictionary with all keys converted to snake case. This is just calls to_snake_case on
each of the keys in the dictionary and returns a new dictionary.
:param camel_case_dict: Dictionary with the keys to convert.
:type camel_case_dict: Dictionary.
:return: Dictionary with the keys converted to snake case.
|
entailment
|
def list_functions(awsclient):
    """List the deployed lambda functions and print configuration.
    :param awsclient: client wrapper used to talk to AWS
    :return: exit_code
    """
    client_lambda = awsclient.get_client('lambda')
    response = client_lambda.list_functions()
    # (label, response key) pairs printed per function, in order.
    details = [('Memory: ', 'MemorySize'),
               ('Timeout: ', 'Timeout'),
               ('Role: ', 'Role'),
               ('Current Version: ', 'Version'),
               ('Last Modified: ', 'LastModified'),
               ('CodeSha256: ', 'CodeSha256')]
    for function in response['Functions']:
        log.info(function['FunctionName'])
        for label, key in details:
            log.info('\t' + label + str(function[key]))
        log.info('\n')
    return 0
|
List the deployed lambda functions and print configuration.
:return: exit_code
|
entailment
|
def deploy_lambda(awsclient, function_name, role, handler_filename,
                  handler_function,
                  folders, description, timeout, memory, subnet_ids=None,
                  security_groups=None, artifact_bucket=None,
                  zipfile=None,
                  fail_deployment_on_unsuccessful_ping=False,
                  runtime='python2.7', settings=None, environment=None,
                  retention_in_days=None
                  ):
    """Create or update a lambda function.
    :param awsclient: client wrapper used to talk to AWS
    :param function_name:
    :param role:
    :param handler_filename:
    :param handler_function:
    :param folders:
    :param description:
    :param timeout:
    :param memory:
    :param subnet_ids:
    :param security_groups:
    :param artifact_bucket:
    :param zipfile:
    :param fail_deployment_on_unsuccessful_ping: fail (exit code 1) when
        the deployed function does not answer the ping event
    :param runtime:
    :param environment: environment variables
    :param retention_in_days: retention time of the cloudwatch logs
    :return: exit_code
    """
    # TODO: the signature of this function is too big, clean this up
    # also consolidate create, update, config and add waiters!
    if lambda_exists(awsclient, function_name):
        function_version = _update_lambda(awsclient, function_name,
                                          handler_filename,
                                          handler_function, folders, role,
                                          description, timeout, memory,
                                          subnet_ids, security_groups,
                                          artifact_bucket=artifact_bucket,
                                          zipfile=zipfile,
                                          environment=environment
                                          )
    else:
        # A brand-new function can only be created from a bundle.
        if not zipfile:
            return 1
        log.info('buffer size: %0.2f MB' % float(len(zipfile) / 1000000.0))
        function_version = _create_lambda(awsclient, function_name, role,
                                          handler_filename, handler_function,
                                          folders, description, timeout,
                                          memory, subnet_ids, security_groups,
                                          artifact_bucket, zipfile,
                                          runtime=runtime,
                                          environment=environment)
    # configure cloudwatch logs
    if retention_in_days:
        log_group_name = '/aws/lambda/%s' % function_name
        put_retention_policy(awsclient, log_group_name, retention_in_days)
    pong = ping(awsclient, function_name, version=function_version)
    if 'alive' in str(pong):
        log.info(colored.green('Great you\'re already accepting a ping ' +
                               'in your Lambda function'))
    elif fail_deployment_on_unsuccessful_ping and 'alive' not in str(pong):
        # Bug fix: the original tested "not 'alive' in pong" against the
        # raw payload, which is bytes on Python 3 and raises TypeError;
        # compare against str(pong) exactly like the branch above.
        log.info(colored.red('Pinging your lambda function failed'))
        # we do not deploy alias and fail command
        return 1
    else:
        log.info(colored.red('Please consider adding a reaction to a ' +
                             'ping event to your lambda function'))
    _deploy_alias(awsclient, function_name, function_version)
    return 0
|
Create or update a lambda function.
:param awsclient:
:param function_name:
:param role:
:param handler_filename:
:param handler_function:
:param folders:
:param description:
:param timeout:
:param memory:
:param subnet_ids:
:param security_groups:
:param artifact_bucket:
:param zipfile:
:param environment: environment variables
:param retention_in_days: retention time of the cloudwatch logs
:return: exit_code
|
entailment
|
def bundle_lambda(zipfile):
    """Persist the zipped bundle to './bundle.zip'.
    :param zipfile: the bundle content to write
    :return: exit_code
    """
    # TODO have 'bundle.zip' as default config
    if not zipfile:
        return 1
    with open('bundle.zip', 'wb') as bundle_file:
        bundle_file.write(zipfile)
    log.info('Finished - a bundle.zip is waiting for you...')
    return 0
|
Write zipfile contents to file.
:param zipfile:
:return: exit_code
|
entailment
|
def get_metrics(awsclient, name):
    """Print out CloudWatch metrics for a lambda function.

    Sums each of Duration, Errors, Invocations and Throttles over the
    last 24 hours in one-hour periods and logs the aggregated datapoints.

    :param awsclient: client wrapper used to talk to AWS
    :param name: name of the lambda function
    :return: exit_code
    """
    metrics = ['Duration', 'Errors', 'Invocations', 'Throttles']
    client_cw = awsclient.get_client('cloudwatch')
    for metric in metrics:
        response = client_cw.get_metric_statistics(
            Namespace='AWS/Lambda',
            MetricName=metric,
            Dimensions=[
                {
                    'Name': 'FunctionName',
                    'Value': name
                },
            ],
            # StartTime=datetime.now() + timedelta(days=-1),
            # EndTime=datetime.now(),
            StartTime=maya.now().subtract(days=1).datetime(),
            EndTime=maya.now().datetime(),
            Period=3600,
            Statistics=[
                'Sum',
            ],
            # unit() maps the metric name to its CloudWatch unit
            Unit=unit(metric)
        )
        log.info('\t%s %s' % (metric,
                 repr(aggregate_datapoints(response['Datapoints']))))
    return 0
|
Print out CloudWatch metrics for a lambda function.
:param awsclient
:param name: name of the lambda function
:return: exit_code
|
entailment
|
def rollback(awsclient, function_name, alias_name=ALIAS_NAME, version=None):
    """Rollback a lambda function to a given version.
    :param awsclient: client wrapper used to talk to AWS
    :param function_name:
    :param alias_name:
    :param version: explicit target version; the previous version is
        looked up when omitted
    :return: exit_code
    """
    if not version:
        log.info('rolling back to previous version')
        version = _get_previous_version(awsclient, function_name, alias_name)
        if version == '0':
            # '0' is the sentinel for "no previous version found".
            log.error('unable to find previous version of lambda function')
            return 1
        log.info('new version is %s' % str(version))
    else:
        log.info('rolling back to version {}'.format(version))
    _update_alias(awsclient, function_name, version, alias_name)
    return 0
|
Rollback a lambda function to a given version.
:param awsclient:
:param function_name:
:param alias_name:
:param version:
:return: exit_code
|
entailment
|
def delete_lambda(awsclient, function_name, events=None, delete_logs=False):
    """Delete a lambda function.
    :param awsclient: client wrapper used to talk to AWS
    :param function_name:
    :param events: list of events
    :param delete_logs: also remove the function's CloudWatch log group
    :return: exit_code
    """
    # Detach event wiring first so nothing fires during deletion.
    if events is not None:
        unwire(awsclient, events, function_name, alias_name=ALIAS_NAME)
    response = awsclient.get_client('lambda').delete_function(
        FunctionName=function_name)
    if delete_logs:
        delete_log_group(awsclient, '/aws/lambda/%s' % function_name)
    # TODO remove event source first and maybe also needed for permissions
    log.info(json2table(response))
    return 0
|
Delete a lambda function.
:param awsclient:
:param function_name:
:param events: list of events
:param delete_logs:
:return: exit_code
|
entailment
|
def delete_lambda_deprecated(awsclient, function_name, s3_event_sources=None,
                             time_event_sources=None, delete_logs=False):
    """Deprecated: please use delete_lambda!
    :param awsclient: client wrapper used to talk to AWS
    :param function_name:
    :param s3_event_sources: defaults to an empty list
    :param time_event_sources: defaults to an empty list
    :param delete_logs:
    :return: exit_code
    """
    # Fix for the original FIXME: the mutable default arguments ([]) are
    # replaced with None sentinels so call sites can no longer share and
    # mutate the same list object across calls.
    if s3_event_sources is None:
        s3_event_sources = []
    if time_event_sources is None:
        time_event_sources = []
    unwire_deprecated(awsclient, function_name, s3_event_sources=s3_event_sources,
                      time_event_sources=time_event_sources,
                      alias_name=ALIAS_NAME)
    client_lambda = awsclient.get_client('lambda')
    response = client_lambda.delete_function(FunctionName=function_name)
    if delete_logs:
        log_group_name = '/aws/lambda/%s' % function_name
        delete_log_group(awsclient, log_group_name)
    # TODO remove event source first and maybe also needed for permissions
    log.info(json2table(response))
    return 0
|
Deprecated: please use delete_lambda!
:param awsclient:
:param function_name:
:param s3_event_sources:
:param time_event_sources:
:param delete_logs:
:return: exit_code
|
entailment
|
def cleanup_bundle():
    """Deletes files used for creating bundle.
    * vendored/*
    * bundle.zip
    """
    # The loop variable was renamed from 'path' to 'target': the old name
    # shadowed the module-level 'path' (os.path) import that other
    # functions in this file rely on.
    for target in ('./vendored', './bundle.zip'):
        if os.path.exists(target):
            log.debug("Deleting %s..." % target)
            if os.path.isdir(target):
                shutil.rmtree(target)
            else:
                os.remove(target)
|
Deletes files used for creating bundle.
* vendored/*
* bundle.zip
|
entailment
|
def ping(awsclient, function_name, alias_name=ALIAS_NAME, version=None):
    """Send a ping request to a lambda function.
    :param awsclient: client wrapper used to talk to AWS
    :param function_name:
    :param alias_name:
    :param version:
    :return: ping response payload
    """
    log.debug('sending ping to lambda function: %s', function_name)
    ping_payload = '{"ramuda_action": "ping"}'  # the default ping event
    # Delegate to invoke so both code paths stay in sync.
    return invoke(awsclient, function_name, ping_payload,
                  invocation_type=None, alias_name=alias_name,
                  version=version)
|
Send a ping request to a lambda function.
:param awsclient:
:param function_name:
:param alias_name:
:param version:
:return: ping response payload
|
entailment
|
def invoke(awsclient, function_name, payload, invocation_type=None,
           alias_name=ALIAS_NAME, version=None, outfile=None):
    """Invoke a lambda function and return (or save) its response payload.

    :param awsclient: client wrapper used to talk to AWS
    :param function_name:
    :param payload: payload string, or 'file://<path>' to read it from a file
    :param invocation_type: defaults to 'RequestResponse' when None
    :param alias_name: alias to invoke when no explicit version is given
    :param version:
    :param outfile: write response to file
    :return: response payload, or None when outfile is given
    """
    log.debug('invoking lambda function: %s', function_name)
    client_lambda = awsclient.get_client('lambda')
    if invocation_type is None:
        invocation_type = 'RequestResponse'
    # 'file://...' payloads are read from disk before the call.
    if payload.startswith('file://'):
        log.debug('reading payload from file: %s' % payload)
        with open(payload[7:], 'r') as pfile:
            payload = pfile.read()
    # Qualifier selects either a pinned version or the alias.
    if version:
        response = client_lambda.invoke(
            FunctionName=function_name,
            InvocationType=invocation_type,
            Payload=payload,
            Qualifier=version
        )
    else:
        response = client_lambda.invoke(
            FunctionName=function_name,
            InvocationType=invocation_type,
            Payload=payload,
            Qualifier=alias_name
        )
    results = response['Payload'].read()  # payload is a 'StreamingBody'
    log.debug('invoke completed')
    # write to file
    if outfile:
        with open(outfile, 'w') as ofile:
            # NOTE(review): results is bytes on Python 3, so str(results)
            # writes the "b'...'" repr -- confirm whether decode() was meant.
            ofile.write(str(results))
            ofile.flush()
        return
    else:
        return results
|
Invoke a lambda function and return (or save) its response payload.
:param awsclient:
:param function_name:
:param payload:
:param invocation_type:
:param alias_name:
:param version:
:param outfile: write response to file
:return: ping response payload
|
entailment
|
def logs(awsclient, function_name, start_dt, end_dt=None, tail=False):
    """Fetch and print the CloudWatch log entries of a lambda function.

    :param awsclient: client wrapper used to talk to AWS
    :param function_name:
    :param start_dt: datetime to start reading logs from
    :param end_dt: optional datetime to stop at (open-ended when None)
    :param tail: keep polling for new log entries every 2 seconds
    :return:
    """
    log.debug('Getting cloudwatch logs for: %s', function_name)
    log_group_name = '/aws/lambda/%s' % function_name
    current_date = None
    start_ts = datetime_to_timestamp(start_dt)
    if end_dt:
        end_ts = datetime_to_timestamp(end_dt)
    else:
        end_ts = None
    # tail mode
    # we assume that logs can arrive late but not out of order
    # so we hold the timestamp of the last logentry and start the next iteration
    # from there
    while True:
        logentries = filter_log_events(awsclient, log_group_name,
                                       start_ts=start_ts, end_ts=end_ts)
        if logentries:
            for e in logentries:
                actual_date, actual_time = decode_format_timestamp(e['timestamp'])
                if current_date != actual_date:
                    # print the date only when it changed
                    current_date = actual_date
                    log.info(current_date)
                log.info('%s %s' % (actual_time, e['message'].strip()))
        if tail:
            if logentries:
                # resume just after the newest entry we have seen
                start_ts = logentries[-1]['timestamp'] + 1
            time.sleep(2)
            continue
        break
|
Fetch and print the CloudWatch log entries of a lambda function.
:param awsclient:
:param function_name:
:param start_dt:
:param end_dt:
:param tail:
:return:
|
entailment
|
def __validate_datetime_string(self):
    """Reject values that parse as version strings (such as "3.3.5").

    A version string could otherwise be silently converted to a datetime
    value, so this raises before that conversion can happen.

    :raises TypeConversionError: if self._value parses as a version
        string, or is of a type StrictVersion cannot handle
    """
    try:
        try:
            # If StrictVersion accepts the value it IS a version string,
            # which must not be treated as a datetime.
            StrictVersion(self._value)
            raise TypeConversionError(
                "invalid datetime string: version string found {}".format(self._value)
            )
        except ValueError:
            # Not parsable as a version -- fine, let conversion proceed.
            pass
    except TypeError:
        # StrictVersion raised TypeError for a non-string input.
        # NOTE(review): this assumes TypeConversionError is not a subclass
        # of TypeError, otherwise the inner raise above would be swallowed
        # here -- confirm the exception hierarchy.
        raise TypeConversionError("invalid datetime string: type={}".format(type(self._value)))
|
This will require validating version string (such as "3.3.5").
A version string could be converted to a datetime value if this
validation is not executed.
|
entailment
|
def ADR(self, params):
    """
    ADR Ra, [PC, #imm10_4]
    ADR Ra, label
    Load the address of label or the PC offset into Ra
    Ra must be a low register

    :param params: the instruction's operand string
    :return: a zero-argument closure that performs the load when executed
    """
    # TODO may need to rethink how I do PC, may need to be byte alligned
    # TODO This is wrong as each address is a word, not a byte. The filled value with its location (Do we want that, or the value at that location [Decompiled instruction])
    try:
        # Try the bracketed '[PC, #imm]' form first; a ParsingError means
        # the two-parameter label form was used instead.
        Ra, Rb, Rc = self.get_three_parameters(self.THREE_PARAMETER_WITH_BRACKETS, params)
    except iarm.exceptions.ParsingError:
        Ra, label = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)
        # TODO the address must be within 1020 bytes of current PC
        self.check_arguments(low_registers=(Ra,), label_exists=(label,))

        def ADR_func():
            self.register[Ra] = self.labels[label]  # TODO is this correct?
        return ADR_func
    # Bracketed form: the base register must literally be PC.
    self.check_arguments(low_registers=(Ra,), imm10_4=(Rc,))
    if Rb != 'PC':
        raise iarm.exceptions.IarmError("Second position argument is not PC: {}".format(Rb))

    def ADR_func():
        # Rc is '#imm'; strip the leading '#' before converting.
        self.register[Ra] = self.register[Rb] + self.convert_to_integer(Rc[1:])
    return ADR_func
|
ADR Ra, [PC, #imm10_4]
ADR Ra, label
Load the address of label or the PC offset into Ra
Ra must be a low register
|
entailment
|
def LDR(self, params):
    """
    LDR Ra, [PC, #imm10_4]
    LDR Ra, label
    LDR Ra, =equate
    LDR Ra, [Rb, Rc]
    LDR Ra, [Rb, #imm7_4]
    LDR Ra, [SP, #imm10_4]
    Load a word from memory into Ra
    Ra, Rb, and Rc must be low registers

    :param params: the instruction's operand string
    :return: a zero-argument closure that performs the load when executed
    """
    # TODO definition for PC is Ra <- M[PC + Imm10_4], Imm10_4 = PC - label, need to figure this one out
    try:
        # Bracketed three-parameter forms: [Rb, Rc] / [Rb, #imm] / [SP, #imm]
        Ra, Rb, Rc = self.get_three_parameters(self.THREE_PARAMETER_WITH_BRACKETS, params)
    except iarm.exceptions.ParsingError:
        # Two-parameter forms: 'LDR Ra, label', 'LDR Ra, =equate',
        # or the degenerate bracketed form 'LDR Ra, [Rb]'.
        Ra, label_name = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)
        if label_name.startswith('='):
            # This is a pseudoinstructions
            label_name = label_name[1:]
            # TODO add check that label is a 32 bit number
            # TODO This does not work on instruction loading. This interpreter follows a harvard like architecture,
            # TODO while ARMv6-M (Cortex-M0+) is a Von Neumann architeture. Instructions will not be decompiled
            self.check_arguments(low_registers=(Ra,))
            # Resolve the value now if possible: label, equate, or literal.
            if label_name in self.labels:
                label_value = self.labels[label_name]
            elif label_name in self.equates:
                label_value = self.equates[label_name]
            else:
                try:
                    label_value = int(self.convert_to_integer(label_name))
                except ValueError:
                    # Defer resolution to run time; warn so the user knows.
                    warnings.warn(iarm.exceptions.LabelDoesNotExist("Label `{}` does not exist or is not a parsable number. If it is a label, make sure it exists before running".format(label_name)))
                    label_value = None
            if label_value is not None and int(label_value) % 4 != 0:
                # Make sure we are word aligned
                raise iarm.exceptions.IarmError("Memory access not word aligned; Immediate: {}".format(int(label_value)))
        elif label_name.startswith('[') and label_name.endswith(']'):
            # TODO improve this
            Rb = label_name[1:-1]
            if Rb == 'SP' or Rb == 'R13':
                self.check_arguments(low_registers=(Ra,))
            else:
                # NOTE(review): this passes the still-bracketed string
                # (label_name) rather than Rb to check_arguments -- confirm.
                self.check_arguments(low_registers=(Ra, label_name))

            def LDR_func():
                # NOTE(review): self.memory[Rb] indexes memory with the
                # register *name*; self.register[Rb] % 4 looks intended.
                if self.memory[Rb] % 4 != 0:
                    raise iarm.exceptions.HardFault(
                        "Memory access not word aligned; Register: {} Immediate: {}".format(self.register[Rb],
                                                                                            self.convert_to_integer(
                                                                                                Rc[1:])))
                # Assemble the word from four little-endian bytes.
                self.register[Ra] = 0
                for i in range(4):
                    self.register[Ra] |= (self.memory[self.register[Rb] + i] << (8 * i))
            return LDR_func
        else:
            # Plain label form: the value must exist, be < 1024, and be
            # word aligned (checked now when the label is already known).
            self.check_arguments(low_registers=(Ra,), label_exists=(label_name,))
            try:
                label_value = self.labels[label_name]
                if label_value >= 1024:
                    raise iarm.exceptions.IarmError("Label {} has value {} and is greater than 1020".format(label_name, label_value))
                if label_value % 4 != 0:
                    raise iarm.exceptions.IarmError("Label {} has value {} and is not word aligned".format(label_name, label_value))
            except KeyError:
                # Label doesn't exist, nothing we can do about that except maybe raise an exception now,
                # But we're avoiding that elsewhere, might as well avoid it here too
                pass

        # Shared closure for the '=' and plain-label forms.
        def LDR_func():
            nonlocal label_value
            # Since we can get a label that didn't exist in the creation step, We need to check it here
            # TODO is there a way for label_value to not exist?
            if label_value is None:
                # Try to get it again
                if label_name in self.labels:
                    label_value = self.labels[label_name]
                elif label_name in self.equates:
                    label_value = self.equates[label_name]
                # If it is still None, then it never got allocated
                if label_value is None:
                    raise iarm.exceptions.IarmError("label `{}` does not exist. Was space allocated?".format(label_name))
                # It does exist, make sure its word aligned
                if int(label_value) % 4 != 0:
                    raise iarm.exceptions.IarmError("Memory access not word aligned; Immediate: {}".format(int(label_value)))
            try:
                self.register[Ra] = int(label_value)
            except ValueError:
                # TODO Can we even get to this path now?
                self.register[Ra] = self.labels[label_name]
        return LDR_func
    if self.is_immediate(Rc):
        if Rb == 'SP' or Rb == 'R15':
            # NOTE(review): 'R15' is PC, while the SP check elsewhere in
            # this class uses 'R13' -- confirm which name is intended here.
            self.check_arguments(low_registers=(Ra,), imm10_4=(Rc,))
        else:
            self.check_arguments(low_registers=(Ra, Rb), imm7_4=(Rc,))

        def LDR_func():
            # TODO does memory read up?
            if (self.register[Rb] + self.convert_to_integer(Rc[1:])) % 4 != 0:
                raise iarm.exceptions.HardFault("Memory access not word aligned; Register: {} Immediate: {}".format(self.register[Rb], self.convert_to_integer(Rc[1:])))
            # Assemble the word from four little-endian bytes.
            self.register[Ra] = 0
            for i in range(4):
                self.register[Ra] |= (self.memory[self.register[Rb] + self.convert_to_integer(Rc[1:]) + i] << (8 * i))
    else:
        self.check_arguments(low_registers=(Ra, Rb, Rc))

        def LDR_func():
            # TODO does memory read up?
            if (self.register[Rb] + self.register[Rc]) % 4 != 0:
                raise iarm.exceptions.HardFault(
                    "Memory access not word aligned; Register: {} Immediate: {}".format(self.register[Rb],
                                                                                        self.convert_to_integer(
                                                                                            Rc[1:])))
            # Assemble the word from four little-endian bytes.
            self.register[Ra] = 0
            for i in range(4):
                self.register[Ra] |= (self.memory[self.register[Rb] + self.register[Rc] + i] << (8 * i))
    return LDR_func
|
LDR Ra, [PC, #imm10_4]
LDR Ra, label
LDR Ra, =equate
LDR Ra, [Rb, Rc]
LDR Ra, [Rb, #imm7_4]
LDR Ra, [SP, #imm10_4]
Load a word from memory into Ra
Ra, Rb, and Rc must be low registers
|
entailment
|
def LDRB(self, params):
    """
    LDRB Ra, [Rb, Rc]
    LDRB Ra, [Rb, #imm5]
    Load a byte from memory into Ra
    Ra, Rb, and Rc must be low registers
    """
    try:
        Ra, Rb, Rc = self.get_three_parameters(self.THREE_PARAMETER_WITH_BRACKETS, params)
    except iarm.exceptions.ParsingError:
        # A bare 'LDRB Rn, [Rk]' is treated as an offset of zero
        Ra, Rb = self.get_two_parameters(r'\s*([^\s,]*),\s*\[([^\s,]*)\](,\s*[^\s,]*)*\s*', params)
        Rc = '#0'
    if self.is_immediate(Rc):
        self.check_arguments(low_registers=(Ra, Rb), imm5=(Rc,))

        def LDRB_func():
            offset = self.convert_to_integer(Rc[1:])
            self.register[Ra] = self.memory[self.register[Rb] + offset]
    else:
        self.check_arguments(low_registers=(Ra, Rb, Rc))

        def LDRB_func():
            self.register[Ra] = self.memory[self.register[Rb] + self.register[Rc]]
    return LDRB_func
|
LDRB Ra, [Rb, Rc]
LDRB Ra, [Rb, #imm5]
Load a byte from memory into Ra
Ra, Rb, and Rc must be low registers
|
entailment
|
def LDRH(self, params):
    """
    LDRH Ra, [Rb, Rc]
    LDRH Ra, [Rb, #imm6_2]
    Load a half word from memory into Ra
    Ra, Rb, and Rc must be low registers

    :param params: the instruction's operand string
    :return: a zero-argument closure that performs the load when executed
    """
    try:
        Ra, Rb, Rc = self.get_three_parameters(self.THREE_PARAMETER_WITH_BRACKETS, params)
    except iarm.exceptions.ParsingError:
        # LDRH Rn, [Rk] translates to an offset of zero
        Ra, Rb = self.get_two_parameters(r'\s*([^\s,]*),\s*\[([^\s,]*)\](,\s*[^\s,]*)*\s*', params)
        Rc = '#0'
    if self.is_immediate(Rc):
        self.check_arguments(low_registers=(Ra, Rb), imm6_2=(Rc,))

        def LDRH_func():
            # TODO does memory read up?
            # NOTE(review): only the base register is alignment-checked
            # here; the register-offset branch below checks base + offset
            # -- confirm the immediate should not be added in as well.
            if (self.register[Rb]) % 2 != 0:
                raise iarm.exceptions.HardFault(
                    "Memory access not half word aligned; Register: {} Immediate: {}".format(self.register[Rb],
                                                                                             self.convert_to_integer(
                                                                                                 Rc[1:])))
            # Assemble the half word from two little-endian bytes.
            self.register[Ra] = 0
            for i in range(2):
                self.register[Ra] |= (self.memory[self.register[Rb] + self.convert_to_integer(Rc[1:]) + i] << (8 * i))
    else:
        self.check_arguments(low_registers=(Ra, Rb, Rc))

        def LDRH_func():
            # TODO does memory read up?
            if (self.register[Rb] + self.register[Rc]) % 2 != 0:
                raise iarm.exceptions.HardFault(
                    "Memory access not half word aligned; Register: {} Immediate: {}".format(self.register[Rb],
                                                                                             self.register[Rc]))
            # Assemble the half word from two little-endian bytes.
            self.register[Ra] = 0
            for i in range(2):
                self.register[Ra] |= (self.memory[self.register[Rb] + self.register[Rc] + i] << (8 * i))
    return LDRH_func
|
LDRH Ra, [Rb, Rc]
LDRH Ra, [Rb, #imm6_2]
Load a half word from memory into Ra
Ra, Rb, and Rc must be low registers
|
entailment
|
def LDRSB(self, params):
    """
    LDRSB Ra, [Rb, Rc]
    Load a byte from memory, sign extend, and put into Ra
    Ra, Rb, and Rc must be low registers
    """
    # TODO LDRSB cant use immediates
    Ra, Rb, Rc = self.get_three_parameters(self.THREE_PARAMETER_WITH_BRACKETS, params)
    self.check_arguments(low_registers=(Ra, Rb, Rc))

    def LDRSB_func():
        # TODO does memory read up?
        loaded = self.memory[self.register[Rb] + self.register[Rc]]
        if loaded & (1 << 7):
            # Negative byte: extend the sign through the upper 24 bits.
            loaded |= (0xFFFFFF << 8)
        self.register[Ra] = loaded
    return LDRSB_func
|
LDRSB Ra, [Rb, Rc]
Load a byte from memory, sign extend, and put into Ra
Ra, Rb, and Rc must be low registers
|
entailment
|
def LDRSH(self, params):
    """
    LDRSH Ra, [Rb, Rc]
    Load a half word from memory, sign extend, and put into Ra
    Ra, Rb, and Rc must be low registers

    :param params: the instruction's operand string
    :return: a zero-argument closure that performs the load when executed
    """
    # TODO LDRSH cant use immediates
    Ra, Rb, Rc = self.get_three_parameters(self.THREE_PARAMETER_WITH_BRACKETS, params)
    self.check_arguments(low_registers=(Ra, Rb, Rc))

    def LDRSH_func():
        # TODO does memory read up?
        if (self.register[Rb] + self.register[Rc]) % 2 != 0:
            raise iarm.exceptions.HardFault(
                "Memory access not half word aligned\nR{}: {}\nR{}: {}".format(Rb, self.register[Rb],
                                                                               Rc, self.register[Rc]))
        # Assemble the half word from two little-endian bytes.
        self.register[Ra] = 0
        for i in range(2):
            self.register[Ra] |= (self.memory[self.register[Rb] + self.register[Rc] + i] << (8 * i))
        # Sign-extend: if bit 15 is set, fill the upper 16 bits with ones.
        if self.register[Ra] & (1 << 15):
            self.register[Ra] |= (0xFFFF << 16)
    return LDRSH_func
|
LDRSH Ra, [Rb, Rc]
Load a half word from memory, sign extend, and put into Ra
Ra, Rb, and Rc must be low registers
|
entailment
|
def POP(self, params):
    """
    POP {RPopList}
    Pop from the stack into the list of registers
    List must contain only low registers or PC

    :param params: the instruction's operand string
    :return: a zero-argument closure that performs the pops when executed
    """
    # TODO verify pop order
    # TODO pop list is comma separate, right?
    # TODO what registeres are allowed to POP to? Low Registers and PC
    # TODO need to support ranges, ie {R2, R5-R7}
    # TODO PUSH should reverse the list, not POP
    # The list is reversed so registers come off the stack in the
    # opposite order to how PUSH stored them.
    RPopList = self.get_one_parameter(r'\s*{(.*)}(.*)', params).split(',')
    RPopList.reverse()
    RPopList = [i.strip() for i in RPopList]

    def POP_func():
        for register in RPopList:
            # Get 4 bytes
            value = 0
            for i in range(4):
                # TODO use memory width instead of constants
                value |= self.memory[self.register['SP'] + i] << (8 * i)
            self.register[register] = value
            # Stack grows down, so popping moves SP up by one word.
            self.register['SP'] += 4
    return POP_func
|
POP {RPopList}
Pop from the stack into the list of registers
List must contain only low registers or PC
|
entailment
|
def PUSH(self, params):
    """
    PUSH {RPushList}
    Push to the stack from a list of registers
    List must contain only low registers or LR
    """
    # TODO what registers are allowed to PUSH to? Low registers and LR
    # TODO PUSH should reverse the list, not POP
    raw_names = self.get_one_parameter(r'\s*{(.*)}(.*)', params).split(',')
    push_registers = [name.strip() for name in raw_names]
    # TODO should we make sure the register exists? probably not

    def PUSH_func():
        for reg in push_registers:
            # Stack grows down: make room, then store little-endian bytes.
            self.register['SP'] -= 4
            for byte_index in range(4):
                # TODO is this the same as with POP?
                self.memory[self.register['SP'] + byte_index] = (
                    (self.register[reg] >> (8 * byte_index)) & 0xFF)
    return PUSH_func
|
PUSH {RPushList}
Push to the stack from a list of registers
List must contain only low registers or LR
|
entailment
|
def STM(self, params):
    """
    STM Ra!, {RLoList}
    Store multiple registers into memory
    Ra is advanced past the stored words afterwards (writeback)

    :param params: the instruction's operand string
    :return: a zero-argument closure that performs the stores when executed
    """
    # TODO what registers can be stored?
    # NOTE(review): get_two_parameters appears to return a tuple elsewhere
    # in this class (ADR, LDR unpack it directly), so calling .split(',')
    # on its result here looks suspicious -- confirm this parses correctly.
    Ra, RLoList = self.get_two_parameters(r'\s*([^\s,]*)!,\s*{(.*)}(.*)', params).split(',')
    RLoList = RLoList.split(',')
    RLoList = [i.strip() for i in RLoList]
    self.check_arguments(low_registers=[Ra] + RLoList)

    def STM_func():
        # Store each register as 4 little-endian bytes in consecutive words.
        for i in range(len(RLoList)):
            for j in range(4):
                self.memory[self.register[Ra] + 4*i + j] = ((self.register[RLoList[i]] >> (8 * j)) & 0xFF)
        # Writeback: advance the base register past the stored block.
        self.register[Ra] += 4*len(RLoList)
    return STM_func
|
STM Ra!, {RLoList}
Store multiple registers into memory
|
entailment
|
def STR(self, params):
    """
    STR Ra, [Rb, Rc]
    STR Ra, [Rb, #imm7_4]
    STR Ra, [SP, #imm10_4]
    Store Ra into memory as a word
    Ra, Rb, and Rc must be low registers
    """
    Ra, Rb, Rc = self.get_three_parameters(self.THREE_PARAMETER_WITH_BRACKETS, params)
    if not self.is_immediate(Rc):
        # register-offset form
        self.check_arguments(low_registers=(Ra, Rb, Rc))

        def STR_func():
            address = self.register[Rb] + self.register[Rc]
            value = self.register[Ra]
            for i in range(4):
                self.memory[address + i] = (value >> (8 * i)) & 0xFF
    else:
        # immediate-offset form; SP/FP-relative stores allow a wider offset
        if Rb in ('SP', 'FP'):
            self.check_arguments(low_registers=(Ra,), imm10_4=(Rc,))
        else:
            self.check_arguments(low_registers=(Ra, Rb), imm7_4=(Rc,))

        def STR_func():
            address = self.register[Rb] + self.convert_to_integer(Rc[1:])
            value = self.register[Ra]
            for i in range(4):
                self.memory[address + i] = (value >> (8 * i)) & 0xFF
    return STR_func
|
STR Ra, [Rb, Rc]
STR Ra, [Rb, #imm7_4]
STR Ra, [SP, #imm10_4]
Store Ra into memory as a word
Ra, Rb, and Rc must be low registers
|
entailment
|
def STRB(self, params):
    """
    STRB Ra, [Rb, Rc]
    STRB Ra, [Rb, #imm5]
    Store Ra into memory as a byte
    Ra, Rb, and Rc must be low registers
    """
    Ra, Rb, Rc = self.get_three_parameters(self.THREE_PARAMETER_WITH_BRACKETS, params)
    if not self.is_immediate(Rc):
        # register-offset form
        self.check_arguments(low_registers=(Ra, Rb, Rc))

        def STRB_func():
            address = self.register[Rb] + self.register[Rc]
            self.memory[address] = self.register[Ra] & 0xFF
    else:
        # immediate-offset form
        self.check_arguments(low_registers=(Ra, Rb), imm5=(Rc,))

        def STRB_func():
            address = self.register[Rb] + self.convert_to_integer(Rc[1:])
            self.memory[address] = self.register[Ra] & 0xFF
    return STRB_func
|
STRB Ra, [Rb, Rc]
STRB Ra, [Rb, #imm5]
Store Ra into memory as a byte
Ra, Rb, and Rc must be low registers
|
entailment
|
def STRH(self, params):
    """
    STRH Ra, [Rb, Rc]
    STRH Ra, [Rb, #imm6_2]
    Store Ra into memory as a half word
    Ra, Rb, and Rc must be low registers
    """
    Ra, Rb, Rc = self.get_three_parameters(self.THREE_PARAMETER_WITH_BRACKETS, params)
    if self.is_immediate(Rc):
        # Bug fix: the docstring and the Thumb encoding (a 5-bit offset
        # scaled by 2, i.e. an even value up to 62) call for imm6_2 here,
        # mirroring how STR validates its word offset as imm7_4; the old
        # imm5 check used the byte-store range instead.
        self.check_arguments(low_registers=(Ra, Rb), imm6_2=(Rc,))

        def STRH_func():
            # little-endian store of the low half word
            for i in range(2):
                self.memory[self.register[Rb] + self.convert_to_integer(Rc[1:]) + i] = ((self.register[Ra] >> (8 * i)) & 0xFF)
    else:
        self.check_arguments(low_registers=(Ra, Rb, Rc))

        def STRH_func():
            for i in range(2):
                self.memory[self.register[Rb] + self.register[Rc] + i] = ((self.register[Ra] >> (8 * i)) & 0xFF)
    return STRH_func
|
STRH Ra, [Rb, Rc]
STRH Ra, [Rb, #imm6_2]
Store Ra into memory as a half word
Ra, Rb, and Rc must be low registers
|
entailment
|
def _stop_ec2_instances(awsclient, ec2_instances, wait=True):
    """Helper to stop ec2 instances.
    By default it waits for instances to stop.
    :param awsclient:
    :param ec2_instances:
    :param wait: waits for instances to stop
    :return:
    """
    if not ec2_instances:
        return
    client_ec2 = awsclient.get_client('ec2')
    # restrict the stop call to instances that are actually coming up or running
    query = {
        'InstanceIds': ec2_instances,
        'Filters': [{
            'Name': 'instance-state-name',
            'Values': ['pending', 'running']
        }]
    }
    running_instances = all_pages(
        client_ec2.describe_instance_status,
        query,
        lambda r: [i['InstanceId'] for i in r.get('InstanceStatuses', [])],
    )
    if not running_instances:
        return
    log.info('Stopping EC2 instances: %s', running_instances)
    client_ec2.stop_instances(InstanceIds=running_instances)
    if wait:
        # block until AWS reports every instance as stopped
        client_ec2.get_waiter('instance_stopped').wait(
            InstanceIds=running_instances)
|
Helper to stop ec2 instances.
By default it waits for instances to stop.
:param awsclient:
:param ec2_instances:
:param wait: waits for instances to stop
:return:
|
entailment
|
def _start_ec2_instances(awsclient, ec2_instances, wait=True):
    """Helper to start ec2 instances
    :param awsclient:
    :param ec2_instances:
    :param wait: waits for instances to start
    :return:
    """
    if not ec2_instances:
        return
    client_ec2 = awsclient.get_client('ec2')
    # find the instances that are not already up; IncludeAllInstances makes
    # describe_instance_status report instances in non-running states, too
    query = {
        'InstanceIds': ec2_instances,
        'Filters': [{
            'Name': 'instance-state-name',
            'Values': ['stopping', 'stopped']
        }],
        'IncludeAllInstances': True
    }
    stopped_instances = all_pages(
        client_ec2.describe_instance_status,
        query,
        lambda r: [i['InstanceId'] for i in r.get('InstanceStatuses', [])],
    )
    if not stopped_instances:
        return
    log.info('Starting EC2 instances: %s', stopped_instances)
    client_ec2.start_instances(InstanceIds=stopped_instances)
    if wait:
        # first wait for the instances to enter the 'running' state...
        client_ec2.get_waiter('instance_running').wait(
            InstanceIds=stopped_instances)
        # ...then for their status checks to pass
        client_ec2.get_waiter('instance_status_ok').wait(
            InstanceIds=stopped_instances)
|
Helper to start ec2 instances
:param awsclient:
:param ec2_instances:
:param wait: waits for instances to start
:return:
|
entailment
|
def _filter_db_instances_by_status(awsclient, db_instances, status_list):
"""helper to select dbinstances.
:param awsclient:
:param db_instances:
:param status_list:
:return: list of db_instances that match the filter
"""
client_rds = awsclient.get_client('rds')
db_instances_with_status = []
for db in db_instances:
response = client_rds.describe_db_instances(
DBInstanceIdentifier=db
)
for entry in response.get('DBInstances', []):
if entry['DBInstanceStatus'] in status_list:
db_instances_with_status.append(db)
return db_instances_with_status
|
helper to select dbinstances.
:param awsclient:
:param db_instances:
:param status_list:
:return: list of db_instances that match the filter
|
entailment
|
def _stop_ecs_services(awsclient, services, template, parameters, wait=True):
    """Helper to change desiredCount of ECS services to zero.
    By default it waits for this to complete.
    Docs here: http://docs.aws.amazon.com/cli/latest/reference/ecs/update-service.html
    :param awsclient:
    :param services: stack resource summaries of type AWS::ECS::Service
    :param template: the cloudformation template
    :param parameters: the parameters used for the cloudformation template
    :param wait: waits for services to stop
    :return:
    """
    # NOTE(review): despite the docstring, `wait` is currently ignored --
    # no waiter runs after the update calls. Confirm whether an ECS
    # 'services_stable' wait should be added here.
    if len(services) == 0:
        return
    client_ecs = awsclient.get_client('ecs')
    for service in services:
        log.info('Resize ECS service \'%s\' to desiredCount=0',
                 service['LogicalResourceId'])
        # only the cluster name is needed to stop; the configured
        # desired_count is unused here (start uses it to restore)
        cluster, desired_count = _get_service_cluster_desired_count(
            template, parameters, service['LogicalResourceId'])
        log.debug('cluster: %s' % cluster)
        response = client_ecs.update_service(
            cluster=cluster,
            service=service['PhysicalResourceId'],
            desiredCount=0
        )
|
Helper to change desiredCount of ECS services to zero.
By default it waits for this to complete.
Docs here: http://docs.aws.amazon.com/cli/latest/reference/ecs/update-service.html
:param awsclient:
:param services:
:param template: the cloudformation template
:param parameters: the parameters used for the cloudformation template
:param wait: waits for services to stop
:return:
|
entailment
|
def stop_stack(awsclient, stack_name, use_suspend=False):
    """Stop an existing stack on AWS cloud.

    Stops, in order: autoscaling groups (by suspending their scaling
    processes or resizing them to zero), ECS services (desiredCount=0),
    plain EC2 instances, and RDS instances that belong to the stack.
    :param awsclient: client factory providing get_client(service)
    :param stack_name: name of the deployed cloudformation stack
    :param use_suspend: use suspend and resume on the autoscaling group
    :return: exit_code (currently always 0)
    """
    exit_code = 0
    # check for DisableStop
    #disable_stop = conf.get('deployment', {}).get('DisableStop', False)
    #if disable_stop:
    #    log.warn('\'DisableStop\' is set - nothing to do!')
    #else:
    if not stack_exists(awsclient, stack_name):
        log.warn('Stack \'%s\' not deployed - nothing to do!', stack_name)
    else:
        client_cfn = awsclient.get_client('cloudformation')
        client_autoscaling = awsclient.get_client('autoscaling')
        client_rds = awsclient.get_client('rds')
        client_ec2 = awsclient.get_client('ec2')
        # enumerate all resources of the stack (paginated)
        resources = all_pages(
            client_cfn.list_stack_resources,
            { 'StackName': stack_name },
            lambda r: r['StackResourceSummaries']
        )
        autoscaling_groups = [
            r for r in resources
            if r['ResourceType'] == 'AWS::AutoScaling::AutoScalingGroup'
        ]
        # lookup all types of scaling processes
        # [Launch, Terminate, HealthCheck, ReplaceUnhealthy, AZRebalance
        # AlarmNotification, ScheduledActions, AddToLoadBalancer]
        response = client_autoscaling.describe_scaling_process_types()
        scaling_process_types = [t['ProcessName'] for t in response.get('Processes', [])]
        for asg in autoscaling_groups:
            # find instances in autoscaling group
            ec2_instances = all_pages(
                client_autoscaling.describe_auto_scaling_instances,
                {},
                lambda r: [i['InstanceId'] for i in r.get('AutoScalingInstances', [])
                           if i['AutoScalingGroupName'] == asg['PhysicalResourceId']],
            )
            if use_suspend:
                # alternative implementation to speed up start
                # only problem is that instances must survive stop & start
                # suspend all autoscaling processes
                log.info('Suspending all autoscaling processes for \'%s\'',
                         asg['LogicalResourceId'])
                response = client_autoscaling.suspend_processes(
                    AutoScalingGroupName=asg['PhysicalResourceId'],
                    ScalingProcesses=scaling_process_types
                )
                _stop_ec2_instances(awsclient, ec2_instances)
            else:
                # resize autoscaling group (min, max = 0)
                log.info('Resize autoscaling group \'%s\' to minSize=0, maxSize=0',
                         asg['LogicalResourceId'])
                response = client_autoscaling.update_auto_scaling_group(
                    AutoScalingGroupName=asg['PhysicalResourceId'],
                    MinSize=0,
                    MaxSize=0
                )
                if ec2_instances:
                    # resizing to zero terminates the group's instances;
                    # wait for the ones that were running to disappear
                    running_instances = all_pages(
                        client_ec2.describe_instance_status,
                        {
                            'InstanceIds': ec2_instances,
                            'Filters': [{
                                'Name': 'instance-state-name',
                                'Values': ['pending', 'running']
                            }]
                        },
                        lambda r: [i['InstanceId'] for i in r.get('InstanceStatuses', [])],
                    )
                    if running_instances:
                        # wait for instances to terminate
                        waiter_inst_terminated = client_ec2.get_waiter('instance_terminated')
                        waiter_inst_terminated.wait(InstanceIds=running_instances)
        # setting ECS desiredCount to zero
        services = [
            r for r in resources
            if r['ResourceType'] == 'AWS::ECS::Service'
        ]
        if services:
            template, parameters = _get_template_parameters(awsclient, stack_name)
            _stop_ecs_services(awsclient, services, template, parameters)
        # stopping ec2 instances
        instances = [
            r['PhysicalResourceId'] for r in resources
            if r['ResourceType'] == 'AWS::EC2::Instance'
        ]
        _stop_ec2_instances(awsclient, instances)
        # stopping db instances
        db_instances = [
            r['PhysicalResourceId'] for r in resources
            if r['ResourceType'] == 'AWS::RDS::DBInstance'
        ]
        running_db_instances = _filter_db_instances_by_status(
            awsclient, db_instances, ['available']
        )
        for db in running_db_instances:
            log.info('Stopping RDS instance \'%s\'', db)
            client_rds.stop_db_instance(DBInstanceIdentifier=db)
    return exit_code
|
Stop an existing stack on AWS cloud.
:param awsclient:
:param stack_name:
:param use_suspend: use suspend and resume on the autoscaling group
:return: exit_code
|
entailment
|
def _get_autoscaling_min_max(template, parameters, asg_name):
"""Helper to extract the configured MinSize, MaxSize attributes from the
template.
:param template: cloudformation template (json)
:param parameters: list of {'ParameterKey': 'x1', 'ParameterValue': 'y1'}
:param asg_name: logical resource name of the autoscaling group
:return: MinSize, MaxSize
"""
params = {e['ParameterKey']: e['ParameterValue'] for e in parameters}
asg = template.get('Resources', {}).get(asg_name, None)
if asg:
assert asg['Type'] == 'AWS::AutoScaling::AutoScalingGroup'
min = asg.get('Properties', {}).get('MinSize', None)
max = asg.get('Properties', {}).get('MaxSize', None)
if 'Ref' in min:
min = params.get(min['Ref'], None)
if 'Ref' in max:
max = params.get(max['Ref'], None)
if min and max:
return int(min), int(max)
|
Helper to extract the configured MinSize, MaxSize attributes from the
template.
:param template: cloudformation template (json)
:param parameters: list of {'ParameterKey': 'x1', 'ParameterValue': 'y1'}
:param asg_name: logical resource name of the autoscaling group
:return: MinSize, MaxSize
|
entailment
|
def _get_service_cluster_desired_count(template, parameters, service_name):
"""Helper to extract the configured desiredCount attribute from the
template.
:param template: cloudformation template (json)
:param parameters: list of {'ParameterKey': 'x1', 'ParameterValue': 'y1'}
:param service_name: logical resource name of the ECS service
:return: cluster, desiredCount
"""
params = {e['ParameterKey']: e['ParameterValue'] for e in parameters}
service = template.get('Resources', {}).get(service_name, None)
if service:
assert service['Type'] == 'AWS::ECS::Service'
cluster = service.get('Properties', {}).get('Cluster', None)
desired_count = service.get('Properties', {}).get('DesiredCount', None)
if 'Ref' in cluster:
cluster = params.get(cluster['Ref'], None)
if not isinstance(desired_count, int) and 'Ref' in desired_count:
desired_count = params.get(desired_count['Ref'], None)
return cluster, int(desired_count)
|
Helper to extract the configured desiredCount attribute from the
template.
:param template: cloudformation template (json)
:param parameters: list of {'ParameterKey': 'x1', 'ParameterValue': 'y1'}
:param service_name: logical resource name of the ECS service
:return: cluster, desiredCount
|
entailment
|
def start_stack(awsclient, stack_name, use_suspend=False):
    """Start an existing stack on AWS cloud.

    Reverses stop_stack: starts RDS instances first, then plain EC2
    instances, scales ECS services back up, and restores the autoscaling
    groups (resume suspended processes, or resize back to the sizes
    configured in the template).
    :param awsclient: client factory providing get_client(service)
    :param stack_name: name of the deployed cloudformation stack
    :param use_suspend: use suspend and resume on the autoscaling group
    :return: exit_code (currently always 0)
    """
    exit_code = 0
    # check for DisableStop
    #disable_stop = conf.get('deployment', {}).get('DisableStop', False)
    #if disable_stop:
    #    log.warn('\'DisableStop\' is set - nothing to do!')
    #else:
    if not stack_exists(awsclient, stack_name):
        log.warn('Stack \'%s\' not deployed - nothing to do!', stack_name)
    else:
        client_cfn = awsclient.get_client('cloudformation')
        client_autoscaling = awsclient.get_client('autoscaling')
        client_rds = awsclient.get_client('rds')
        # enumerate all resources of the stack (paginated)
        resources = all_pages(
            client_cfn.list_stack_resources,
            { 'StackName': stack_name },
            lambda r: r['StackResourceSummaries']
        )
        autoscaling_groups = [
            r for r in resources
            if r['ResourceType'] == 'AWS::AutoScaling::AutoScalingGroup'
        ]
        # lookup all types of scaling processes
        # [Launch, Terminate, HealthCheck, ReplaceUnhealthy, AZRebalance
        # AlarmNotification, ScheduledActions, AddToLoadBalancer]
        response = client_autoscaling.describe_scaling_process_types()
        scaling_process_types = [t['ProcessName'] for t in response.get('Processes', [])]
        # starting db instances
        db_instances = [
            r['PhysicalResourceId'] for r in resources
            if r['ResourceType'] == 'AWS::RDS::DBInstance'
        ]
        stopped_db_instances = _filter_db_instances_by_status(
            awsclient, db_instances, ['stopped']
        )
        for db in stopped_db_instances:
            log.info('Starting RDS instance \'%s\'', db)
            client_rds.start_db_instance(DBInstanceIdentifier=db)
        # wait for db instances to become available
        for db in stopped_db_instances:
            waiter_db_available = client_rds.get_waiter('db_instance_available')
            waiter_db_available.wait(DBInstanceIdentifier=db)
        # starting ec2 instances
        instances = [
            r['PhysicalResourceId'] for r in resources
            if r['ResourceType'] == 'AWS::EC2::Instance'
        ]
        _start_ec2_instances(awsclient, instances)
        services = [
            r for r in resources
            if r['ResourceType'] == 'AWS::ECS::Service'
        ]
        # template / parameters are only needed to restore the sizing
        # values that stop_stack overwrote
        if (autoscaling_groups and not use_suspend) or services:
            template, parameters = _get_template_parameters(awsclient, stack_name)
            # setting ECS desiredCount back
            if services:
                _start_ecs_services(awsclient, services, template, parameters)
        for asg in autoscaling_groups:
            if use_suspend:
                # alternative implementation to speed up start
                # only problem is that instances must survive stop & start
                # find instances in autoscaling group
                instances = all_pages(
                    client_autoscaling.describe_auto_scaling_instances,
                    {},
                    lambda r: [i['InstanceId'] for i in r.get('AutoScalingInstances', [])
                               if i['AutoScalingGroupName'] == asg['PhysicalResourceId']],
                )
                _start_ec2_instances(awsclient, instances)
                # resume all autoscaling processes
                log.info('Resuming all autoscaling processes for \'%s\'',
                         asg['LogicalResourceId'])
                response = client_autoscaling.resume_processes(
                    AutoScalingGroupName=asg['PhysicalResourceId'],
                    ScalingProcesses=scaling_process_types
                )
            else:
                # resize autoscaling group back to its original values
                log.info('Resize autoscaling group \'%s\' back to original values',
                         asg['LogicalResourceId'])
                min, max = _get_autoscaling_min_max(
                    template, parameters, asg['LogicalResourceId'])
                response = client_autoscaling.update_auto_scaling_group(
                    AutoScalingGroupName=asg['PhysicalResourceId'],
                    MinSize=min,
                    MaxSize=max
                )
    return exit_code
|
Start an existing stack on AWS cloud.
:param awsclient:
:param stack_name:
:param use_suspend: use suspend and resume on the autoscaling group
:return: exit_code
|
entailment
|
def is_running(self):
    """Property method that returns a bool specifying if the process is
    currently running. This will return true if the state is active, idle
    or initializing.
    :rtype: bool
    """
    running_states = (self.STATE_ACTIVE,
                      self.STATE_IDLE,
                      self.STATE_INITIALIZING)
    return self._state in running_states
|
Property method that returns a bool specifying if the process is
currently running. This will return true if the state is active, idle
or initializing.
:rtype: bool
|
entailment
|
def process_signal(self, signum):
    """Invoked whenever a signal is added to the stack.
    :param int signum: The signal that was added
    """
    if signum == signal.SIGHUP:
        LOGGER.info('Received SIGHUP')
        # only reconfigure when the config file actually changed
        if self.config.reload():
            LOGGER.info('Configuration reloaded')
            logging.config.dictConfig(self.config.logging)
            self.on_configuration_reloaded()
    elif signum == signal.SIGTERM:
        LOGGER.info('Received SIGTERM, initiating shutdown')
        self.stop()
    elif signum == signal.SIGUSR1:
        self.on_sigusr1()
    elif signum == signal.SIGUSR2:
        self.on_sigusr2()
|
Invoked whenever a signal is added to the stack.
:param int signum: The signal that was added
|
entailment
|
def run(self):
    """The core method for starting the application. Will setup logging,
    toggle the runtime state flag, block on loop, then call shutdown.
    Redefine this method if you intend to use an IO Loop or some other
    long running process.
    """
    LOGGER.info('%s v%s started', self.APPNAME, self.VERSION)
    self.setup()
    while not (self.is_stopping or self.is_stopped):
        self.set_state(self.STATE_SLEEPING)
        # block until either a signal arrives or the wake interval expires
        signum = None
        try:
            signum = self.pending_signals.get(True, self.wake_interval)
        except queue.Empty:
            pass
        if signum is not None:
            self.process_signal(signum)
        # a processed signal may have requested shutdown
        if self.is_stopping or self.is_stopped:
            break
        self.set_state(self.STATE_ACTIVE)
        self.process()
|
The core method for starting the application. Will setup logging,
toggle the runtime state flag, block on loop, then call shutdown.
Redefine this method if you intend to use an IO Loop or some other
long running process.
|
entailment
|
def start(self):
    """Important:
    Do not extend this method, rather redefine Controller.run
    """
    # route the handled signals through the internal handler, then enter
    # the main loop
    handled_signals = (signal.SIGHUP, signal.SIGTERM,
                       signal.SIGUSR1, signal.SIGUSR2)
    for signum in handled_signals:
        signal.signal(signum, self._on_signal)
    self.run()
|
Important:
Do not extend this method, rather redefine Controller.run
|
entailment
|
def set_state(self, state):
    """Set the runtime state of the Controller. Use the internal constants
    to ensure proper state values:
    - :attr:`Controller.STATE_INITIALIZING`
    - :attr:`Controller.STATE_ACTIVE`
    - :attr:`Controller.STATE_IDLE`
    - :attr:`Controller.STATE_SLEEPING`
    - :attr:`Controller.STATE_STOP_REQUESTED`
    - :attr:`Controller.STATE_STOPPING`
    - :attr:`Controller.STATE_STOPPED`
    :param int state: The runtime state
    :raises: ValueError
    """
    if state == self._state:
        return
    if state not in self._STATES.keys():
        raise ValueError('Invalid state {}'.format(state))
    # Each entry: (current-state flag, states reachable from it, warning).
    # The first flag that is set and does not allow the requested state
    # rejects the transition; otherwise the assignment below proceeds.
    transition_rules = (
        (self.is_waiting_to_stop,
         (self.STATE_STOPPING, self.STATE_STOPPED),
         'Attempt to set invalid state while waiting to '
         'shutdown: %s '),
        (self.is_stopping,
         (self.STATE_STOPPED,),
         'Attempt to set invalid post shutdown state: %s'),
        (self.is_running,
         (self.STATE_ACTIVE, self.STATE_IDLE, self.STATE_SLEEPING,
          self.STATE_STOP_REQUESTED, self.STATE_STOPPING),
         'Attempt to set invalid post running state: %s'),
        (self.is_sleeping,
         (self.STATE_ACTIVE, self.STATE_IDLE,
          self.STATE_STOP_REQUESTED, self.STATE_STOPPING),
         'Attempt to set invalid post sleeping state: %s'),
    )
    for flag, allowed, warning in transition_rules:
        if flag and state not in allowed:
            LOGGER.warning(warning, self._STATES[state])
            return
    LOGGER.debug('State changed from %s to %s',
                 self._STATES[self._state], self._STATES[state])
    self._state = state
|
Set the runtime state of the Controller. Use the internal constants
to ensure proper state values:
- :attr:`Controller.STATE_INITIALIZING`
- :attr:`Controller.STATE_ACTIVE`
- :attr:`Controller.STATE_IDLE`
- :attr:`Controller.STATE_SLEEPING`
- :attr:`Controller.STATE_STOP_REQUESTED`
- :attr:`Controller.STATE_STOPPING`
- :attr:`Controller.STATE_STOPPED`
:param int state: The runtime state
:raises: ValueError
|
entailment
|
def stop(self):
    """Override to implement shutdown steps."""
    LOGGER.info('Attempting to stop the process')
    self.set_state(self.STATE_STOP_REQUESTED)
    # let subclasses contribute their own shutdown steps first
    self.shutdown()
    # drain: give any in-flight run a chance to complete
    while self.is_running and self.is_waiting_to_stop:
        LOGGER.info('Waiting for the process to finish')
        time.sleep(self.SLEEP_UNIT)
    if not self.is_stopping:
        self.set_state(self.STATE_STOPPING)
    # hook for subclasses to release resources cleanly
    self.on_shutdown()
    self.set_state(self.STATE_STOPPED)
|
Override to implement shutdown steps.
|
entailment
|
def parse_ts(ts):
    """
    parse timestamp.
    :param ts: timestamp in ISO8601 format
    :return: naive datetime object
    """
    # maya.parse accepts the common timestamp layouts, e.g.
    # '%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ' (ISO8601) and
    # '%a, %d %b %Y %H:%M:%S %Z' (RFC1123)
    return maya.parse(ts.strip()).datetime(naive=True)
|
parse timestamp.
:param ts: timestamp in ISO8601 format
:return: tbd!!!
|
entailment
|
def get_outputs_for_stack(awsclient, stack_name):
    """
    Read environment from ENV and mangle it to a (lower case) representation
    Note: gcdt.servicedingcovery get_outputs_for_stack((awsclient, stack_name)
    is used in many cloudformation.py templates!
    :param awsclient:
    :param stack_name:
    :return: dictionary containing the stack outputs
    """
    client_cf = awsclient.get_client('cloudformation')
    response = client_cf.describe_stacks(StackName=stack_name)
    stacks = response['Stacks']
    # implicitly returns None when the stack has no outputs
    if stacks and 'Outputs' in stacks[0]:
        return {entry['OutputKey']: entry['OutputValue']
                for entry in stacks[0]['Outputs']}
|
Read environment from ENV and mangle it to a (lower case) representation
Note: gcdt.servicediscovery get_outputs_for_stack((awsclient, stack_name)
is used in many cloudformation.py templates!
:param awsclient:
:param stack_name:
:return: dictionary containing the stack outputs
|
entailment
|
def get_base_ami(awsclient, owners):
    """
    DEPRECATED!!!
    return the latest version of our base AMI
    we can't use tags for this, so we have only the name as resource
    note: this functionality is deprecated since this only works for "old"
    baseami.
    """
    client_ec2 = awsclient.get_client('ec2')
    image_filter = [
        {
            'Name': 'state',
            'Values': [
                'available',
            ]
        },
    ]
    latest_ts = maya.MayaDT(0).datetime(naive=True)
    latest_version = StrictVersion('0.0.0')
    latest_id = None
    images = client_ec2.describe_images(
        Owners=owners,
        Filters=image_filter
    )
    for image in images['Images']:
        match = re.search(r'(Ops_Base-Image)_(\d+.\d+.\d+)_(\d+)$', image['Name'])
        if not match:
            continue
        version = StrictVersion(match.group(2))
        creation_date = parse_ts(image['CreationDate'])
        # the newest creation date wins, but never step down to an older version
        if creation_date > latest_ts and version >= latest_version:
            latest_id = image['ImageId']
            latest_ts = creation_date
            latest_version = version
    return latest_id
|
DEPRECATED!!!
return the latest version of our base AMI
we can't use tags for this, so we have only the name as resource
note: this functionality is deprecated since this only works for "old"
baseami.
|
entailment
|
def _add_default_arguments(parser):
"""Add the default arguments to the parser.
:param argparse.ArgumentParser parser: The argument parser
"""
parser.add_argument('-c', '--config', action='store', dest='config',
help='Path to the configuration file')
parser.add_argument('-f', '--foreground', action='store_true', dest='foreground',
help='Run the application interactively')
|
Add the default arguments to the parser.
:param argparse.ArgumentParser parser: The argument parser
|
entailment
|
def _str(val):
"""
Ensures that the val is the default str() type for python2 or 3
"""
if str == bytes:
if isinstance(val, str):
return val
else:
return str(val)
else:
if isinstance(val, str):
return val
else:
return str(val, 'ascii')
|
Ensures that the val is the default str() type for python2 or 3
|
entailment
|
def _prepare_long_request(self, url, api_query):
    """
    Use requests.Request and requests.PreparedRequest to produce the
    body (and boundary value) of a multipart/form-data; POST request as
    detailed in https://www.mediawiki.org/wiki/API:Edit#Large_texts
    """
    # fields carrying wikitext must be sent utf-8 encoded with an explicit
    # charset and transfer encoding; everything else goes through as-is
    text_fields = ('title', 'text', 'summary')
    partlist = []
    for key, value in iteritems(api_query):
        if key in text_fields:
            partlist.append((key,
                             (None,
                              value.encode('utf-8'),
                              'text/plain; charset=UTF-8',
                              {'Content-Transfer-Encoding': '8bit'})))
        else:
            partlist.append((key, (None, value)))
    access_token = session['mwoauth_access_token']
    auth1 = OAuth1(
        self.consumer_token.key,
        client_secret=self.consumer_token.secret,
        resource_owner_key=access_token['key'],
        resource_owner_secret=access_token['secret'])
    return Request(
        url=url, files=partlist, auth=auth1, method="post").prepare()
|
Use requests.Request and requests.PreparedRequest to produce the
body (and boundary value) of a multipart/form-data; POST request as
detailed in https://www.mediawiki.org/wiki/API:Edit#Large_texts
|
entailment
|
def request(self, api_query, url=None):
    """
    e.g. {'action': 'query', 'meta': 'userinfo'}. format=json not required
    function returns a python dict that resembles the api's json response
    """
    api_query['format'] = 'json'
    api_url = self.api_url if url is None else url + "/api.php"
    size = sum(sys.getsizeof(v) for k, v in iteritems(api_query))
    if size > 1024 * 8:
        # if request is bigger than 8 kB (the limit is somewhat arbitrary,
        # see https://www.mediawiki.org/wiki/API:Edit#Large_texts) then
        # transmit as multipart message
        req = self._prepare_long_request(url=api_url,
                                         api_query=api_query)
        req.send()
        response = req.response
    else:
        access_token = session['mwoauth_access_token']
        auth1 = OAuth1(
            self.consumer_token.key,
            client_secret=self.consumer_token.secret,
            resource_owner_key=access_token['key'],
            resource_owner_secret=access_token['secret'])
        response = requests.post(api_url, data=api_query, auth=auth1)
    return response.json() if self.return_json else response.text
|
e.g. {'action': 'query', 'meta': 'userinfo'}. format=json not required
function returns a python dict that resembles the api's json response
|
entailment
|
def dump(pif, fp, **kwargs):
    """
    Convert a single Physical Information Object, or a list of such objects, into a JSON-encoded text file.
    :param pif: Object or list of objects to serialize.
    :param fp: File-like object supporting .write() method to write the serialized object(s) to.
    :param kwargs: Any options available to json.dump().
    """
    # PifEncoder handles the serialization of Pio-derived objects
    return json.dump(pif, fp, cls=PifEncoder, **kwargs)
|
Convert a single Physical Information Object, or a list of such objects, into a JSON-encoded text file.
:param pif: Object or list of objects to serialize.
:param fp: File-like object supporting .write() method to write the serialized object(s) to.
:param kwargs: Any options available to json.dump().
|
entailment
|
def load(fp, class_=None, **kwargs):
    """
    Convert content in a JSON-encoded text file to a Physical Information Object or a list of such objects.
    :param fp: File-like object supporting .read() method to deserialize from.
    :param class_: Subclass of :class:`.Pio` to produce, if not unambiguous
    :param kwargs: Any options available to json.load().
    :return: Single object derived from :class:`.Pio` or a list of such object.
    """
    # decode the JSON first, then lift the raw structure into Pio objects
    raw = json.load(fp, **kwargs)
    return loado(raw, class_=class_)
|
Convert content in a JSON-encoded text file to a Physical Information Object or a list of such objects.
:param fp: File-like object supporting .read() method to deserialize from.
:param class_: Subclass of :class:`.Pio` to produce, if not unambiguous
:param kwargs: Any options available to json.load().
:return: Single object derived from :class:`.Pio` or a list of such object.
|
entailment
|
def loads(s, class_=None, **kwargs):
    """
    Convert content in a JSON-encoded string to a Physical Information Object or a list of such objects.
    :param s: String to deserialize.
    :param class_: Subclass of :class:`.Pio` to produce, if not unambiguous
    :param kwargs: Any options available to json.loads().
    :return: Single object derived from :class:`.Pio` or a list of such object.
    """
    # decode the JSON first, then lift the raw structure into Pio objects
    raw = json.loads(s, **kwargs)
    return loado(raw, class_=class_)
|
Convert content in a JSON-encoded string to a Physical Information Object or a list of such objects.
:param s: String to deserialize.
:param class_: Subclass of :class:`.Pio` to produce, if not unambiguous
:param kwargs: Any options available to json.loads().
:return: Single object derived from :class:`.Pio` or a list of such object.
|
entailment
|
def loado(obj, class_=None):
    """
    Convert a dictionary or a list of dictionaries into a single Physical Information Object or a list of such objects.
    :param obj: Dictionary or list to convert to Physical Information Objects.
    :param class_: Subclass of :class:`.Pio` to produce, if not unambiguous
    :return: Single object derived from :class:`.Pio` or a list of such object.
    """
    if isinstance(obj, dict):
        return _dict_to_pio(obj, class_=class_)
    if isinstance(obj, list):
        return [_dict_to_pio(entry, class_=class_) for entry in obj]
    raise ValueError('expecting list or dictionary as outermost structure')
|
Convert a dictionary or a list of dictionaries into a single Physical Information Object or a list of such objects.
:param obj: Dictionary or list to convert to Physical Information Objects.
:param class_: Subclass of :class:`.Pio` to produce, if not unambiguous
:return: Single object derived from :class:`.Pio` or a list of such object.
|
entailment
|
def _dict_to_pio(d, class_=None):
    """
    Convert a single dictionary object to a Physical Information Object.
    :param d: Dictionary to convert.
    :param class_: Subclass of :class:`.Pio` to produce, if not unambiguous
    :return: Single object derived from :class:`.Pio`.
    """
    d = keys_to_snake_case(d)
    if class_:
        return class_(**d)
    if 'category' not in d:
        raise ValueError('Dictionary does not contains a category field: ' + ', '.join(d.keys()))
    # map category strings onto their classes; the alloy categories are
    # legacy support
    known_categories = {
        'system': System,
        'system.chemical': ChemicalSystem,
        'system.chemical.alloy': Alloy,
        'system.chemical.alloy.phase': ChemicalSystem,
    }
    category = d['category']
    if category in known_categories:
        return known_categories[category](**d)
    raise ValueError('Dictionary does not contain a valid top-level category: ' + str(category))
|
Convert a single dictionary object to a Physical Information Object.
:param d: Dictionary to convert.
:param class_: Subclass of :class:`.Pio` to produce, if not unambiguous
:return: Single object derived from :class:`.Pio`.
|
entailment
|
def get_command(arguments):
    """Utility function to extract command from docopt arguments.
    :param arguments:
    :return: command
    """
    def _is_command(key):
        # commands are the truthy keys that are neither options ('-...')
        # nor positional arguments ('<...>')
        return not key.startswith(('-', '<')) and arguments[key]

    commands = [k for k in arguments if _is_command(k)]
    if len(commands) != 1:
        raise Exception('invalid command line!')
    return commands[0]
|
Utility function to extract command from docopt arguments.
:param arguments:
:return: command
|
entailment
|
def dispatch(cls, arguments, **kwargs):
    """Dispatch arguments parsed by docopt to the cmd with matching spec.

    Iterates the registered ``(spec, func)`` pairs in order; the first
    spec whose options, commands and positional arguments all match the
    parsed ``arguments`` wins and its function is called. First match wins.

    spec: all '-' elements must match, all others are False; '<sth>'
    elements are converted to call args in order of appearance.

    :param arguments: mapping produced by docopt
    :param kwargs: forwarded to the matched function
    :return: exit_code of the matched function
    :raises Exception: if no registered spec matches
    """
    for spec, func in cls._specs:
        args = []  # specified args in order of appearance
        options = [k for k in arguments.keys()
                   if k.startswith('-') and (arguments[k] or k in spec)]
        cmds = [k for k in arguments.keys()
                if not (k.startswith('-') or k.startswith('<')) and arguments[k]]
        args_spec = [k for k in spec if k.startswith('<')]
        cmd_spec = [k for k in spec
                    if not (k.startswith('-') or k.startswith('<'))]
        for element in spec:
            if element.startswith('-'):
                # element is an option
                if element in options:
                    args.append(arguments.get(element, False))
                    options.remove(element)
            elif element.startswith('<') and \
                    arguments.get(element) is not False:
                # element is a positional argument
                args.append(arguments.get(element))
                if element in args_spec:
                    args_spec.remove(element)
            else:
                # element is a command
                if element in cmds and element in cmd_spec:
                    cmds.remove(element)
                    cmd_spec.remove(element)
        if options:
            continue  # not all options have been matched
        if cmds:
            continue  # not all cmds from command line have been matched
        if args_spec:
            continue  # not all args from spec have been provided
        if cmd_spec:
            continue  # not all cmds from spec have been provided
        # all options and cmds matched : call the cmd
        # TODO leave out all args to deal with "empty" signature
        return func(*args, **kwargs)
    # no matching spec found
    raise Exception('No implementation for spec: %s' % arguments)
|
Dispatch arguments parsed by docopt to the cmd with matching spec.
:param arguments:
:param kwargs:
:return: exit_code
|
entailment
|
def get_att(self, parameter, as_reference=True):
    """Retrieve an output attribute from the wrapped custom stack.

    :param parameter: The output parameter which should be retrieved
    :param as_reference: Is the parameter a reference (Default) or a string
    :return: Value of parameter to retrieve
    """
    target = troposphere.Ref(parameter) if as_reference else parameter
    return troposphere.GetAtt(self.__custom_stack_obj, target)
|
Retrieves an attribute from an existing stack
:param parameter: The output parameter which should be retrieved
:param as_reference: Is the parameter a reference (Default) or a string
:return: Value of parameter to retrieve
|
entailment
|
def _get_notification_spec(self, lambda_arn):
    """Build one S3 bucket-notification configuration entry.

    :param lambda_arn: ARN of the lambda function to notify
    :return: dict with Id/Events/LambdaFunctionArn and an optional
        ``Filter`` built from 'prefix'/'suffix' keys in the config
    """
    lambda_name = base.get_lambda_name(lambda_arn)
    notification_spec = {
        'Id': self._make_notification_id(lambda_name),
        'Events': [e for e in self._config['events']],
        'LambdaFunctionArn': lambda_arn
    }
    # Optional S3 key filters: 'prefix'/'suffix' entries in the config
    # become capitalized FilterRules. (Removed a large block of dead,
    # commented-out 'key_filters' code.)
    filter_rules = []
    for filter_type in ['prefix', 'suffix']:
        if filter_type in self._config:
            filter_rules.append({
                'Name': filter_type.capitalize(),
                'Value': self._config[filter_type]
            })
    if filter_rules:
        notification_spec['Filter'] = {'Key': {'FilterRules': filter_rules}}
    return notification_spec
|
if 'key_filters' in self._config:
filters_spec = {'Key': {'FilterRules': [] } }
# I do not think this is a useful structure:
for filter in self._config['key_filters']:
if 'type' in filter and 'value' in filter and filter['type'] in ('prefix', 'suffix'):
rule = {'Name': filter['type'].capitalize(), 'Value': filter['value'] }
filters_spec['Key']['FilterRules'].append(rule)
notification_spec['Filter'] = filters_spec
|
entailment
|
def convert_representation(self, i):
    """
    Render the integer *i* according to ``self.number_representation``:
    'unsigned' (as-is), 'signed' (two's complement at the interpreter's
    bit width) or 'hex'.
    """
    mode = self.number_representation
    if mode == 'unsigned':
        return i
    if mode == 'signed':
        sign_bit = 1 << (self.interpreter._bit_width - 1)
        if i & sign_bit:
            mask = 2 ** self.interpreter._bit_width - 1
            return -((~i + 1) & mask)
        return i
    if mode == 'hex':
        return hex(i)
|
Return the proper representation for the given integer
|
entailment
|
def magic_generate_random(self, line):
    """
    Set the generate random flag, unset registers and memory will return a random value.

    Usage:
    Call the magic by itself or with `true` to have registers and memory return a random value
    if they are unset and read from, much like how real hardware would work.
    Defaults to False, or to not generate random values

    `%generate_random`
    or
    `%generate_random true`
    or
    `%generate_random false`
    """
    line = line.strip().lower()
    if not line or line == 'true':
        self.interpreter.generate_random = True
    elif line == 'false':
        self.interpreter.generate_random = False
    else:
        # Typo fix: user-facing error text previously read "unknwon".
        message = "unknown value '{}'".format(line)
        stream_content = {'name': 'stderr', 'text': message}
        self.send_response(self.iopub_socket, 'stream', stream_content)
        return {'status': 'error',
                'execution_count': self.execution_count,
                'ename': ValueError.__name__,
                'evalue': message,
                'traceback': '???'}
|
Set the generate random flag, unset registers and memory will return a random value.
Usage:
Call the magic by itself or with `true` to have registers and memory return a random value
if they are unset and read from, much like how real hardware would work.
Defaults to False, or to not generate random values
`%generate_random`
or
`%generate_random true`
or
`%generate_random false`
|
entailment
|
def magic_postpone_execution(self, line):
    """
    Postpone execution of instructions until explicitly run

    Usage:
    Call this magic with `true` or nothing to postpone execution,
    or call with `false` to execute each instruction when evaluated.
    This defaults to True.
    Note that each cell is executed only after all lines in
    the cell have been evaluated properly.

    `%postpone_execution`
    or
    `%postpone_execution true`
    or
    `%postpone_execution false`
    """
    line = line.strip().lower()
    if not line or line == 'true':
        self.interpreter.postpone_execution = True
    elif line == 'false':
        self.interpreter.postpone_execution = False
    else:
        # Typo fix: user-facing error text previously read "unknwon".
        message = "unknown value '{}'".format(line)
        stream_content = {'name': 'stderr', 'text': message}
        self.send_response(self.iopub_socket, 'stream', stream_content)
        return {'status': 'error',
                'execution_count': self.execution_count,
                'ename': ValueError.__name__,
                'evalue': message,
                'traceback': '???'}
|
Postpone execution of instructions until explicitly run
Usage:
Call this magic with `true` or nothing to postpone execution,
or call with `false` to execute each instruction when evaluated.
This defaults to True.
Note that each cell is executed only executed after all lines in
the cell have been evaluated properly.
`%postpone_execution`
or
`%postpone_execution true`
or
`%postpone_execution false`
|
entailment
|
def magic_register(self, line):
    """
    Print out the current value of a register.

    Usage:
    Pass in the register, or a list of registers separated by spaces.
    A range of registers can be entered by separating them by a hyphen.

    `%reg R1`
    or
    `%reg R0 R5 R6`
    or
    `%reg R8-R12`
    """
    lines = []
    for token in [tok.strip() for tok in line.replace(',', '').split()]:
        if '-' in token:
            # We have a range (Rn-Rk)
            lo_reg, hi_reg = token.split('-')
            # TODO do we want to allow just numbers?
            lo = re.search(self.interpreter.REGISTER_REGEX, lo_reg).groups()[0]
            hi = re.search(self.interpreter.REGISTER_REGEX, hi_reg).groups()[0]
            lo = self.interpreter.convert_to_integer(lo)
            hi = self.interpreter.convert_to_integer(hi)
            for idx in range(lo, hi + 1):
                name = lo_reg[0] + str(idx)
                value = self.convert_representation(self.interpreter.register[name])
                lines.append("{}: {}\n".format(name, value))
        else:
            value = self.convert_representation(self.interpreter.register[token])
            lines.append("{}: {}\n".format(token, value))
    stream_content = {'name': 'stdout', 'text': ''.join(lines)}
    self.send_response(self.iopub_socket, 'stream', stream_content)
|
Print out the current value of a register
Usage:
Pass in the register, or a list of registers separated by spaces
A list of registeres can be entered by separating them by a hyphen
`%reg R1`
or
`%reg R0 R5 R6`
or
`%reg R8-R12`
|
entailment
|
def magic_memory(self, line):
    """
    Print out the current value of memory.

    Usage:
    Pass in the bytes of memory to read, separated by spaces.
    A range of memory contents can be entered by separating two
    addresses with a hyphen.

    `%mem 4 5`
    or
    `%mem 8-12`
    """
    # TODO add support for directives
    lines = []
    for token in [tok.strip() for tok in line.replace(',', '').split()]:
        if '-' in token:
            # We have a range (n-k)
            lo_txt, hi_txt = token.split('-')
            lo = re.search(self.interpreter.IMMEDIATE_NUMBER, lo_txt).groups()[0]
            hi = re.search(self.interpreter.IMMEDIATE_NUMBER, hi_txt).groups()[0]
            lo = self.interpreter.convert_to_integer(lo)
            hi = self.interpreter.convert_to_integer(hi)
            for addr in range(lo, hi + 1):
                value = self.convert_representation(self.interpreter.memory[addr])
                lines.append("{}: {}\n".format(addr, value))
        else:
            # TODO fix what is the key for memory (currently it's an int, but registers are strings, should it be the same?)
            value = self.convert_representation(
                self.interpreter.memory[self.interpreter.convert_to_integer(token)])
            lines.append("{}: {}\n".format(token, value))
    stream_content = {'name': 'stdout', 'text': ''.join(lines)}
    self.send_response(self.iopub_socket, 'stream', stream_content)
|
Print out the current value of memory
Usage:
Pass in the byte of memory to read, separated by spaced
A list of memory contents can be entered by separating them by a hyphen
`%mem 4 5`
or
`%mem 8-12`
|
entailment
|
def magic_run(self, line):
    """
    Run the current program.

    Usage:
    Call with a number to run that many steps,
    or call with no arguments to run to the end of the current program

    `%run`
    or
    `%run 1`
    """
    # Default to "run until the program ends" when no step count is given.
    i = float('inf')
    if line.strip():
        i = int(line)
    try:
        # Record warnings raised during execution so they can be relayed
        # to the notebook frontend instead of printed kernel-side.
        with warnings.catch_warnings(record=True) as w:
            self.interpreter.run(i)
            for warning_message in w:
                # TODO should this be stdout or stderr
                stream_content = {'name': 'stdout', 'text': 'Warning: ' + str(warning_message.message) + '\n'}
                self.send_response(self.iopub_socket, 'stream', stream_content)
    except iarm.exceptions.EndOfProgram as e:
        # Report which instruction the program ended on; PC has already
        # advanced, so look one entry back. The instruction function's
        # name prefix (up to the first '_') identifies the mnemonic.
        f_name = self.interpreter.program[self.interpreter.register['PC'] - 1].__name__
        f_name = f_name[:f_name.find('_')]
        message = "Error in {}: ".format(f_name)
        stream_content = {'name': 'stdout', 'text': message + str(e) + '\n'}
        self.send_response(self.iopub_socket, 'stream', stream_content)
    except Exception as e:
        # Any other failure is surfaced to the frontend as a kernel
        # error reply.
        for err in e.args:
            stream_content = {'name': 'stderr', 'text': str(err)}
            self.send_response(self.iopub_socket, 'stream', stream_content)
        return {'status': 'error',
                'execution_count': self.execution_count,
                'ename': type(e).__name__,
                'evalue': str(e),
                'traceback': '???'}
|
Run the current program
Usage:
Call with a numbe rto run that many steps,
or call with no arguments to run to the end of the current program
`%run`
or
`%run 1`
|
entailment
|
def magic_help(self, line):
    """
    Print out the help for magics.

    Usage:
    Call help with no arguments to list all magics,
    or call it with a magic or instruction to print out its help info.

    `%help`
    or
    `%help run`
    """
    topic = line.strip()
    if not topic:
        # No argument: list every registered magic.
        for name in self.magics:
            self.send_response(self.iopub_socket, 'stream',
                               {'name': 'stdout', 'text': "%{}\n".format(name)})
    elif topic in self.magics:
        # its a magic
        self.send_response(self.iopub_socket, 'stream',
                           {'name': 'stdout',
                            'text': "{}\n{}".format(topic, self.magics[topic].__doc__)})
    elif topic in self.interpreter.ops:
        # it's an instruction
        self.send_response(self.iopub_socket, 'stream',
                           {'name': 'stdout',
                            'text': "{}\n{}".format(topic, self.interpreter.ops[topic].__doc__)})
    else:
        self.send_response(self.iopub_socket, 'stream',
                           {'name': 'stderr',
                            'text': "'{}' not a known magic or instruction".format(topic)})
|
Print out the help for magics
Usage:
Call help with no arguments to list all magics,
or call it with a magic to print out it's help info.
`%help`
or
`%help run
|
entailment
|
def export_to_swagger(awsclient, api_name, stage_name, api_description,
                      lambdas, custom_hostname=False, custom_base_path=False):
    """Export the API design as swagger file.

    Writes the rendered template to ``swagger_export.yaml`` in the
    current working directory.

    :param awsclient:
    :param api_name:
    :param stage_name:
    :param api_description:
    :param lambdas:
    :param custom_hostname:
    :param custom_base_path:
    """
    print('Exporting to swagger...')
    api = _api_by_name(awsclient, api_name)
    if api is not None:
        print(json2table(api))
        api_id = api['id']
        client_api = awsclient.get_client('apigateway')
        template_variables = _template_variables_to_dict(
            client_api,
            api_name,
            api_description,
            stage_name,
            api_id,
            lambdas,
            custom_hostname,
            custom_base_path)
        content = _compile_template(SWAGGER_FILE, template_variables)
        # Use a context manager so the file handle is always closed
        # (the previous version leaked the open handle).
        with open('swagger_export.yaml', 'w') as swagger_file:
            swagger_file.write(content)
    else:
        print('API name unknown')
|
Export the API design as swagger file.
:param api_name:
:param stage_name:
:param api_description:
:param lambdas:
:param custom_hostname:
:param custom_base_path:
|
entailment
|
def list_apis(awsclient):
    """Print every REST API registered in the account."""
    client_api = awsclient.get_client('apigateway')
    for api in client_api.get_rest_apis()['items']:
        print(json2table(api))
|
List APIs in account.
|
entailment
|
def deploy_api(awsclient, api_name, api_description, stage_name, api_key,
               lambdas, cache_cluster_enabled, cache_cluster_size, method_settings=None):
    """Deploy API Gateway to AWS cloud.

    :param awsclient:
    :param api_name:
    :param api_description:
    :param stage_name:
    :param api_key:
    :param lambdas:
    :param cache_cluster_enabled:
    :param cache_cluster_size:
    :param method_settings:
    """
    def _finalize(wire_key):
        # Shared tail of both branches: grant lambda permissions, create
        # the deployment, apply stage settings and (for a freshly created
        # API) wire the api key.
        api = _api_by_name(awsclient, api_name)
        if api is None:
            print('API name unknown')
            return
        _ensure_lambdas_permissions(awsclient, lambdas, api)
        _create_deployment(awsclient, api_name, stage_name,
                           cache_cluster_enabled, cache_cluster_size)
        _update_stage(awsclient, api['id'], stage_name, method_settings)
        if wire_key:
            _wire_api_key(awsclient, api_name, api_key, stage_name)

    if not _api_exists(awsclient, api_name):
        if os.path.isfile(SWAGGER_FILE):
            # this does an import from swagger file
            _import_from_swagger(awsclient, api_name, api_description,
                                 stage_name, lambdas)
        else:
            print('No swagger file (%s) found' % SWAGGER_FILE)
        _finalize(wire_key=True)
    else:
        if os.path.isfile(SWAGGER_FILE):
            _update_from_swagger(awsclient, api_name, api_description,
                                 stage_name, lambdas)
        else:
            _update_api()
        _finalize(wire_key=False)
|
Deploy API Gateway to AWS cloud.
:param awsclient:
:param api_name:
:param api_description:
:param stage_name:
:param api_key:
:param lambdas:
:param cache_cluster_enabled:
:param cache_cluster_size:
:param method_settings:
|
entailment
|
def delete_api(awsclient, api_name):
    """Delete the named API Gateway API.

    :param api_name:
    """
    _sleep()
    client_api = awsclient.get_client('apigateway')
    print('deleting api: %s' % api_name)
    api = _api_by_name(awsclient, api_name)
    if api is None:
        print('API name unknown')
        return
    print(json2table(api))
    response = client_api.delete_rest_api(restApiId=api['id'])
    print(json2table(response))
|
Delete the API.
:param api_name:
|
entailment
|
def create_api_key(awsclient, api_name, api_key_name):
    """Create a new API key as reference for api.conf.

    :param api_name: API the key is created for (used in the description)
    :param api_key_name: name of the new key
    :return: api_key id
    """
    _sleep()
    client_api = awsclient.get_client('apigateway')
    print('create api key: %s' % api_key_name)
    new_key = client_api.create_api_key(
        name=api_key_name,
        enabled=True,
        description='Created for ' + api_name
    )
    key_id = new_key['id']
    print('Add this api key \'%s\' to your api.conf' % key_id)
    return key_id
|
Create a new API key as reference for api.conf.
:param api_name:
:param api_key_name:
:return: api_key
|
entailment
|
def delete_api_key(awsclient, api_key):
    """Remove the given API key.

    :param api_key: id of the key to delete
    """
    _sleep()
    client_api = awsclient.get_client('apigateway')
    print('delete api key: %s' % api_key)
    result = client_api.delete_api_key(apiKey=api_key)
    print(json2table(result))
|
Remove API key.
:param api_key:
|
entailment
|
def list_api_keys(awsclient):
    """Print the defined API keys."""
    _sleep()
    client_api = awsclient.get_client('apigateway')
    print('listing api keys')
    for key_item in client_api.get_api_keys()['items']:
        print(json2table(key_item))
|
Print the defined API keys.
|
entailment
|
def deploy_custom_domain(awsclient, api_name, api_target_stage,
                         api_base_path, domain_name, route_53_record,
                         cert_name, cert_arn, hosted_zone_id, ensure_cname):
    """Add custom domain to your API.

    :param api_name:
    :param api_target_stage:
    :param api_base_path:
    :param domain_name:
    :param route_53_record:
    :param cert_name:
    :param cert_arn:
    :param hosted_zone_id:
    :param ensure_cname: also create/verify the Route53 record
    :return: exit_code
    """
    api_base_path = _basepath_to_string_if_null(api_base_path)
    api = _api_by_name(awsclient, api_name)
    if not api:
        print("Api %s does not exist, aborting..." % api_name)
        return 1
    # Create the custom domain if missing, otherwise bring it up to date;
    # either way we need the CloudFront distribution it is served from.
    if _custom_domain_name_exists(awsclient, domain_name):
        response = _update_custom_domain(awsclient, domain_name, cert_name, cert_arn)
    else:
        response = _create_custom_domain(awsclient, domain_name, cert_name, cert_arn)
    cloudfront_distribution = response['distributionDomainName']
    if _base_path_mapping_exists(awsclient, domain_name, api_base_path):
        _ensure_correct_base_path_mapping(awsclient, domain_name,
                                          api_base_path, api['id'],
                                          api_target_stage)
    else:
        _create_base_path_mapping(awsclient, domain_name, api_base_path,
                                  api_target_stage, api['id'])
    if ensure_cname:
        record_exists, record_correct = \
            _record_exists_and_correct(awsclient, hosted_zone_id,
                                       route_53_record,
                                       cloudfront_distribution)
        if record_correct:
            print('Route53 record correctly set: %s --> %s' % (route_53_record,
                                                               cloudfront_distribution))
        else:
            _ensure_correct_route_53_record(awsclient, hosted_zone_id,
                                            record_name=route_53_record,
                                            record_value=cloudfront_distribution)
            print('Route53 record set: %s --> %s' % (route_53_record,
                                                     cloudfront_distribution))
    else:
        print('Skipping creating and checking DNS record')
    return 0
|
Add custom domain to your API.
:param api_name:
:param api_target_stage:
:param api_base_path:
:param domain_name:
:param route_53_record:
:param ssl_cert:
:param cert_name:
:param cert_arn:
:param hosted_zone_id:
:return: exit_code
|
entailment
|
def get_lambdas(awsclient, config, add_arn=False):
    """Get the list of lambda functions.

    :param config: config dict (uses the 'lambda' section, if present)
    :param add_arn: also resolve each function's deployed ARN via AWS
    :return: list containing lambda entries
    """
    if 'lambda' not in config:
        return []
    client_lambda = awsclient.get_client('lambda')
    result = []
    for entry in config['lambda'].get('entries', []):
        item = {
            'name': entry.get('name', None),
            'alias': entry.get('alias', None),
            'swagger_ref': entry.get('swaggerRef', None)
        }
        if add_arn:
            _sleep()
            response_lambda = client_lambda.get_function(
                FunctionName=item['name'])
            item['arn'] = response_lambda['Configuration']['FunctionArn']
        result.append(item)
    return result
|
Get the list of lambda functions.
:param config:
:param add_arn:
:return: list containing lambda entries
|
entailment
|
def _update_stage(awsclient, api_id, stage_name, method_settings):
    """Helper to apply method_settings to stage.

    :param awsclient:
    :param api_id:
    :param stage_name:
    :param method_settings: mapping of method path -> settings dict
    :return:
    """
    # settings docs in response: https://botocore.readthedocs.io/en/latest/reference/services/apigateway.html#APIGateway.Client.update_stage
    client_api = awsclient.get_client('apigateway')
    operations = _convert_method_settings_into_operations(method_settings)
    if operations:
        print('update method settings for stage')
        _sleep()
        # Response was previously bound to an unused local; it is not
        # needed by callers, so it is discarded.
        client_api.update_stage(
            restApiId=api_id,
            stageName=stage_name,
            patchOperations=operations)
|
Helper to apply method_settings to stage
:param awsclient:
:param api_id:
:param stage_name:
:param method_settings:
:return:
|
entailment
|
def _convert_method_settings_into_operations(method_settings=None):
"""Helper to handle the conversion of method_settings to operations
:param method_settings:
:return: list of operations
"""
# operations docs here: https://tools.ietf.org/html/rfc6902#section-4
operations = []
if method_settings:
for method in method_settings.keys():
for key, value in method_settings[method].items():
if isinstance(value, bool):
if value:
value = 'true'
else:
value = 'false'
operations.append({
'op': 'replace',
'path': method + _resolve_key(key),
'value': value
})
return operations
|
Helper to handle the conversion of method_settings to operations
:param method_settings:
:return: list of operations
|
entailment
|
def generate_settings():
    """
    This command is run when ``default_path`` doesn't exist, or ``init`` is
    run and returns a string representing the default data to put into their
    settings file.
    """
    # Load the example config template shipped next to the base settings
    # module.
    conf_file = os.path.join(os.path.dirname(base_settings.__file__),
                             'example', 'conf.py')
    # NOTE(review): the file handle is never closed explicitly; relies on
    # garbage collection.
    conf_template = open(conf_file).read()
    default_url = 'http://salmon.example.com'
    # NOTE(review): raw_input exists only on Python 2 — this function
    # appears to target Python 2; confirm before running under Python 3.
    site_url = raw_input("What will be the URL for Salmon? [{0}]".format(
        default_url))
    site_url = site_url or default_url
    # Random secret/API keys baked into the generated config.
    # NOTE(review): b64encode returns bytes on Python 3, which would render
    # as b'...' in the formatted template — presumably fine on Python 2.
    secret_key = base64.b64encode(os.urandom(KEY_LENGTH))
    api_key = base64.b64encode(os.urandom(KEY_LENGTH))
    output = conf_template.format(api_key=api_key, secret_key=secret_key,
                                  site_url=site_url)
    return output
|
This command is run when ``default_path`` doesn't exist, or ``init`` is
run and returns a string representing the default data to put into their
settings file.
|
entailment
|
def configure_app(**kwargs):
    """Build up the settings using the same method as logan."""
    cli_args, command, command_args = parse_args(sys.argv[1:])
    parser = OptionParser()
    parser.add_option('--config', metavar='CONFIG')
    options, logan_args = parser.parse_args(cli_args)
    logan_configure(config_path=options.config, **kwargs)
|
Builds up the settings using the same method as logan
|
entailment
|
def _reset_changes(self):
"""Stores current values for comparison later"""
self._original = {}
if self.last_updated is not None:
self._original['last_updated'] = self.last_updated
|
Stores current values for comparison later
|
entailment
|
def whisper_filename(self):
    """Build a file path to the Whisper database.

    :return: sanitized ``<source>__<name>.wsp`` filename
    """
    # Replaces the fragile ``a and b or ''`` idiom with an explicit
    # conditional expression (same semantics: falsy source name -> '').
    source_name = (self.source.name or '') if self.source_id else ''
    return get_valid_filename("{0}__{1}.wsp".format(source_name,
                                                    self.name))
|
Build a file path to the Whisper database
|
entailment
|
def get_value_display(self):
    """Return ``latest_value`` formatted for human consumption."""
    value = self.latest_value
    display = self.display_as
    if display == 'percentage':
        return '{0}%'.format(value)
    if display == 'boolean':
        return bool(value)
    if display == 'byte':
        return defaultfilters.filesizeformat(value)
    if display == 'second':
        return time.strftime('%H:%M:%S', time.gmtime(value))
    # Unknown display modes fall back to the raw value.
    return value
|
Human friendly value output
|
entailment
|
def time_between_updates(self):
    """Return the delta between current and previous ``last_updated``."""
    if 'last_updated' not in self._original:
        # No snapshot taken yet -> no elapsed time to report.
        return 0
    return self.last_updated - self._original['last_updated']
|
Time between current `last_updated` and previous `last_updated`
|
entailment
|
def do_transform(self):
    """Apply the transformation (if it exists) to the latest_value.

    On an invalid expression the transform is logged and cleared rather
    than raised, leaving ``latest_value`` untouched.
    """
    if not self.transform:
        return
    try:
        self.latest_value = utils.Transform(
            expr=self.transform, value=self.latest_value,
            timedelta=self.time_between_updates().total_seconds()).result()
    except (TypeError, ValueError):
        # Bug fix: previously referenced the misspelled ``self.transfrom``,
        # which raised AttributeError inside this handler. Also use
        # ``warning`` (``warn`` is a deprecated alias).
        logger.warning("Invalid transformation '%s' for metric %s",
                       self.transform, self.pk)
        self.transform = ''
|
Apply the transformation (if it exists) to the latest_value
|
entailment
|
def do_counter_conversion(self):
    """For counter metrics, replace ``latest_value`` with its delta."""
    if not self.is_counter:
        return
    current = self.latest_value
    # First sample has nothing to diff against, so diff with itself (0).
    previous = (current if self._previous_counter_value is None
                else self._previous_counter_value)
    self._previous_counter_value = current
    self.latest_value = current - previous
|
Update latest value to the diff between it and the previous value
|
entailment
|
def source_group_receiver(self, sender, source, signal, **kwargs):
    """
    Relay source group signals to the appropriate spec strategy.

    :param sender: the source group emitting the signal
    :param source: the image source the signal concerns
    :param signal: signal object; mapped to a strategy callback name
        via ``self._signals``
    """
    # Imported lazily, presumably to avoid an import cycle — confirm.
    from imagekit.cachefiles import ImageCacheFile
    source_group = sender
    # Ignore signals from unregistered groups.
    if source_group not in self._source_groups:
        return
    #OVERRIDE HERE -- pass specs into generator object
    # NOTE(review): ``spec_data_field_hash`` is a module-level mapping of
    # generator id -> spec data, defined elsewhere in this module.
    specs = [generator_registry.get(id, source=source, specs=spec_data_field_hash[id]) for id in
             self._source_groups[source_group]]
    callback_name = self._signals[signal]
    #END OVERRIDE
    # Invoke the strategy callback for each spec's cache file.
    for spec in specs:
        file = ImageCacheFile(spec)
        call_strategy_method(file, callback_name)
|
Relay source group signals to the appropriate spec strategy.
|
entailment
|
def replace_variable(self, variable):
    """Substitute a variable name with its numeric value.

    :param variable: 'x' (current value) or 't' (timedelta)
    :return: the corresponding numeric value
    :raises ValueError: for any other variable name
    """
    if variable == 'x':
        return self.value
    if variable == 't':
        return self.timedelta
    # Bug fix: the variable was previously passed as a second constructor
    # argument instead of being %-formatted into the message.
    raise ValueError("Invalid variable %s" % variable)
|
Substitute variables with numeric values
|
entailment
|
def result(self):
    """Parse ``self.expr`` and evaluate its single expression node."""
    # ast.parse yields Module(body=[Expr(value=...)]); hand the inner
    # expression node to the evaluator.
    tree = ast.parse(self.expr)
    expression_node = tree.body[0].value
    return self.eval_(expression_node)
|
Evaluate expression and return result
|
entailment
|
def get_people(self, user_alias=None):
    """
    Fetch a user's profile information by scraping their profile page.

    :param user_alias: the user's id/alias; defaults to the API's own user
    :return: dict of profile fields, ``None`` if the profile block is
        missing, or ``None`` implicitly when parsing fails (logged).
    """
    user_alias = user_alias or self.api.user_alias
    content = self.api.req(API_PEOPLE_HOME % user_alias).content
    # Normalize <br /> to newlines before parsing so string extraction
    # keeps line breaks.
    xml = self.api.to_xml(re.sub(b'<br />', b'\n', content))
    try:
        # The whole profile lives under #profile; bail out if absent.
        xml_user = xml.xpath('//*[@id="profile"]')
        if not xml_user:
            return None
        else:
            xml_user = xml_user[0]
        avatar = first(xml_user.xpath('.//img/@src'))
        city = first(xml_user.xpath('.//div[@class="user-info"]/a/text()'))
        city_url = first(xml_user.xpath('.//div[@class="user-info"]/a/@href'))
        # Join date is the text before the "加入" (joined) marker.
        text_created_at = xml_user.xpath('.//div[@class="pl"]/text()')[1]
        created_at = re.match(r'.+(?=加入)', text_created_at.strip()).group()
        xml_intro = first(xml.xpath('//*[@id="intro_display"]'))
        intro = xml_intro.xpath('string(.)') if xml_intro is not None else None
        nickname = first(xml.xpath('//*[@id="db-usr-profile"]//h1/text()'), '').strip() or None
        signature = first(xml.xpath('//*[@id="display"]/text()'))
        # Contact counts are embedded in header/link text and extracted
        # with regexes ("成员N" = members, "N人关注" = followers).
        xml_contact_count = xml.xpath('//*[@id="friend"]/h2')[0]
        contact_count = int(re.search(r'成员(\d+)', xml_contact_count.xpath('string(.)')).groups()[0])
        text_rev_contact_count = xml.xpath('//p[@class="rev-link"]/a/text()')[0]
        rev_contact_count = int(re.search(r'(\d+)人关注', text_rev_contact_count.strip()).groups()[0])
        return {
            'alias': user_alias,
            'url': API_PEOPLE_HOME % user_alias,
            'avatar': avatar,
            'city': city,
            'city_url': city_url,
            'created_at': created_at,
            'intro': intro,
            'nickname': nickname,
            'signature': signature,
            'contact_count': contact_count,
            'rev_contact_count': rev_contact_count,
        }
    except Exception as e:
        # Page layout changes should not crash callers; log and return None.
        self.api.logger.exception('parse people meta error: %s' % e)
|
获取用户信息
:param user_alias: 用户ID
:return:
|
entailment
|
def email_login(request, *, email, **kwargs):
    """
    Given a request, an email and optionally some additional data, ensure that
    a user with the email address exists, and authenticate & login them right
    away if the user is active.

    Returns a tuple consisting of ``(user, created)`` upon success or ``(None,
    None)`` when authentication fails.
    """
    manager = auth.get_user_model()._default_manager
    _unused, created = manager.get_or_create(email=email)
    user = auth.authenticate(request, email=email)
    # The is_active check is possibly redundant.
    if not user or not user.is_active:
        return None, None
    auth.login(request, user)
    return user, created
|
Given a request, an email and optionally some additional data, ensure that
a user with the email address exists, and authenticate & login them right
away if the user is active.
Returns a tuple consisting of ``(user, created)`` upon success or ``(None,
None)`` when authentication fails.
|
entailment
|
def dashboard(request):
    """Render the latest results for each source."""
    sources = (models.Source.objects.all()
               .prefetch_related('metric_set')
               .order_by('name'))
    metrics = SortedDict([(src, src.metric_set.all()) for src in sources])
    # Metrics without a source are grouped under an empty heading.
    orphans = models.Metric.objects.filter(source__isnull=True)
    if orphans:
        metrics[''] = orphans
    # PJAX requests get a slimmer parent template.
    parent_template = ('pjax.html' if request.META.get('HTTP_X_PJAX', False)
                       else 'base.html')
    return render(request, 'metrics/dashboard.html', {
        'source_metrics': metrics,
        'parent_template': parent_template
    })
|
Shows the latest results for each source
|
entailment
|
def _create(self):
    """Create the Whisper database file on disk."""
    db_dir = settings.SALMON_WHISPER_DB_PATH
    if not os.path.exists(db_dir):
        os.makedirs(db_dir)
    archives = [whisper.parseRetentionDef(retention)
                for retention in settings.ARCHIVES.split(",")]
    whisper.create(self.path, archives,
                   xFilesFactor=settings.XFILEFACTOR,
                   aggregationMethod=settings.AGGREGATION_METHOD)
|
Create the Whisper file on disk
|
entailment
|
def _update(self, datapoints):
    """
    Store the given datapoints in the current database.

    :datapoints: list of ``(epoch_timestamp, value)`` tuples, e.g.
        ``[(1368977629, 10)]``
    """
    if len(datapoints) == 1:
        # Single point: plain update is cheaper than update_many.
        timestamp, value = datapoints[0]
        whisper.update(self.path, value, timestamp)
    else:
        whisper.update_many(self.path, datapoints)
|
This method store in the datapoints in the current database.
:datapoints: is a list of tupple with the epoch timestamp and value
[(1368977629,10)]
|
entailment
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.