| sentence1 (string, 52–3.87M chars) | sentence2 (string, 1–47.2k chars) | label (1 class) |
|---|---|---|
def create(self, name, plugin_name, hadoop_version, description=None,
cluster_configs=None, node_groups=None, anti_affinity=None,
net_id=None, default_image_id=None, use_autoconfig=None,
shares=None, is_public=None, is_protected=None,
domain_name=None):
"""Create a Cluster Template."""
data = {
'name': name,
'plugin_name': plugin_name,
'hadoop_version': hadoop_version,
}
return self._do_create(data, description, cluster_configs,
node_groups, anti_affinity, net_id,
default_image_id, use_autoconfig, shares,
is_public, is_protected, domain_name) | Create a Cluster Template. | entailment |
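A hedged usage sketch for the method above; the client wiring is an assumption (in python-saharaclient this create() is typically reached through the cluster_templates manager of an authenticated Client instance):

```python
# Hypothetical call; `client` is assumed to be an authenticated saharaclient
# Client whose cluster_templates manager exposes the create() shown above.
template = client.cluster_templates.create(
    name="vanilla-small",
    plugin_name="vanilla",
    hadoop_version="2.7.1",
    description="two-node test template",  # optional arguments are forwarded to _do_create
)
print(template.id)  # assumes the returned resource carries an id attribute
```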
def update(self, cluster_template_id, name=NotUpdated,
plugin_name=NotUpdated, plugin_version=NotUpdated,
description=NotUpdated, cluster_configs=NotUpdated,
node_groups=NotUpdated, anti_affinity=NotUpdated,
net_id=NotUpdated, default_image_id=NotUpdated,
use_autoconfig=NotUpdated, shares=NotUpdated,
is_public=NotUpdated, is_protected=NotUpdated,
domain_name=NotUpdated):
"""Update a Cluster Template."""
data = {}
self._copy_if_updated(data, name=name,
plugin_name=plugin_name,
plugin_version=plugin_version,
description=description,
cluster_configs=cluster_configs,
node_groups=node_groups,
anti_affinity=anti_affinity,
neutron_management_network=net_id,
default_image_id=default_image_id,
use_autoconfig=use_autoconfig,
shares=shares,
is_public=is_public,
is_protected=is_protected,
domain_name=domain_name)
return self._patch('/cluster-templates/%s' % cluster_template_id,
data, 'cluster_template') | Update a Cluster Template. | entailment |
def get(self, cluster_id, show_progress=False):
"""Get information about a Cluster."""
url = ('/clusters/%(cluster_id)s?%(params)s' %
{"cluster_id": cluster_id,
"params": parse.urlencode({"show_progress": show_progress})})
return self._get(url, 'cluster') | Get information about a Cluster. | entailment |
def update(self, cluster_id, name=NotUpdated, description=NotUpdated,
is_public=NotUpdated, is_protected=NotUpdated,
shares=NotUpdated):
"""Update a Cluster."""
data = {}
self._copy_if_updated(data, name=name, description=description,
is_public=is_public, is_protected=is_protected,
shares=shares)
return self._patch('/clusters/%s' % cluster_id, data) | Update a Cluster. | entailment |
def verification_update(self, cluster_id, status):
"""Start a verification for a Cluster."""
data = {'verification': {'status': status}}
return self._patch("/clusters/%s" % cluster_id, data) | Start a verification for a Cluster. | entailment |
def create(self, name, plugin_name, plugin_version,
cluster_template_id=None, default_image_id=None,
is_transient=None, description=None, cluster_configs=None,
node_groups=None, user_keypair_id=None,
anti_affinity=None, net_id=None, count=None,
use_autoconfig=None, shares=None,
is_public=None, is_protected=None):
"""Launch a Cluster."""
data = {
'name': name,
'plugin_name': plugin_name,
'plugin_version': plugin_version,
}
return self._do_create(data, cluster_template_id, default_image_id,
is_transient, description, cluster_configs,
node_groups, user_keypair_id, anti_affinity,
net_id, count, use_autoconfig, shares,
is_public, is_protected, api_ver=2) | Launch a Cluster. | entailment |
def _get_command(classes):
"""Associates each command class with command depending on setup.cfg
"""
commands = {}
setup_file = os.path.join(
os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')),
'setup.cfg')
for line in open(setup_file, 'r'):
for cl in classes:
if cl in line:
commands[cl] = line.split(' = ')[0].strip().replace('_', ' ')
return commands | Associates each command class with command depending on setup.cfg | entailment |
def get_json(response):
"""Provide backward compatibility with old versions of requests library."""
json_field_or_function = getattr(response, 'json', None)
if callable(json_field_or_function):
return response.json()
else:
return jsonutils.loads(response.content) | Provide backward compatibility with old versions of requests library. | entailment |
def create(self, name, data):
"""Create a Job Binary Internal.
:param str data: raw data of script text
"""
return self._update('/job-binary-internals/%s' %
urlparse.quote(name.encode('utf-8')), data,
'job_binary_internal', dump_json=False) | Create a Job Binary Internal.
:param str data: raw data of script text | entailment |
def update(self, job_binary_id, name=NotUpdated, is_public=NotUpdated,
is_protected=NotUpdated):
"""Update a Job Binary Internal."""
data = {}
self._copy_if_updated(data, name=name, is_public=is_public,
is_protected=is_protected)
return self._patch('/job-binary-internals/%s' % job_binary_id, data) | Update a Job Binary Internal. | entailment |
def autoconvert(string):
"""Try to convert variables into datatypes."""
for fn in (boolify, int, float):
try:
return fn(string)
except ValueError:
pass
return string | Try to convert variables into datatypes. | entailment |
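A short usage sketch, assuming autoconvert and its boolify helper (which is assumed to raise ValueError for non-boolean strings) are in scope:

```python
# boolify is assumed to raise ValueError for anything that is not a boolean string,
# so numeric strings fall through to int() and then float().
assert autoconvert("42") == 42          # int succeeds first
assert autoconvert("3.14") == 3.14      # int fails, float succeeds
assert autoconvert("hello") == "hello"  # nothing converts; original string is returned
```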
def call_plugin(plugin, f, *args, **kwargs):
"""Calls function f from plugin, returns None if plugin does not implement f."""
try:
getattr(plugin, f)
except AttributeError:
return None
if kwargs:
return getattr(plugin, f)(
*args,
**kwargs
)
else:
return getattr(plugin, f)(
*args
) | Calls function f from plugin, returns None if plugin does not implement f. | entailment |
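A minimal sketch of the dispatch behaviour, using a stand-in plugin object (the real plugin interface is not shown here):

```python
class DemoPlugin(object):
    def greet(self, name, punctuation="!"):
        return "hello " + name + punctuation

plugin = DemoPlugin()
print(call_plugin(plugin, "greet", "world"))                   # -> hello world!
print(call_plugin(plugin, "greet", "world", punctuation="?"))  # -> hello world?
print(call_plugin(plugin, "missing_hook"))                     # -> None, plugin does not implement it
```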
def create(self, job_id, cluster_id, input_id=None,
output_id=None, configs=None, interface=None, is_public=None,
is_protected=None):
"""Launch a Job."""
url = "/jobs/%s/execute" % job_id
data = {
"cluster_id": cluster_id,
}
self._copy_if_defined(data, input_id=input_id, output_id=output_id,
job_configs=configs, interface=interface,
is_public=is_public, is_protected=is_protected)
return self._create(url, data, 'job_execution') | Launch a Job. | entailment |
def update(self, obj_id, is_public=NotUpdated, is_protected=NotUpdated):
"""Update a Job Execution."""
data = {}
self._copy_if_updated(data, is_public=is_public,
is_protected=is_protected)
return self._patch('/job-executions/%s' % obj_id, data) | Update a Job Execution. | entailment |
def visitTerminal(self, ctx):
"""Converts case insensitive keywords and identifiers to lowercase
Identifiers in quotes are not lowercased even though there is case sensitivity in quotes for identifiers,
to prevent lowercasing quoted values.
"""
text = str(super().visitTerminal(ctx))
quotes = ["'", '"']
if not (text[0] in quotes and text[-1] in quotes):
text = text.lower()
return Terminal.from_text(text, ctx) | Converts case insensitive keywords and identifiers to lowercase
Identifiers in quotes are not lowercased even though there is case sensitivity in quotes for identifiers,
to prevent lowercasing quoted values. | entailment |
def list(self, search_opts=None):
"""Get a list of Plugins."""
query = base.get_query_string(search_opts)
return self._list('/plugins%s' % query, 'plugins') | Get a list of Plugins. | entailment |
def convert_to_cluster_template(self, plugin_name, hadoop_version,
template_name, filecontent):
"""Convert to cluster template
Create Cluster Template directly, avoiding Cluster Template
mechanism.
"""
resp = self.api.post('/plugins/%s/%s/convert-config/%s' %
(plugin_name,
hadoop_version,
urlparse.quote(template_name)),
data=filecontent)
if resp.status_code != 202:
raise RuntimeError('Failed to upload template file for plugin "%s"'
' and version "%s"' %
(plugin_name, hadoop_version))
else:
return base.get_json(resp)['cluster_template'] | Convert to cluster template
Create Cluster Template directly, avoiding Cluster Template
mechanism. | entailment |
def create(self, name, type, mains=None, libs=None, description=None,
interface=None, is_public=None, is_protected=None):
"""Create a Job."""
data = {
'name': name,
'type': type
}
self._copy_if_defined(data, description=description, mains=mains,
libs=libs, interface=interface,
is_public=is_public, is_protected=is_protected)
return self._create('/jobs', data, 'job') | Create a Job. | entailment |
def list(self, search_opts=None, limit=None,
marker=None, sort_by=None, reverse=None):
"""Get a list of Jobs."""
query = base.get_query_string(search_opts, limit=limit, marker=marker,
sort_by=sort_by, reverse=reverse)
url = "/jobs%s" % query
return self._page(url, 'jobs', limit) | Get a list of Jobs. | entailment |
def update(self, job_id, name=NotUpdated, description=NotUpdated,
is_public=NotUpdated, is_protected=NotUpdated):
"""Update a Job."""
data = {}
self._copy_if_updated(data, name=name, description=description,
is_public=is_public, is_protected=is_protected)
return self._patch('/jobs/%s' % job_id, data) | Update a Job. | entailment |
def _query_for_reverse_geocoding(lat, lng):
"""
Given a lat & lng, what's the string search query.
If the API changes, change this function. Only for internal use.
"""
# have to do some stupid f/Decimal/str stuff to (a) ensure we get as much
# decimal places as the user already specified and (b) to ensure we don't
# get e-5 stuff
return "{0:f},{1:f}".format(Decimal(str(lat)), Decimal(str(lng))) | Given a lat & lng, what's the string search query.
If the API changes, change this function. Only for internal use. | entailment |
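A worked example of why the Decimal(str(...)) round-trip is used, assuming the function above is in scope:

```python
# The caller's precision is preserved and no exponent notation leaks into the query.
assert _query_for_reverse_geocoding(44.32, -0.000001) == "44.32,-0.000001"
# A plain "{0:f}".format(44.32) would instead pad to "44.320000", and values
# smaller than 1e-6 would be rounded away entirely.
```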
def floatify_latlng(input_value):
"""
Work around a JSON dict with string, not float, lat/lngs.
Given anything (list/dict/etc) it will return that thing again, *but* any
dict (at any level) that has only 2 elements lat & lng, will be replaced
with the lat & lng turned into floats.
If the API returns the lat/lng as strings, and not numbers, then this
function will 'clean them up' to be floats.
"""
if isinstance(input_value, collections.Mapping):
if len(input_value) == 2 and sorted(input_value.keys()) == ['lat', 'lng']:
# This dict has only 2 keys 'lat' & 'lng'
return {'lat': float_if_float(input_value["lat"]), 'lng': float_if_float(input_value["lng"])}
else:
return dict((key, floatify_latlng(value)) for key, value in input_value.items())
elif isinstance(input_value, collections.MutableSequence):
return [floatify_latlng(x) for x in input_value]
else:
return input_value | Work around a JSON dict with string, not float, lat/lngs.
Given anything (list/dict/etc) it will return that thing again, *but* any
dict (at any level) that has only 2 elements lat & lng, will be replaced
with the lat & lng turned into floats.
If the API returns the lat/lng as strings, and not numbers, then this
function will 'clean them up' to be floats. | entailment |
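A worked example, assuming floatify_latlng and its float_if_float helper from the same module are in scope:

```python
raw = {"results": [{"geometry": {"lat": "51.50354", "lng": "-0.12768"}, "confidence": 9}]}
cleaned = floatify_latlng(raw)
assert cleaned["results"][0]["geometry"] == {"lat": 51.50354, "lng": -0.12768}
assert cleaned["results"][0]["confidence"] == 9  # everything else passes through unchanged
```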
def geocode(self, query, **kwargs):
"""
Given a string to search for, return the results from OpenCage's Geocoder.
:param string query: String to search for
:returns: Dict results
:raises InvalidInputError: if the query string is not a unicode string
:raises RateLimitExceededError: if you have exceeded the number of queries you can make. Exception says when you can try again
:raises UnknownError: if something goes wrong with the OpenCage API
"""
if six.PY2:
# py3 doesn't have unicode() function, and instead we check the text_type later
try:
query = unicode(query)
except UnicodeDecodeError:
raise InvalidInputError(bad_value=query)
if not isinstance(query, six.text_type):
raise InvalidInputError(bad_value=query)
data = {
'q': query,
'key': self.key
}
# Add user parameters
data.update(kwargs)
url = self.url
response = requests.get(url, params=data)
if (response.status_code == 402 or response.status_code == 429):
# Rate limit exceeded
reset_time = datetime.utcfromtimestamp(response.json()['rate']['reset'])
raise RateLimitExceededError(reset_to=int(response.json()['rate']['limit']), reset_time=reset_time)
elif response.status_code == 500:
raise UnknownError("500 status code from API")
try:
response_json = response.json()
except ValueError:
raise UnknownError("Non-JSON result from server")
if 'results' not in response_json:
raise UnknownError("JSON from API doesn't have a 'results' key")
return floatify_latlng(response_json['results']) | Given a string to search for, return the results from OpenCage's Geocoder.
:param string query: String to search for
:returns: Dict results
:raises InvalidInputError: if the query string is not a unicode string
:raises RateLimitExceededError: if you have exceeded the number of queries you can make. Exception says when you can try again
:raises UnknownError: if something goes wrong with the OpenCage API | entailment |
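A hedged usage sketch; the class name and constructor mirror the opencage package's OpenCageGeocode(key) convention, which is an assumption here:

```python
geocoder = OpenCageGeocode("your-api-key")  # assumed constructor taking the API key
results = geocoder.geocode(u"82 Clerkenwell Road, London")
if results:
    first = results[0]
    print(first["geometry"]["lat"], first["geometry"]["lng"])
```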
def reverse_geocode(self, lat, lng, **kwargs):
"""
Given a latitude & longitude, return an address for that point from OpenCage's Geocoder.
:param lat: Latitude
:param lng: Longitude
:return: Results from OpenCageData
:rtype: dict
:raises RateLimitExceededError: if you have exceeded the number of queries you can make. Exception says when you can try again
:raises UnknownError: if something goes wrong with the OpenCage API
"""
return self.geocode(_query_for_reverse_geocoding(lat, lng), **kwargs) | Given a latitude & longitude, return an address for that point from OpenCage's Geocoder.
:param lat: Latitude
:param lng: Longitude
:return: Results from OpenCageData
:rtype: dict
:raises RateLimitExceededError: if you have exceeded the number of queries you can make. Exception says when you can try again
:raises UnknownError: if something goes wrong with the OpenCage API | entailment |
def acquire(self):
'''
Get a new connection from the pool.
This will return an existing connection, if one is available in the
pool, or create a new connection.
.. warning:: If the pool was created with `maxsize` and `block=True`,
this method may block until a connection is available in the pool.
'''
self._condition.acquire()
try:
# Wait for a connection if there is an upper bound to the pool.
if self._maxsize is not None and self._block:
while not self._pool and self._nconnections == self._maxsize:
self._condition.wait(timeout=None) # block indefinitely
# Check the pool for a non-stale connection.
while self._pool:
pooledconn = self._pool.pop(0) # get least recently used connection
if self._idlettl is not None and (pooledconn.released + self._idlettl) < time.time():
pooledconn.connection.close()
self._nconnections -= 1
else:
return pooledconn.connection
connection = self._dbapi2.connect(*(), **self._connection_args.copy())
self._nconnections += 1
return connection
finally:
self._condition.release() | Get a new connection from the pool.
This will return an existing connection, if one is available in the
pool, or create a new connection.
.. warning:: If the pool was created with `maxsize` and `block=True`,
this method may block until a connection is available in the pool. | entailment |
def release(self, connection):
'''
Return a connection back to the pool.
Prior to release, :py:meth:`ctds.Connection.rollback()` is called to
rollback any pending transaction.
.. note:: This must be called once for every successful call to
:py:meth:`.acquire()`.
:param connection: The connection object returned by
:py:meth:`.acquire()`.
'''
try:
# Rollback the existing connection, closing on failure.
connection.rollback()
except self._dbapi2.Error:
self._close(connection)
return
self._condition.acquire()
try:
if self._maxsize is None or self._maxsize > len(self._pool):
self._pool.append(PooledConnection(connection, time.time()))
self._condition.notify()
else:
self._close(connection)
finally:
self._condition.release() | Return a connection back to the pool.
Prior to release, :py:meth:`ctds.Connection.rollback()` is called to
rollback any pending transaction.
.. note:: This must be called once for every successful call to
:py:meth:`.acquire()`.
:param connection: The connection object returned by
:py:meth:`.acquire()`. | entailment |
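A minimal acquire/release sketch, assuming a pool built around a DB-API 2 module as in the methods above (the pool class itself is not shown):

```python
connection = pool.acquire()      # may block if maxsize is set and block=True
try:
    cursor = connection.cursor()
    cursor.execute("SELECT 1")
finally:
    pool.release(connection)     # exactly one release() per successful acquire()
```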
def finalize(self):
'''
Release all connections contained in the pool.
.. note:: This should be called to cleanly shutdown the pool, i.e.
on process exit.
'''
self._condition.acquire()
try:
if self._nconnections != len(self._pool):
warnings.warn('finalize() called with unreleased connections', RuntimeWarning, 2)
while self._pool:
self._close(self._pool.pop().connection)
self._nconnections = 0
finally:
self._condition.release() | Release all connections contained in the pool.
.. note:: This should be called to cleanly shutdown the pool, i.e.
on process exit. | entailment |
def _dict_merge(dct, merge_dct):
"""Recursive dict merge.
Inspired by :meth:``dict.update()``, instead of updating only top-level
keys, dict_merge recurses down into dicts nested to an arbitrary depth,
updating keys. The ``merge_dct`` is merged into ``dct``.
From https://gist.github.com/angstwad/bf22d1822c38a92ec0a9
Arguments:
dct: dict onto which the merge is executed
merge_dct: dct merged into dct
"""
for k, v in merge_dct.items():
if (k in dct and isinstance(dct[k], dict)
and isinstance(merge_dct[k], collections.Mapping)):
_dict_merge(dct[k], merge_dct[k])
else:
dct[k] = merge_dct[k] | Recursive dict merge.
Inspired by :meth:``dict.update()``, instead of updating only top-level
keys, dict_merge recurses down into dicts nested to an arbitrary depth,
updating keys. The ``merge_dct`` is merged into ``dct``.
From https://gist.github.com/angstwad/bf22d1822c38a92ec0a9
Arguments:
dct: dict onto which the merge is executed
merge_dct: dct merged into dct | entailment |
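A worked example of the recursive merge, assuming _dict_merge above is importable:

```python
dct = {"server": {"host": "localhost", "port": 80}, "debug": False}
_dict_merge(dct, {"server": {"port": 8080}, "debug": True})
# Nested keys are merged in place rather than replaced wholesale:
assert dct == {"server": {"host": "localhost", "port": 8080}, "debug": True}
```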
def _parse_schema(schema, method):
"""
Convert a Schema Object to a Python object.
Args:
schema: An ``OrderedDict`` representing the schema object.
"""
if method and schema.get('readOnly', False):
return _READONLY_PROPERTY
# allOf: Must be valid against all of the subschemas
if 'allOf' in schema:
schema_ = copy.deepcopy(schema['allOf'][0])
for x in schema['allOf'][1:]:
_dict_merge(schema_, x)
return _parse_schema(schema_, method)
# anyOf: Must be valid against any of the subschemas
# TODO(stephenfin): Handle anyOf
# oneOf: Must be valid against exactly one of the subschemas
if 'oneOf' in schema:
# we only show the first one since we can't show everything
return _parse_schema(schema['oneOf'][0], method)
if 'enum' in schema:
# we only show the first one since we can't show everything
return schema['enum'][0]
schema_type = schema.get('type', 'object')
if schema_type == 'array':
# special case oneOf so that we can show examples for all possible
# combinations
if 'oneOf' in schema['items']:
return [
_parse_schema(x, method) for x in schema['items']['oneOf']]
return [_parse_schema(schema['items'], method)]
if schema_type == 'object':
if method and all(v.get('readOnly', False)
for v in schema['properties'].values()):
return _READONLY_PROPERTY
results = []
for name, prop in schema.get('properties', {}).items():
result = _parse_schema(prop, method)
if result != _READONLY_PROPERTY:
results.append((name, result))
return collections.OrderedDict(results)
if (schema_type, schema.get('format')) in _TYPE_MAPPING:
return _TYPE_MAPPING[(schema_type, schema.get('format'))]
return _TYPE_MAPPING[(schema_type, None)] | Convert a Schema Object to a Python object.
Args:
schema: An ``OrderedDict`` representing the schema object. | entailment |
def _example(media_type_objects, method=None, endpoint=None, status=None,
nb_indent=0):
"""
Format examples in `Media Type Object` openapi v3 to HTTP request or
HTTP response example.
If method and endpoint are provided, this function prints a request example;
otherwise status should be provided to print a response example.
Arguments:
media_type_objects (Dict[str, Dict]): Dict containing
Media Type Objects.
method: The HTTP method to use in example.
endpoint: The HTTP route to use in example.
status: The HTTP status to use in example.
"""
indent = ' '
extra_indent = indent * nb_indent
if method is not None:
method = method.upper()
else:
try:
# one of possible values for status might be 'default'.
# in the case, just fallback to '-'
status_text = http_status_codes[int(status)]
except (ValueError, KeyError):
status_text = '-'
for content_type, content in media_type_objects.items():
examples = content.get('examples')
example = content.get('example')
if examples is None:
examples = {}
if not example:
if content_type != 'application/json':
LOG.info('skipping non-JSON example generation.')
continue
example = _parse_schema(content['schema'], method=method)
if method is None:
examples['Example response'] = {
'value': example,
}
else:
examples['Example request'] = {
'value': example,
}
for example in examples.values():
if not isinstance(example['value'], six.string_types):
example['value'] = json.dumps(
example['value'], indent=4, separators=(',', ': '))
for example_name, example in examples.items():
if 'summary' in example:
example_title = '{example_name} - {example[summary]}'.format(
**locals())
else:
example_title = example_name
yield ''
yield '{extra_indent}**{example_title}:**'.format(**locals())
yield ''
yield '{extra_indent}.. sourcecode:: http'.format(**locals())
yield ''
# Print http request example
if method:
yield '{extra_indent}{indent}{method} {endpoint} HTTP/1.1' \
.format(**locals())
yield '{extra_indent}{indent}Host: example.com' \
.format(**locals())
yield '{extra_indent}{indent}Content-Type: {content_type}' \
.format(**locals())
# Print http response example
else:
yield '{extra_indent}{indent}HTTP/1.1 {status} {status_text}' \
.format(**locals())
yield '{extra_indent}{indent}Content-Type: {content_type}' \
.format(**locals())
yield ''
for example_line in example['value'].splitlines():
yield '{extra_indent}{indent}{example_line}'.format(**locals())
yield '' | Format examples in `Media Type Object` openapi v3 to HTTP request or
HTTP response example.
If method and endpoint are provided, this function prints a request example;
otherwise status should be provided to print a response example.
Arguments:
media_type_objects (Dict[str, Dict]): Dict containing
Media Type Objects.
method: The HTTP method to use in example.
endpoint: The HTTP route to use in example.
status: The HTTP status to use in example. | entailment |
def _resolve_refs(uri, spec):
"""Resolve JSON references in a given dictionary.
OpenAPI spec may contain JSON references to its nodes or external
sources, so any attempt to rely that there's some expected attribute
in the spec may fail. So we need to resolve JSON references before
we use it (i.e. replace with referenced object). For details see:
https://tools.ietf.org/html/draft-pbryan-zyp-json-ref-02
The input spec is modified in-place despite being returned from
the function.
"""
resolver = jsonschema.RefResolver(uri, spec)
def _do_resolve(node):
if isinstance(node, collections.Mapping) and '$ref' in node:
with resolver.resolving(node['$ref']) as resolved:
return resolved
elif isinstance(node, collections.Mapping):
for k, v in node.items():
node[k] = _do_resolve(v)
elif isinstance(node, (list, tuple)):
for i in range(len(node)):
node[i] = _do_resolve(node[i])
return node
return _do_resolve(spec) | Resolve JSON references in a given dictionary.
OpenAPI spec may contain JSON references to its nodes or external
sources, so any attempt to rely that there's some expected attribute
in the spec may fail. So we need to resolve JSON references before
we use it (i.e. replace with referenced object). For details see:
https://tools.ietf.org/html/draft-pbryan-zyp-json-ref-02
The input spec is modified in-place despite being returned from
the function. | entailment |
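A worked example, assuming _resolve_refs and the jsonschema dependency above are available; a local fragment reference is replaced by the object it points to:

```python
spec = {
    "definitions": {"Pet": {"type": "object"}},
    "paths": {"/pets": {"get": {"schema": {"$ref": "#/definitions/Pet"}}}},
}
resolved = _resolve_refs("", spec)  # empty base URI; only local refs here
assert resolved["paths"]["/pets"]["get"]["schema"] == {"type": "object"}
```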
def convert_double_to_two_registers(doubleValue):
"""
Convert 32 Bit Value to two 16 Bit Value to send as Modbus Registers
doubleValue: Value to be converted
return: 16 Bit Register values int[]
"""
myList = list()
myList.append(int(doubleValue & 0x0000FFFF)) #Append Least Significant Word
myList.append(int((doubleValue & 0xFFFF0000)>>16)) #Append Most Significant Word
return myList | Convert 32 Bit Value to two 16 Bit Value to send as Modbus Registers
doubleValue: Value to be converted
return: 16 Bit Register values int[] | entailment |
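A worked example of the word split, assuming the function above is in scope:

```python
# 0x00012345 splits into the low word 0x2345 (appended first) and the high word 0x0001.
assert convert_double_to_two_registers(0x00012345) == [0x2345, 0x0001]
```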
def convert_float_to_two_registers(floatValue):
"""
Convert 32 Bit real Value to two 16 Bit Value to send as Modbus Registers
floatValue: Value to be converted
return: 16 Bit Register values int[]
"""
myList = list()
s = bytearray(struct.pack('<f', floatValue) ) #little endian
myList.append(s[0] | (s[1]<<8)) #Append Least Significant Word
myList.append(s[2] | (s[3]<<8)) #Append Most Significant Word
return myList | Convert 32 Bit real Value to two 16 Bit Value to send as Modbus Registers
floatValue: Value to be converted
return: 16 Bit Register values int[] | entailment |
def convert_registers_to_float(registers):
"""
Convert two 16 Bit Registers to 32 Bit real value - Used to receive float values from Modbus (Modbus Registers are 16 Bit long)
registers: 16 Bit Registers
return: 32 bit value real
"""
b = bytearray(4)
b[0] = registers[0] & 0xff
b[1] = (registers[0] & 0xff00)>>8
b[2] = (registers[1] & 0xff)
b[3] = (registers[1] & 0xff00)>>8
returnValue = struct.unpack('<f', b)[0] #little Endian; unpack returns a tuple, take the scalar
return returnValue | Convert two 16 Bit Registers to 32 Bit real value - Used to receive float values from Modbus (Modbus Registers are 16 Bit long)
registers: 16 Bit Registers
return: 32 bit value real | entailment |
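A round trip through the two float helpers above (9.5 is exactly representable in IEEE-754 single precision, so the comparison is exact):

```python
registers = convert_float_to_two_registers(9.5)  # 9.5f encodes as 0x41180000
assert registers == [0x0000, 0x4118]
assert convert_registers_to_float(registers) == 9.5
```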
def connect(self):
"""
Connects to a Modbus-TCP Server or a Modbus-RTU Slave with the given Parameters
"""
if (self.__ser is not None):
serial = importlib.import_module("serial")
if self.__stopbits == 0:
self.__ser.stopbits = serial.STOPBITS_ONE
elif self.__stopbits == 1:
self.__ser.stopbits = serial.STOPBITS_TWO
elif self.__stopbits == 2:
self.__ser.stopbits = serial.STOPBITS_ONE_POINT_FIVE
if self.__parity == 0:
self.__ser.parity = serial.PARITY_EVEN
elif self.__parity == 1:
self.__ser.parity = serial.PARITY_ODD
elif self.__parity == 2:
self.__ser.parity = serial.PARITY_NONE
self.__ser = serial.Serial(self.serialPort, self.__baudrate, timeout=self.__timeout, parity=self.__ser.parity, stopbits=self.__ser.stopbits, xonxoff=0, rtscts=0)
self.__ser.writeTimeout = self.__timeout
#print (self.ser)
if (self.__tcpClientSocket is not None):
self.__tcpClientSocket.settimeout(5)
self.__tcpClientSocket.connect((self.__ipAddress, self.__port))
self.__connected = True
self.__thread = threading.Thread(target=self.__listen, args=())
self.__thread.start() | Connects to a Modbus-TCP Server or a Modbus-RTU Slave with the given Parameters | entailment |
def close(self):
"""
Closes Serial port, or TCP-Socket connection
"""
if (self.__ser is not None):
self.__ser.close()
if (self.__tcpClientSocket is not None):
self.__stoplistening = True
self.__tcpClientSocket.shutdown(socket.SHUT_RDWR)
self.__tcpClientSocket.close()
self.__connected = False | Closes Serial port, or TCP-Socket connection | entailment |
def read_discreteinputs(self, starting_address, quantity):
"""
Read Discrete Inputs from Master device (Function code 2)
starting_address: First discrete input to be read
quantity: Number of discrete Inputs to be read
returns: Boolean Array [0..quantity-1] which contains the discrete Inputs
"""
self.__transactionIdentifier+=1
if (self.__ser is not None):
if (self.__ser.closed):
raise Exceptions.SerialPortNotOpenedException("serial port not opened")
if ((starting_address > 65535) | (quantity >2000)):
raise ValueError("Starting address must be 0 - 65535; quantity must be 0 - 2000");
function_code = 2
length = 6;
transaction_identifier_lsb = self.__transactionIdentifier & 0xFF
transaction_identifier_msb = ((self.__transactionIdentifier & 0xFF00) >> 8)
length_lsb = length&0xFF
length_msb = (length&0xFF00) >> 8
starting_address_lsb = starting_address & 0xFF
starting_address_msb = (starting_address & 0xFF00) >> 8
quantity_lsb = quantity & 0xFF
quantity_msb = (quantity & 0xFF00) >> 8
if (self.__ser is not None):
data = bytearray([self.__unitIdentifier, function_code, starting_address_msb, starting_address_lsb, quantity_msb, quantity_lsb, 0, 0])
crc = self.__calculateCRC(data, len(data)-2, 0)
crcLSB = crc&0xFF
crcMSB = (crc&0xFF00) >> 8
data[6] = crcLSB
data[7] = crcMSB
self.__ser.write(data)
if (quantity % 8 != 0):
bytes_to_read = 6+int(quantity/8)
else:
bytes_to_read = 5+int(quantity/8)
data = self.__ser.read(bytes_to_read)
b=bytearray(data)
data = b
if (len(data) < bytes_to_read):
raise Exceptions.TimeoutError('Read timeout Exception')
if ((data[1] == 0x82) & (data[2] == 0x01)):
raise Exceptions.function_codeNotSupportedException("Function code not supported by master");
if ((data[1] == 0x82) & (data[2] == 0x02)):
raise Exceptions.starting_addressInvalidException("Starting address invalid or starting address + quantity invalid");
if ((data[1] == 0x82) & (data[2] == 0x03)):
raise Exceptions.QuantityInvalidException("quantity invalid");
if ((data[1] == 0x82) & (data[2] == 0x04)):
raise Exceptions.ModbusException("error reading");
crc = self.__calculateCRC(data, len(data) - 2, 0)
crcLSB = crc&0xFF
crcMSB = (crc&0xFF00) >> 8
if ((crcLSB != data[len(data)-2]) & (crcMSB != data[len(data)-1])):
raise Exceptions.CRCCheckFailedException("CRC check failed");
myList = list()
for i in range(0, quantity):
myList.append(bool((data[int(i/8)+3] >> int(i%8)) & 0x1))
return myList
else:
protocolIdentifierLSB = 0x00;
protocolIdentifierMSB = 0x00;
length_lsb = 0x06;
length_msb = 0x00;
data = bytearray([transaction_identifier_msb, transaction_identifier_lsb, protocolIdentifierMSB, protocolIdentifierLSB, length_msb, length_lsb, self.__unitIdentifier, function_code, starting_address_msb, starting_address_lsb, quantity_msb, quantity_lsb])
self.__tcpClientSocket.send(data)
self.__receivedata = bytearray()
if (quantity % 8 != 0):
bytes_to_read = 9+int(quantity/8)
else:
bytes_to_read = 8+int(quantity/8)
try:
while (len(self.__receivedata) == 0):
pass
except Exception:
raise Exception('Read Timeout')
data = bytearray(self.__receivedata)
if ((data[1 + 6] == 0x82) & (data[2 + 6] == 0x01)):
raise Exceptions.function_codeNotSupportedException("Function code not supported by master");
if ((data[1 + 6] == 0x82) & (data[2+6] == 0x02)):
raise Exceptions.starting_addressInvalidException("Starting address invalid or starting address + quantity invalid");
if ((data[1 + 6] == 0x82) & (data[2+6] == 0x03)):
raise Exceptions.QuantityInvalidException("quantity invalid");
if ((data[1 + 6] == 0x82) & (data[2+6] == 0x04)):
raise Exceptions.ModbusException("error reading");
myList = list()
for i in range(0, quantity):
myList.append(bool((data[int(i/8)+3+6] >> int(i%8)) & 0x1))
return myList | Read Discrete Inputs from Master device (Function code 2)
starting_address: First discrete input to be read
quantity: Number of discrete Inputs to be read
returns: Boolean Array [0..quantity-1] which contains the discrete Inputs | entailment |
def write_single_coil(self, starting_address, value):
"""
Write single Coil to Master device (Function code 5)
starting_address: Coil to be written
value: Coil Value to be written
"""
self.__transactionIdentifier+=1
if (self.__ser is not None):
if (self.__ser.closed):
raise Exceptions.SerialPortNotOpenedException("serial port not opened")
function_code = 5
length = 6;
transaction_identifier_lsb = self.__transactionIdentifier&0xFF
transaction_identifier_msb = ((self.__transactionIdentifier&0xFF00) >> 8)
length_lsb = length&0xFF
length_msb = (length&0xFF00) >> 8
starting_address_lsb = starting_address&0xFF
starting_address_msb = (starting_address&0xFF00) >> 8
if value:
valueLSB = 0x00
valueMSB = (0xFF00) >> 8
else:
valueLSB = 0x00
valueMSB = (0x00) >> 8
if (self.__ser is not None):
data = bytearray([self.__unitIdentifier, function_code, starting_address_msb, starting_address_lsb, valueMSB, valueLSB, 0, 0])
crc = self.__calculateCRC(data, len(data)-2, 0)
crcLSB = crc&0xFF
crcMSB = (crc&0xFF00) >> 8
data[6] = crcLSB
data[7] = crcMSB
self.__ser.write(data)
bytes_to_read = 8
data = self.__ser.read(bytes_to_read)
b=bytearray(data)
data = b
if (len(data) < bytes_to_read):
raise Exceptions.TimeoutError('Read timeout Exception')
if ((data[1] == 0x85) & (data[2] == 0x01)):
raise Exceptions.function_codeNotSupportedException("Function code not supported by master");
if ((data[1] == 0x85) & (data[2] == 0x02)):
raise Exceptions.starting_addressInvalidException("Address invalid");
if ((data[1] == 0x85) & (data[2] == 0x03)):
raise Exceptions.QuantityInvalidException("Value invalid");
if ((data[1] == 0x85) & (data[2] == 0x04)):
raise Exceptions.ModbusException("error reading");
crc = self.__calculateCRC(data, len(data) - 2, 0)
crcLSB = crc&0xFF
crcMSB = (crc&0xFF00) >> 8
if ((crcLSB != data[len(data)-2]) & (crcMSB != data[len(data)-1])):
raise Exceptions.CRCCheckFailedException("CRC check failed");
if data[1] == self.__unitIdentifier:
return True
else:
return False
else:
protocolIdentifierLSB = 0x00;
protocolIdentifierMSB = 0x00;
length_lsb = 0x06;
length_msb = 0x00;
data = bytearray([transaction_identifier_msb, transaction_identifier_lsb, protocolIdentifierMSB, protocolIdentifierLSB, length_msb, length_lsb, self.__unitIdentifier, function_code, starting_address_msb, starting_address_lsb, valueMSB, valueLSB])
self.__tcpClientSocket.send(data)
bytes_to_read = 12
self.__receivedata = bytearray()
try:
while (len(self.__receivedata) == 0):
pass
except Exception:
raise Exception('Read Timeout')
data = bytearray(self.__receivedata)
if ((data[1+6] == 0x85) & (data[2+6] == 0x01)):
raise Exceptions.function_codeNotSupportedException("Function code not supported by master");
if ((data[1+6] == 0x85) & (data[2+6] == 0x02)):
raise Exceptions.starting_addressInvalidException("Address invalid");
if ((data[1+6] == 0x85) & (data[2+6] == 0x03)):
raise Exceptions.QuantityInvalidException("Value invalid");
if ((data[1+6] == 0x85) & (data[2+6] == 0x04)):
raise Exceptions.ModbusException("error reading");
return True | Write single Coil to Master device (Function code 5)
starting_address: Coil to be written
value: Coil Value to be written | entailment |
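A hedged session sketch combining connect()/close() from above with the read/write helpers; the ModbusClient class name and its (ip, port) constructor are assumptions modelled on the EasyModbus-style API these methods come from:

```python
client = ModbusClient("192.168.0.10", 502)     # assumed constructor: (ip address, port)
client.connect()
try:
    inputs = client.read_discreteinputs(0, 8)  # eight discrete inputs starting at address 0
    print(inputs)                              # e.g. [False, True, ...]
    client.write_single_coil(0, True)          # switch the first coil on
finally:
    client.close()
```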
def write_multiple_coils(self, starting_address, values):
"""
Write multiple coils to Master device (Function code 15)
starting_address : First coil to be written
values: Coil Values [0..quantity-1] to be written
"""
self.__transactionIdentifier+=1
if (self.__ser is not None):
if (self.__ser.closed):
raise Exceptions.SerialPortNotOpenedException("serial port not opened");
function_code = 15
length = 6;
transaction_identifier_lsb = self.__transactionIdentifier&0xFF
transaction_identifier_msb = ((self.__transactionIdentifier&0xFF00) >> 8)
length_lsb = length&0xFF
length_msb = (length&0xFF00) >> 8
starting_address_lsb = starting_address&0xFF
starting_address_msb = (starting_address&0xFF00) >> 8
quantityLSB = len(values)&0xFF
quantityMSB = (len(values)&0xFF00) >> 8
valueToWrite = list()
singleCoilValue = 0;
for i in range(0, len(values)):
if ((i % 8) == 0):
if i > 0:
valueToWrite.append(singleCoilValue)
singleCoilValue = 0;
if (values[i] == True):
coilValue = 1
else:
coilValue = 0
singleCoilValue = ((coilValue)<<(i%8) | (singleCoilValue));
valueToWrite.append(singleCoilValue)
if (self.__ser is not None):
data = bytearray([self.__unitIdentifier, function_code, starting_address_msb, starting_address_lsb, quantityMSB, quantityLSB])
data.append(len(valueToWrite)) #Bytecount
for i in range (0, len(valueToWrite)):
data.append(valueToWrite[i]&0xFF)
crc = self.__calculateCRC(data, len(data), 0)
crcLSB = crc&0xFF
crcMSB = (crc&0xFF00) >> 8
data.append(crcLSB)
data.append(crcMSB)
self.__ser.write(data)
bytes_to_read = 8
data = self.__ser.read(bytes_to_read)
b=bytearray(data)
data = b
if (len(data) < bytes_to_read):
raise Exceptions.TimeoutError('Read timeout Exception')
if ((data[1] == 0x8F) & (data[2] == 0x01)):
raise Exceptions.function_codeNotSupportedException("Function code not supported by master");
if ((data[1] == 0x8F) & (data[2] == 0x02)):
raise Exceptions.starting_addressInvalidException("Starting address invalid or starting address + quantity invalid");
if ((data[1] == 0x8F) & (data[2] == 0x03)):
raise Exceptions.QuantityInvalidException("quantity invalid");
if ((data[1] == 0x8F) & (data[2] == 0x04)):
raise Exceptions.ModbusException("error reading");
crc = self.__calculateCRC(data, len(data) - 2, 0)
crcLSB = crc&0xFF
crcMSB = (crc&0xFF00) >> 8
if ((crcLSB != data[len(data)-2]) & (crcMSB != data[len(data)-1])):
raise Exceptions.CRCCheckFailedException("CRC check failed");
if data[1] == self.__unitIdentifier:
return True
else:
return False
else:
protocolIdentifierLSB = 0x00;
protocolIdentifierMSB = 0x00;
length_lsb = 0x06;
length_msb = 0x00;
data = bytearray([transaction_identifier_msb, transaction_identifier_lsb, protocolIdentifierMSB, protocolIdentifierLSB, length_msb, length_lsb, self.__unitIdentifier, function_code, starting_address_msb, starting_address_lsb, quantityMSB, quantityLSB])
data.append(len(valueToWrite)) #Bytecount
for i in range (0, len(valueToWrite)):
data.append(valueToWrite[i]&0xFF)
self.__tcpClientSocket.send(data)
bytes_to_read = 12
self.__receivedata = bytearray()
try:
while (len(self.__receivedata) == 0):
pass
except Exception:
raise Exception('Read Timeout')
data = bytearray(self.__receivedata)
if ((data[1] == 0x8F) & (data[2] == 0x01)):
raise Exceptions.function_codeNotSupportedException("Function code not supported by master");
if ((data[1] == 0x8F) & (data[2] == 0x02)):
raise Exceptions.starting_addressInvalidException("Starting address invalid or starting address + quantity invalid");
if ((data[1] == 0x8F) & (data[2] == 0x03)):
raise Exceptions.QuantityInvalidException("quantity invalid");
if ((data[1] == 0x8F) & (data[2] == 0x04)):
raise Exceptions.ModbusException("error reading");
return True | Write multiple coils to Master device (Function code 15)
starting_address : First coil to be written
values: Coil Values [0..quantity-1] to be written | entailment |
def services(self, service_group=None):
"""
Args:
service_group: optional name of service group
Returns:
if service_group is omitted or None, flattened dict of all service records in the service registry
if service_group is present, dict of service records in that group
"""
# Specific service group requested
if service_group is not None:
if service_group not in EFConfig.SERVICE_GROUPS:
raise RuntimeError("service registry: {} doesn't have '{}' section listed in EFConfig".format(
self._service_registry_file, service_group))
else:
return self.service_registry_json[service_group]
# Specific service group not requested - flatten and return all service records
else:
result = dict()
for service_group in EFConfig.SERVICE_GROUPS:
result.update(self.service_registry_json[service_group])
return result | Args:
service_group: optional name of service group
Returns:
if service_group is omitted or None, flattened dict of all service records in the service registry
if service_group is present, dict of service records in that group | entailment |
def iter_services(self, service_group=None):
"""
Args:
service_group: optional name of service group
Returns:
if service_group is omitted or None, an Iterator over all flattened service records in the service registry
if service_group is present, an Iterator over all service records in that group
"""
if service_group is not None:
if service_group not in EFConfig.SERVICE_GROUPS:
raise RuntimeError("service registry: {} doesn't have '{}' section listed in EFConfig".format(
self._service_registry_file, service_group))
return self.service_registry_json[service_group].iteritems()
else:
return self.services().iteritems() | Args:
service_group: optional name of service group
Returns:
if service_group is omitted or None, an Iterator over all flattened service records in the service registry
if service_group is present, an Iterator over all service records in that group | entailment |
def valid_envs(self, service_name):
"""
Args:
service_name: the name of the service in the service registry
Returns:
a list of strings - all the valid environments for 'service'
Raises:
RuntimeError if the service wasn't found
"""
service_record = self.service_record(service_name)
if service_record is None:
raise RuntimeError("service registry doesn't have service: {}".format(service_name))
# Return empty list if service has no "environments" section
if not (service_record.has_key("environments")):
return []
# Otherwise gather up the envs
service_record_envs = service_record["environments"]
result = []
for service_env in service_record_envs:
if service_env not in EFConfig.PROTECTED_ENVS and service_env in EFConfig.EPHEMERAL_ENVS:
result.extend(service_env + str(x) for x in range(EFConfig.EPHEMERAL_ENVS[service_env]))
else:
result.append(service_env)
return result | Args:
service_name: the name of the service in the service registry
Returns:
a list of strings - all the valid environments for 'service'
Raises:
RuntimeError if the service wasn't found | entailment |
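A self-contained illustration of the ephemeral expansion performed above, assuming (hypothetically) that EFConfig.EPHEMERAL_ENVS maps "proto" to 4 and that "proto" is not a protected environment:

```python
EPHEMERAL_ENVS = {"proto": 4}        # stand-in for EFConfig.EPHEMERAL_ENVS
service_envs = ["proto", "staging"]  # as listed in a service record
expanded = []
for env in service_envs:
    if env in EPHEMERAL_ENVS:
        expanded.extend(env + str(x) for x in range(EPHEMERAL_ENVS[env]))
    else:
        expanded.append(env)
assert expanded == ["proto0", "proto1", "proto2", "proto3", "staging"]
```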
def service_record(self, service_name):
"""
Args:
service_name: the name of the service in the service registry
Returns:
the entire service record from the service registry or None if the record was not found
"""
if not self.services().has_key(service_name):
return None
return self.services()[service_name] | Args:
service_name: the name of the service in the service registry
Returns:
the entire service record from the service registry or None if the record was not found | entailment |
def service_group(self, service_name):
"""
Args:
service_name: the name of the service in the service registry
Returns:
the name of the group the service is in, or None if the service was not found
"""
for group in EFConfig.SERVICE_GROUPS:
if self.services(group).has_key(service_name):
return group
return None | Args:
service_name: the name of the service in the service registry
Returns:
the name of the group the service is in, or None if the service was not found | entailment |
def service_region(self, service_name):
"""
Args:
service_name: the name of the service in the service registry
Returns:
the region the service is in, or EFConfig.DEFAULT_REGION if the region was not found
"""
if not self.services()[service_name].has_key("region"):
return EFConfig.DEFAULT_REGION
else:
return self.services()[service_name]["region"] | Args:
service_name: the name of the service in the service registry
Returns:
the region the service is in, or EFConfig.DEFAULT_REGION if the region was not found | entailment |
def easy_train_and_evaluate(hyper_params, Model=None, create_loss=None,
training_data=None, validation_data=None,
inline_plotting=False, session_config=None, log_suffix=None,
continue_training=False, continue_with_specific_checkpointpath=None):
"""
Train and evaluate your model without any boilerplate code.
1) Write your data using the starttf.tfrecords.autorecords.write_data method.
2) Create your hyper parameter file containing all required fields and then load it using
starttf.utils.hyper_params.load_params method.
Minimal Sample Hyperparams File:
{"train": {
"learning_rate": {
"type": "const",
"start_value": 0.001
},
"optimizer": {
"type": "adam"
},
"batch_size": 1024,
"iters": 10000,
"summary_iters": 100,
"checkpoint_path": "checkpoints/mnist",
"tf_records_path": "data/.records/mnist"
}
}
3) Pass everything required to this method and that's it.
:param hyper_params: The hyper parameters object loaded via starttf.utils.hyper_params.load_params
:param Model: A keras model.
:param create_loss: A create_loss function like that in starttf.examples.mnist.loss.
:param inline_plotting: When you are using jupyter notebooks you can tell it to plot the loss directly inside the notebook.
:param continue_training: Bool, continue last training in the checkpoint path specified in the hyper parameters.
:param session_config: A configuration for the session.
:param log_suffix: A suffix for the log folder, so you can remember what was special about the run.
:return:
"""
time_stamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H.%M.%S')
chkpt_path = hyper_params.train.checkpoint_path + "/" + time_stamp
if log_suffix is not None:
chkpt_path = chkpt_path + "_" + log_suffix
if session_config is None:
session_config = get_default_config()
if continue_with_specific_checkpointpath:
chkpt_path = hyper_params.train.checkpoint_path + "/" + continue_with_specific_checkpointpath
print("Continue with checkpoint: {}".format(chkpt_path))
elif continue_training:
chkpts = sorted([name for name in os.listdir(hyper_params.train.checkpoint_path)])
chkpt_path = hyper_params.train.checkpoint_path + "/" + chkpts[-1]
print("Latest found checkpoint: {}".format(chkpt_path))
if not os.path.exists(chkpt_path):
os.makedirs(chkpt_path)
# If hyperparam config is used, load and save code
if Model is None:
model_backup = os.path.join(chkpt_path, "model.py")
copyfile(hyperparams["arch"]["model"].replace(".", os.sep), model_backup)
arch_model = __import__(hyperparams["arch"]["model"], fromlist=["Model"])
Model = arch_model.Model
if create_loss is None:
loss_backup = os.path.join(chkpt_path, "loss.py")
copyfile(hyperparams["arch"]["loss"].replace(".", os.sep), loss_backup)
arch_loss = __import__(hyperparams["arch"]["loss"], fromlist=["create_loss"])
create_loss = arch_loss.create_loss
# Load training data
print("Load data")
if training_data is None:
training_data = create_input_fn(os.path.join(hyper_params.train.tf_records_path, PHASE_TRAIN),
hyper_params.train.batch_size)
if validation_data is None:
validation_data = create_input_fn(os.path.join(hyper_params.train.tf_records_path, PHASE_VALIDATION),
hyper_params.train.batch_size)
# Write hyper parameters to be able to track what config you had.
with open(chkpt_path + "/hyperparameters.json", "w") as json_file:
json_file.write(json.dumps(hyper_params.to_dict(), indent=4, sort_keys=True))
estimator_spec = create_tf_estimator_spec(chkpt_path, Model, create_loss, inline_plotting)
# Create a run configuration
config = None
if hyper_params.train.get("distributed", False):
distribution = tf.contrib.distribute.MirroredStrategy()
config = tf.estimator.RunConfig(model_dir=chkpt_path,
save_summary_steps=hyper_params.train.summary_steps,
train_distribute=distribution,
save_checkpoints_steps=hyper_params.train.save_checkpoint_steps,
keep_checkpoint_max=hyper_params.train.keep_checkpoint_max,
keep_checkpoint_every_n_hours=1)
else:
config = tf.estimator.RunConfig(session_config=session_config,
model_dir=chkpt_path,
save_summary_steps=hyper_params.train.summary_steps,
save_checkpoints_steps=hyper_params.train.save_checkpoint_steps,
keep_checkpoint_max=hyper_params.train.keep_checkpoint_max,
keep_checkpoint_every_n_hours=1)
# Create the estimator.
estimator = None
if hyper_params.train.get("warm_start_checkpoint", None) is not None:
warm_start_dir = hyper_params.train.warm_start_checkpoint
estimator = tf.estimator.Estimator(estimator_spec,
config=config,
warm_start_from=warm_start_dir,
params=hyper_params)
else:
estimator = tf.estimator.Estimator(estimator_spec,
config=config,
params=hyper_params)
# Specify training and actually train.
throttle_secs = hyper_params.train.get("throttle_secs", 120)
train_spec = tf.estimator.TrainSpec(input_fn=training_data,
max_steps=hyper_params.train.steps)
eval_spec = tf.estimator.EvalSpec(input_fn=validation_data,
throttle_secs=throttle_secs)
print("Start training")
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
return estimator | Train and evaluate your model without any boilerplate code.
1) Write your data using the starttf.tfrecords.autorecords.write_data method.
2) Create your hyper parameter file containing all required fields and then load it using
starttf.utils.hyper_params.load_params method.
Minimal Sample Hyperparams File:
{"train": {
"learning_rate": {
"type": "const",
"start_value": 0.001
},
"optimizer": {
"type": "adam"
},
"batch_size": 1024,
"iters": 10000,
"summary_iters": 100,
"checkpoint_path": "checkpoints/mnist",
"tf_records_path": "data/.records/mnist"
}
}
3) Pass everything required to this method and that's it.
:param hyper_params: The hyper parameters object loaded via starttf.utils.hyper_params.load_params
:param Model: A keras model.
:param create_loss: A create_loss function like that in starttf.examples.mnist.loss.
:param inline_plotting: When you are using jupyter notebooks you can tell it to plot the loss directly inside the notebook.
:param continue_training: Bool, continue last training in the checkpoint path specified in the hyper parameters.
:param session_config: A configuration for the session.
:param log_suffix: A suffix for the log folder, so you can remember what was special about the run.
:return: | entailment |
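A hedged end-to-end call following the docstring above; load_params comes from the module path the docstring names, while the config path, Model and create_loss are placeholders for your own pieces:

```python
from starttf.utils.hyper_params import load_params  # module path taken from the docstring above

hyper_params = load_params("hyperparams/mnist.json")     # placeholder path to your config
estimator = easy_train_and_evaluate(hyper_params,
                                    Model=Model,              # your keras model class
                                    create_loss=create_loss,  # your loss factory
                                    log_suffix="baseline")
```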
def create_prediction_estimator(hyper_params, model, checkpoint_path=None):
"""
Create an estimator for prediction purpose only.
:param hyper_params: The hyper params file.
:param model: The keras model.
:param checkpoint_path: (Optional) Path to the specific checkpoint to use.
:return:
"""
if checkpoint_path is None:
chkpts = sorted([name for name in os.listdir(hyper_params.train.checkpoint_path)])
checkpoint_path = hyper_params.train.checkpoint_path + "/" + chkpts[-1]
print("Latest found checkpoint: {}".format(checkpoint_path))
estimator_spec = create_tf_estimator_spec(checkpoint_path, model, create_loss=None)
# Create the estimator.
estimator = tf.estimator.Estimator(estimator_spec,
model_dir=checkpoint_path,
params=hyper_params)
return estimator | Create an estimator for prediction purpose only.
:param hyper_params: The hyper params file.
:param model: The keras model.
:param checkpoint_path: (Optional) Path to the specific checkpoint to use.
:return: | entailment |
def ef_plugin(service_name):
"""
Decorator for ef plugin classes. Any wrapped classes should contain a run() method which executes the plugin code.
Args:
service_name (str): The name of the service being extended.
Example:
@ef_plugin('ef-generate')
class NewRelicPlugin(object):
def run(self):
exec_code()
"""
def class_rebuilder(cls):
class EFPlugin(cls):
"""
Base class of ef-plugins. Defines which service is extended and provides access to the current instance of
EFContext to the plugin.
Args:
context (obj:EFContext): Instance of EFContext created by ef-open command line tool
clients (dict): Dictionary of boto3 clients created by ef_utils.create_aws_clients()
"""
def __init__(self, context, clients):
self.service = service_name
self.context = context
self.clients = clients
self.oInstance = cls()
def __getattribute__(self, s):
"""
This is called whenever any attribute of a EFPlugin object is accessed. This function first tries to
get the attribute off EFPlugin. If it fails then it tries to fetch the attribute from self.oInstance
(an instance of the decorated class).
"""
try:
x = super(EFPlugin, self).__getattribute__(s)
except AttributeError:
pass
else:
return x
return self.oInstance.__getattribute__(s)
return EFPlugin
return class_rebuilder | Decorator for ef plugin classes. Any wrapped classes should contain a run() method which executes the plugin code.
Args:
service_name (str): The name of the service being extended.
Example:
@ef_plugin('ef-generate')
class NewRelicPlugin(object):
def run(self):
exec_code() | entailment |
def run_plugins(context_obj, boto3_clients):
"""
Executes all loaded plugins designated for the service calling the function.
Args:
context_obj (obj:EFContext): The EFContext object created by the service.
boto3_clients (dict): Dictionary of boto3 clients created by ef_utils.create_aws_clients()
"""
def print_if_verbose(message):
if context_obj.verbose:
print(message)
service_name = os.path.basename(sys.argv[0]).replace(".py", "")
try:
import plugins
except ImportError:
print_if_verbose("no plugins detected.")
return
else:
for plugin_importer, plugin_name, plugin_ispkg in pkgutil.iter_modules(plugins.__path__):
if plugin_ispkg:
plugin_package = importlib.import_module("plugins.{}".format(plugin_name))
for importer, modname, ispkg in pkgutil.iter_modules(plugin_package.__path__):
plugin_module = importlib.import_module("plugins.{}.{}".format(plugin_name, modname))
for name, obj in inspect.getmembers(plugin_module):
if inspect.isclass(obj) and obj.__name__ == "EFPlugin":
plugin_class = getattr(plugin_module, name)
plugin_instance = plugin_class(context=context_obj, clients=boto3_clients)
if plugin_instance.service == service_name:
print_if_verbose("plugin '{}' loaded".format(plugin_name))
if not context_obj.commit:
print_if_verbose("dryrun: skipping plugin execution.")
else:
try:
plugin_instance.run()
except AttributeError:
print("error executing plugin '{}'".format(modname)) | Executes all loaded plugins designated for the service calling the function.
Args:
context_obj (obj:EFContext): The EFContext object created by the service.
boto3_clients (dict): Dictionary of boto3 clients created by ef_utils.create_aws_clients() | entailment |
def easy_train_and_evaluate(hyper_params, Model=None, define_loss_fn=None,
training_data=None, validation_data=None,
continue_training=False,
session_config=None, log_suffix=None, continue_with_specific_checkpointpath=None):
"""
Train and evaluate your model without any boilerplate code.
1) Write your data using the starttf.tfrecords.autorecords.write_data method.
2) Create your hyper parameter file containing all required fields and then load it using
starttf.utils.hyper_params.load_params method.
Minimal Sample Hyperparams File:
{"train": {
"learning_rate": {
"type": "const",
"start_value": 0.001
},
"optimizer": {
"type": "adam"
},
"batch_size": 1024,
"iters": 10000,
"summary_iters": 100,
"checkpoint_path": "checkpoints/mnist",
"tf_records_path": "data/.records/mnist"
}
}
3) Pass everything required to this method and that's it.
:param hyper_params: The hyper parameters object loaded via starttf.utils.hyper_params.load_params
:param Model: A keras model.
:param create_loss: A create_loss function like that in starttf.examples.mnist.loss.
:param inline_plotting: When you are using jupyter notebooks you can tell it to plot the loss directly inside the notebook.
:param continue_training: Bool, continue last training in the checkpoint path specified in the hyper parameters.
:param session_config: A configuration for the session.
:param log_suffix: A suffix for the log folder, so you can remember what was special about the run.
:return:
"""
time_stamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H.%M.%S')
chkpt_path = hyper_params.train.checkpoint_path + "/" + time_stamp
if log_suffix is not None:
chkpt_path = chkpt_path + "_" + log_suffix
if session_config is None:
session_config = get_default_config()
if continue_with_specific_checkpointpath:
chkpt_path = hyper_params.train.checkpoint_path + "/" + continue_with_specific_checkpointpath
print("Continue with checkpoint: {}".format(chkpt_path))
elif continue_training:
chkpts = sorted([name for name in os.listdir(hyper_params.train.checkpoint_path)])
chkpt_path = hyper_params.train.checkpoint_path + "/" + chkpts[-1]
print("Latest found checkpoint: {}".format(chkpt_path))
if not os.path.exists(chkpt_path):
os.makedirs(chkpt_path)
# If hyperparam config is used
if Model is None:
arch_model = __import__(hyper_params.arch.model, fromlist=["Model"])
Model = arch_model.Model
if define_loss_fn is None and hyper_params.arch.get("loss", None) is not None:
arch_loss = __import__(hyper_params.arch.loss, fromlist=["define_loss_fn"])
define_loss_fn = arch_loss.define_loss_fn
if training_data is None and hyper_params.problem.get("prepare", None) is not None:
prepare = __import__(hyper_params.problem.prepare, fromlist=["Sequence"])
training_data = prepare.Sequence(hyper_params, PHASE_TRAIN)
validation_data = prepare.Sequence(hyper_params, PHASE_VALIDATION)
# TODO save code
# Write hyper parameters to be able to track what config you had.
with open(chkpt_path + "/hyperparameters.json", "w") as json_file:
json_file.write(json.dumps(hyper_params.to_dict(), indent=4, sort_keys=True))
if training_data is not None:
hyper_params.train.steps = hyper_params.train.epochs * len(training_data)
losses = {}
metrics = {}
if define_loss_fn is None:
losses = hyper_params.train.loss.to_dict()
metrics = hyper_params.train.metrics.to_dict()
else:
losses, metrics = define_loss_fn(hyper_params)
callbacks = create_keras_callbacks(hyper_params, chkpt_path)
optimizer, lr_sheduler = create_keras_optimizer(hyper_params)
callbacks.append(lr_sheduler)
if training_data is None:
train_features, train_labels = create_input_fn(os.path.join(hyper_params.train.tf_records_path, PHASE_TRAIN),
hyper_params.train.batch_size)().make_one_shot_iterator().get_next()
validation_data = create_input_fn(os.path.join(hyper_params.train.tf_records_path, PHASE_VALIDATION),
hyper_params.train.batch_size)().make_one_shot_iterator().get_next()
model = Model(hyper_params)
input_tensor = {k: tf.keras.layers.Input(shape=train_features[k].get_shape().as_list(), name=k) for k in train_features}
target_placeholders = {k: tf.placeholder(shape=(None,) + train_labels[k].shape[1:], dtype=train_labels[k].dtype, name=k + "_placeholder") for k in train_labels}
model = model.create_keras_model(input_tensor, training=True)
# model.metrics_names = [k for k in metrics]
model.compile(loss=losses, optimizer=optimizer, metrics=[rename_fn(v, name=k) for k, v in metrics.items()], target_tensors=target_placeholders)
tf.keras.backend.get_session().run(tf.global_variables_initializer())
model.fit(train_features, train_labels, validation_data=validation_data,
batch_size=hyper_params.train.batch_size,
steps_per_epoch=hyper_params.train.get("steps_per_epoch", 1),
epochs=hyper_params.train.get("epochs", 50),
validation_steps=hyper_params.train.get("validation_steps", 1),
callbacks=callbacks, verbose=1)
else:
# first batches features
#features = training_data[0][0]
#model._set_inputs({k: tf.zeros(features[k].shape) for k in features})
model = Model(hyper_params)
train_features = training_data[0][0]
train_labels = training_data[0][1]
input_tensor = {k: tf.keras.layers.Input(shape=train_features[k].shape[1:], name=k) for k in train_features}
target_placeholders = {k: tf.placeholder(shape=(None,) + train_labels[k].shape[1:], dtype=train_labels[k].dtype, name=k + "_placeholder") for k in train_labels}
model = model.create_keras_model(input_tensor, training=True)
# model.metrics_names = [k for k in metrics]
model.compile(loss=losses, optimizer=optimizer, metrics=[metrics[k] for k in metrics], target_tensors=target_placeholders)
tf.keras.backend.get_session().run(tf.global_variables_initializer())
model.fit_generator(training_data, validation_data=validation_data, epochs=hyper_params.train.get("epochs", 50),
callbacks=callbacks, workers=2, use_multiprocessing=False, shuffle=True, verbose=1)
return chkpt_path | Train and evaluate your model without any boilerplate code.
1) Write your data using the starttf.tfrecords.autorecords.write_data method.
2) Create your hyper parameter file containing all required fields and then load it using
starttf.utils.hyper_params.load_params method.
Minimal Sample Hyperparams File:
{"train": {
"learning_rate": {
"type": "const",
"start_value": 0.001
},
"optimizer": {
"type": "adam"
},
"batch_size": 1024,
"iters": 10000,
"summary_iters": 100,
"checkpoint_path": "checkpoints/mnist",
"tf_records_path": "data/.records/mnist"
}
}
3) Pass everything required to this method and that's it.
:param hyper_params: The hyper parameters object loaded via starttf.utils.hyper_params.load_params
:param Model: A keras model.
:param create_loss: A create_loss function like that in starttf.examples.mnist.loss.
:param inline_plotting: When you are using jupyter notebooks you can tell it to plot the loss directly inside the notebook.
:param continue_training: Bool, continue last training in the checkpoint path specified in the hyper parameters.
:param session_config: A configuration for the session.
:param log_suffix: A suffix for the log folder, so you can remember what was special about the run.
:return: | entailment |
def crop(img, start_y, start_x, h, w):
"""
Crop an image given the top left corner.
:param img: The image
:param start_y: The top left corner y coord
:param start_x: The top left corner x coord
:param h: The result height
:param w: The result width
:return: The cropped image.
"""
return img[start_y:start_y + h, start_x:start_x + w, :].copy() | Crop an image given the top left corner.
:param img: The image
:param start_y: The top left corner y coord
:param start_x: The top left corner x coord
:param h: The result height
:param w: The result width
:return: The cropped image. | entailment |
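Since crop above is plain NumPy slicing, here is a minimal standalone illustration; the array contents and coordinates below are invented for the example.
import numpy as np
img = np.arange(100 * 200 * 3, dtype=np.uint8).reshape(100, 200, 3)
# Equivalent to crop(img, start_y=10, start_x=20, h=32, w=64):
patch = img[10:10 + 32, 20:20 + 64, :].copy()
print(patch.shape)  # (32, 64, 3)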
def resize_image_with_crop_or_pad(img, target_height, target_width):
"""
Crops and/or pads an image to a target width and height.
Resizes an image to a target width and height by either cropping the image or padding it with zeros.
NO CENTER CROP. NO CENTER PAD. (Just fill bottom right or crop bottom right)
:param img: Numpy array representing the image.
:param target_height: Target height.
:param target_width: Target width.
:return: The cropped and padded image.
"""
h, w = target_height, target_width
max_h, max_w, c = img.shape
# crop
img = crop_center(img, min(max_h, h), min(max_w, w))
# pad
padded_img = np.zeros(shape=(h, w, c), dtype=img.dtype)
padded_img[:img.shape[0], :img.shape[1], :img.shape[2]] = img
return padded_img | Crops and/or pads an image to a target width and height.
Resizes an image to a target width and height by either cropping the image or padding it with zeros.
NO CENTER CROP. NO CENTER PAD. (Just fill bottom right or crop bottom right)
:param img: Numpy array representing the image.
:param target_height: Target height.
:param target_width: Target width.
:return: The cropped and padded image. | entailment |
def _rotatedRectWithMaxArea(w, h, angle):
"""
Given a rectangle of size wxh that has been rotated by 'angle' (in
radians), computes the width and height of the largest possible
axis-aligned rectangle (maximal area) within the rotated rectangle.
Answer from: https://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders
"""
if w <= 0 or h <= 0:
return 0,0
width_is_longer = w >= h
side_long, side_short = (w,h) if width_is_longer else (h,w)
# since the solutions for angle, -angle and 180-angle are all the same,
# it suffices to look at the first quadrant and the absolute values of sin,cos:
sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle))
if side_short <= 2.*sin_a*cos_a*side_long or abs(sin_a-cos_a) < 1e-10:
# half constrained case: two crop corners touch the longer side,
# the other two corners are on the mid-line parallel to the longer line
x = 0.5*side_short
wr,hr = (x/sin_a,x/cos_a) if width_is_longer else (x/cos_a,x/sin_a)
else:
# fully constrained case: crop touches all 4 sides
cos_2a = cos_a*cos_a - sin_a*sin_a
wr,hr = (w*cos_a - h*sin_a)/cos_2a, (h*cos_a - w*sin_a)/cos_2a
return wr,hr | Given a rectangle of size wxh that has been rotated by 'angle' (in
radians), computes the width and height of the largest possible
axis-aligned rectangle (maximal area) within the rotated rectangle.
Answer from: https://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders | entailment |
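As a worked example of the half-constrained branch above (illustrative numbers only): a 200x100 rectangle rotated by 30 degrees.
import math
w, h, angle = 200.0, 100.0, math.radians(30)
sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle))
# side_short (100) <= 2*sin_a*cos_a*side_long (~173.2), so two crop corners
# touch the longer side and the formula reduces to x/sin_a by x/cos_a.
x = 0.5 * h
print(round(x / sin_a, 1), round(x / cos_a, 1))  # ~100.0 x ~57.7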
def rotate_img_and_crop(img, angle):
"""
Rotate an image and then crop it so that there is no black area.
:param img: The image to rotate.
:param angle: The rotation angle in degrees.
:return: The rotated and cropped image.
"""
h, w, _ = img.shape
img = scipy.ndimage.interpolation.rotate(img, angle)
w, h = _rotatedRectWithMaxArea(w, h, math.radians(angle))
return crop_center(img, int(h), int(w)) | Rotate an image and then crop it so that there is no black area.
:param img: The image to rotate.
:param angle: The rotation angle in degrees.
:return: The rotated and cropped image. | entailment |
def diff_string_templates(string_a, string_b):
"""
Determine the diff of two strings. Return an empty string if the strings
are identical, and the diff output string if they are not.
"""
s1 = string_a.strip().splitlines()
s2 = string_b.strip().splitlines()
diffs = unified_diff(s2, s1, fromfile='deployed', tofile='local', lineterm='')
return '\n'.join(diffs) | Determine the diff of two strings. Return an empty string if the strings
are identical, and the diff output string if they are not. | entailment |
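For reference, the underlying difflib call behaves like this on two tiny templates; this is a standalone sketch, not tied to actual ef-cf output.
from difflib import unified_diff
deployed = ['{', '  "a": 1', '}']
local = ['{', '  "a": 2', '}']
diff = '\n'.join(unified_diff(deployed, local, fromfile='deployed', tofile='local', lineterm=''))
print(diff if diff else 'templates match')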
def render_local_template(service_name, environment, repo_root, template_file):
"""
Render a given service's template for a given environment and return it
"""
cmd = 'cd {} && ef-cf {} {} --devel --verbose'.format(repo_root, template_file, environment)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
stderr = indentify('\n{}'.format(stderr))
stdout = indentify('\n{}'.format(stdout))
raise Exception('Service: `{}`, Env: `{}`, Msg: `{}{}`'
.format(service_name, environment, stderr, stdout))
logger.debug('Rendered template for `%s` in `%s`', template_file, environment)
r = re.match(r".*(^{.*^})$", stdout, re.MULTILINE | re.DOTALL)
return jsonify(json.loads(r.group(1))) | Render a given service's template for a given environment and return it | entailment |
def fetch_current_cloudformation_template(service_name, environment, cf_client):
"""
Fetch the currently-deployed template for the given service in the given
environment and return it.
"""
stack_name = get_stack_name(environment, service_name)
logger.debug('Fetching template for `%s`', stack_name)
result = cf_client.get_template(StackName=stack_name)
return jsonify(result['TemplateBody']) | Fetch the currently-deployed template for the given service in the given
environment and return it. | entailment |
def diff_sevice_by_text(service_name, service, environment, cf_client, repo_root):
"""
Render the local template and compare it to the template that was last
applied in the target environment.
"""
global ret_code
logger.info('Investigating textual diff for `%s`:`%s` in environment `%s`',
service['type'], service_name, environment)
try:
local_template = render_local_template(service_name, environment,
repo_root, service['template_file'])
current_template = fetch_current_cloudformation_template(
service_name, environment, cf_client)
except Exception as e:
ret_code = 2
logger.error(e)
return
ret = diff_string_templates(local_template, current_template)
if not ret:
logger.info('Deployed service `%s` in environment `%s` matches '
'the local template.', service_name, environment)
else:
ret_code = 1
logger.error('Service `%s` in environment `%s` differs from '
'the local template.',
service_name, environment)
logger.info('Change details:\n %s', indentify(ret)) | Render the local template and compare it to the template that was last
applied in the target environment. | entailment |
def diff_sevice_by_changeset(service_name, service, environment, cf_client, repo_root):
"""
If an ef-cf call fails, the error will be logged, the retcode set to 2, but
the function will run to completion and return the list of non-error
results.
"""
global ret_code
logger.info('Investigating changeset for `%s`:`%s` in environment `%s`',
service['type'], service_name, environment)
delete_any_existing_changesets(cf_client, service_name, environment)
try:
changeset = generate_changeset(service_name, environment,
repo_root, service['template_file'])
except Exception as e:
ret_code = 2
logger.error(e)
return
wait_for_changeset_creation(cf_client, changeset['Id'], changeset['StackId'])
logger.info('Created Changeset ID: `%s`', changeset['Id'])
desc = cf_client.describe_change_set(
ChangeSetName=changeset['Id'], StackName=changeset['StackId'])
cf_client.delete_change_set(
ChangeSetName=changeset['Id'], StackName=changeset['StackId'])
if changeset_is_empty(desc):
logger.info('Deployed service `%s` in environment `%s` matches '
'the local template.', service_name, environment)
else:
ret_code = 1
logger.error('Service `%s` in environment `%s` differs from '
'the local template.',
service_name, environment)
details = jsonify(desc['Changes'])
logger.info('Change details:\n %s', indentify(details)) | If an ef-cf call fails, the error will be logged, the retcode set to 2, but
the function will run to completion and return the list of non-error
results. | entailment |
def get_cloudformation_client(service_name, environment_name):
"""
Given a service name and an environment name, return a boto CloudFormation
client object.
"""
region = service_registry.service_region(service_name)
if whereami() == 'ec2':
profile = None
else:
profile = get_account_alias(environment_name)
clients = create_aws_clients(region, profile, 'cloudformation')
return clients['cloudformation'] | Given a service name and an environment name, return a boto CloudFormation
client object. | entailment |
def evaluate_service_changes(services, envs, repo_root, func):
"""
Given a dict of services, and a list of environments, apply the diff
function to evaluate the differences between the target environments
and the rendered templates.
Sub-services (names with '.' in them) are skipped.
"""
for service_name, service in services.iteritems():
for env_category in service['environments']:
if env_category not in get_env_categories(envs):
logger.debug('Skipping not-included environment `%s` for service `%s`',
env_category, service_name)
continue
environment = generate_test_environment_name(env_category)
cf_client = get_cloudformation_client(service_name, environment)
func(service_name, service, environment, cf_client, repo_root) | Given a dict of services, and a list of environments, apply the diff
function to evaluate the differences between the target environments
and the rendered templates.
Sub-services (names with '.' in them) are skipped. | entailment |
def get_matching_service_template_file(service_name, template_files):
"""
Return the template file that goes with the given service name, or return
None if there's no match. Subservices return the parent service's file.
"""
# If this is a subservice, use the parent service's template
service_name = service_name.split('.')[0]
if service_name in template_files:
return template_files[service_name]
return None | Return the template file that goes with the given service name, or return
None if there's no match. Subservices return the parent service's file. | entailment |
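The subservice handling is just a lookup on the name before the first dot; a tiny illustration with made-up service names and template paths.
template_files = {"myservice": "/repo/cloudformation/services/templates/myservice.json"}
# A subservice such as "myservice.worker" resolves to the parent's template file.
print(template_files.get("myservice.worker".split('.')[0]))  # parent template path
print(template_files.get("unknown-service".split('.')[0]))   # None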
def get_dict_registry_services(registry, template_files, warn_missing_files=True):
"""
Return a dict mapping service name to a dict containing the service's
type ('fixtures', 'platform_services', 'application_services', 'internal_services'),
the template file's absolute path, and a list of environments to which the
service is intended to deploy.
Service names that appear twice in the output list will emit a warning and
ignore the latter records.
Services which have no template file will not appear in the returned dict.
If the `warn_missing_files` boolean is True these files will emit a warning.
"""
with open(registry) as fr:
parsed_registry = json.load(fr)
services = {}
for type, type_services in parsed_registry.iteritems():
for name, service in type_services.iteritems():
if name in services:
logger.warning("Template name appears twice, ignoring later items: `%s`", name)
continue
template_file = get_matching_service_template_file(name, template_files)
if not template_file:
if warn_missing_files:
logger.warning("No template file for `%s` (%s) `%s`", type, service['type'], name)
continue
services[name] = {
'type': type,
'template_file': template_file,
'environments': service['environments']
}
return services | Return a dict mapping service name to a dict containing the service's
type ('fixtures', 'platform_services', 'application_services', 'internal_services'),
the template file's absolute path, and a list of environments to which the
service is intended to deploy.
Service names that appear twice in the output list will emit a warning and
ignore the latter records.
Services which have no template file will not appear in the returned dict.
If the `warn_missing_files` boolean is True these files will emit a warning. | entailment |
def scan_dir_for_template_files(search_dir):
"""
Return a map of "likely service/template name" to "template file".
This includes all the template files in fixtures and in services.
"""
template_files = {}
cf_dir = os.path.join(search_dir, 'cloudformation')
for type in os.listdir(cf_dir):
template_dir = os.path.join(cf_dir, type, 'templates')
for x in os.listdir(template_dir):
name = os.path.splitext(x)[0]
template_files[name] = os.path.join(template_dir, x)
return template_files | Return a map of "likely service/template name" to "template file".
This includes all the template files in fixtures and in services. | entailment |
def generate_secret(length=32):
"""
Generate a random secret consisting of mixed-case letters and numbers
Args:
length (int): Length of the generated password
Returns:
a randomly generated secret string
Raises:
None
"""
alphabet = string.ascii_letters + string.digits
random_bytes = os.urandom(length)
indices = [int(len(alphabet) * (ord(byte) / 256.0)) for byte in random_bytes]
return "".join([alphabet[index] for index in indices]) | Generate a random secret consisting of mixed-case letters and numbers
Args:
length (int): Length of the generated password
Returns:
a randomly generated secret string
Raises:
None | entailment |
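The implementation above targets Python 2 (ord over os.urandom bytes fails on Python 3); a rough modern equivalent of the same idea, offered only as a sketch, would use the standard secrets module.
import secrets
import string
# Same mixed-case alphanumeric alphabet; secrets draws from a CSPRNG.
alphabet = string.ascii_letters + string.digits
print("".join(secrets.choice(alphabet) for _ in range(32)))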
def generate_secret_file(file_path, pattern, service, environment, clients):
"""
Generate a parameter file with its secrets encrypted in KMS
Args:
file_path (string): Path to the parameter file to be encrypted
pattern (string): Pattern to do fuzzy string matching
service (string): Service to use KMS key to encrypt file
environment (string): Environment to encrypt values
clients (dict): KMS AWS client that has been instantiated
Returns:
None
Raises:
IOError: If the file does not exist
"""
changed = False
with open(file_path) as json_file:
data = json.load(json_file, object_pairs_hook=OrderedDict)
try:
for key, value in data["params"][environment].items():
if pattern in key:
if "aws:kms:decrypt" in value:
print("Found match, key {} but value is encrypted already; skipping...".format(key))
else:
print("Found match, encrypting key {}".format(key))
encrypted_password = ef_utils.kms_encrypt(clients['kms'], service, environment, value)
data["params"][environment][key] = format_secret(encrypted_password)
changed = True
except KeyError:
ef_utils.fail("Error env: {} does not exist in parameters file".format(environment))
if changed:
with open(file_path, "w") as encrypted_file:
json.dump(data, encrypted_file, indent=2, separators=(',', ': '))
# Writing new line here so it conforms to WG14 N1256 5.1.1.1 (so github doesn't complain)
encrypted_file.write("\n") | Generate a parameter file with its secrets encrypted in KMS
Args:
file_path (string): Path to the parameter file to be encrypted
pattern (string): Pattern to do fuzzy string matching
service (string): Service to use KMS key to encrypt file
environment (string): Environment to encrypt values
clients (dict): KMS AWS client that has been instantiated
Returns:
None
Raises:
IOError: If the file does not exist | entailment |
def handle_args_and_set_context(args):
"""
Args:
args: the command line args, probably passed from main() as sys.argv[1:]
Returns:
a populated EFPWContext object
Raises:
RuntimeError: if repo or branch isn't as spec'd in ef_config.EF_REPO and ef_config.EF_REPO_BRANCH
ValueError: if a parameter is invalid
"""
parser = argparse.ArgumentParser()
parser.add_argument("service", help="name of service password is being generated for")
parser.add_argument("env", help=", ".join(EFConfig.ENV_LIST))
group = parser.add_mutually_exclusive_group()
group.add_argument("--decrypt", help="encrypted string to be decrypted", default="")
group.add_argument("--plaintext", help="secret to be encrypted rather than a randomly generated one", default="")
group.add_argument("--secret_file", help="json file containing secrets to be encrypted", default="")
parser.add_argument("--match", help="used in conjunction with --secret_file to match against keys to be encrypted", default="")
parser.add_argument("--length", help="length of generated password (default 32)", default=32)
parsed_args = vars(parser.parse_args(args))
context = EFPWContext()
try:
context.env = parsed_args["env"]
except ValueError as e:
ef_utils.fail("Error in env: {}".format(e.message))
context.service = parsed_args["service"]
context.decrypt = parsed_args["decrypt"]
context.length = parsed_args["length"]
context.plaintext = parsed_args["plaintext"]
context.secret_file = parsed_args["secret_file"]
context.match = parsed_args["match"]
if context.match or context.secret_file:
if not context.match or not context.secret_file:
raise ValueError("Must have both --match and --secret_file flag")
return context | Args:
args: the command line args, probably passed from main() as sys.argv[1:]
Returns:
a populated EFPWContext object
Raises:
RuntimeError: if repo or branch isn't as spec'd in ef_config.EF_REPO and ef_config.EF_REPO_BRANCH
ValueError: if a parameter is invalid | entailment |
def tile_2d(input, k_x, k_y, name, reorder_required=True):
"""
A tiling layer like the one introduced in the OverFeat and Huval papers.
:param input: Your input tensor.
:param k_x: The tiling factor in x direction.
:param k_y: The tiling factor in y direction.
:param name: The name of the layer.
:param reorder_required: To implement an exact Huval tiling you need reordering.
However, not using it is more efficient, and when training from scratch setting this to false is highly recommended.
:return: The output tensor.
"""
size = input.get_shape().as_list()
c, h, w = size[3], size[1], size[2]
batch_size = size[0]
if batch_size is None:
batch_size = -1
# Check if tiling is possible and define output shape.
assert c % (k_x * k_y) == 0
tmp = input
if reorder_required:
output_channels = int(c / (k_x * k_y))
channels = tf.unstack(tmp, axis=-1)
reordered_channels = [None for _ in range(len(channels))]
for o in range(output_channels):
for i in range(k_x * k_y):
target = o + i * output_channels
source = o * (k_x * k_y) + i
reordered_channels[target] = channels[source]
tmp = tf.stack(reordered_channels, axis=-1)
# Actual tiling
with tf.variable_scope(name) as scope:
tmp = tf.transpose(tmp, [0, 2, 1, 3])
tmp = tf.reshape(tmp, (batch_size, w, int(h * k_y), int(c / (k_y))))
tmp = tf.transpose(tmp, [0, 2, 1, 3])
tmp = tf.reshape(tmp, (batch_size, int(h * k_y), int(w * k_x), int(c / (k_y * k_x))))
return tmp | A tiling layer like the one introduced in the OverFeat and Huval papers.
:param input: Your input tensor.
:param k_x: The tiling factor in x direction.
:param k_y: The tiling factor in y direction.
:param name: The name of the layer.
:param reorder_required: To implement an exact Huval tiling you need reordering.
However, not using it is more efficient, and when training from scratch setting this to false is highly recommended.
:return: The output tensor. | entailment |
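Shape-wise, the tiling trades channels for spatial resolution; the NumPy sketch below mirrors the reshape/transpose sequence above for k_x = k_y = 2 (NumPy stands in for TensorFlow and the sizes are invented).
import numpy as np
b, h, w, c, k_x, k_y = 1, 4, 4, 8, 2, 2
x = np.arange(b * h * w * c).reshape(b, h, w, c)
t = x.transpose(0, 2, 1, 3).reshape(b, w, h * k_y, c // k_y)
t = t.transpose(0, 2, 1, 3).reshape(b, h * k_y, w * k_x, c // (k_y * k_x))
print(t.shape)  # (1, 8, 8, 2): 4x fewer channels, 4x more spatial positions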
def inverse_tile_2d(input, k_x, k_y, name):
"""
An inverse tiling layer.
An inverse to the tiling layer can be of great use, since you can keep the resolution of your output low,
but harness the benefits of the resolution of a higher level feature layer.
If you insist on a source you can call it very lightly inspired by yolo9000 "passthrough layer".
:param input: Your input tensor. (Assert input.shape[1] % k_y = 0 and input.shape[2] % k_x = 0)
:param k_x: The tiling factor in x direction [int].
:param k_y: The tiling factor in y direction [int].
:param name: The name of the layer.
:return: The output tensor of shape [batch_size, inp.height / k_y, inp.width / k_x, inp.channels * k_x * k_y].
"""
batch_size, h, w, c = input.get_shape().as_list()
if batch_size is None:
batch_size = -1
# Check if tiling is possible and define output shape.
assert w % k_x == 0 and h % k_y == 0
# Actual inverse tiling
with tf.variable_scope(name) as scope:
tmp = input
tmp = tf.reshape(tmp, (batch_size, h, int(w / k_x), int(c * k_x)))
tmp = tf.transpose(tmp, [0, 2, 1, 3])
tmp = tf.reshape(tmp, (batch_size, int(w / k_x), int(h / k_y), int(c * k_y * k_x)))
tmp = tf.transpose(tmp, [0, 2, 1, 3])
return tmp | An inverse tiling layer.
An inverse to the tiling layer can be of great use, since you can keep the resolution of your output low,
but harness the benefits of the resolution of a higher level feature layer.
If you insist on a source you can call it very lightly inspired by yolo9000 "passthrough layer".
:param input: Your input tensor. (Assert input.shape[1] % k_y = 0 and input.shape[2] % k_x = 0)
:param k_x: The tiling factor in x direction [int].
:param k_y: The tiling factor in y direction [int].
:param name: The name of the layer.
:return: The output tensor of shape [batch_size, inp.height / k_y, inp.width / k_x, inp.channels * k_x * k_y]. | entailment |
def feature_passthrough(early_feat, late_feat, filters, name, kernel_size=(1, 1)):
"""
A feature passthrough layer inspired by yolo9000 and the inverse tiling layer.
It can be proven that this layer does the same as conv(concat(inverse_tile(early_feat), late_feat)).
This layer has no activation function.
:param early_feat: The early feature layer of shape [batch_size, h * s_x, w * s_y, _].
s_x and s_y are integers computed internally describing the scale between the layers.
:param late_feat: The late feature layer of shape [batch_size, h, w, _].
:param filters: The number of convolution filters.
:param name: The name of the layer.
:param kernel_size: The size of the kernel. Default (1x1).
:return: The output tensor of shape [batch_size, h, w, outputs]
"""
_, h_early, w_early, c_early = early_feat.get_shape().as_list()
_, h_late, w_late, c_late = late_feat.get_shape().as_list()
s_x = int(w_early / w_late)
s_y = int(h_early / h_late)
assert h_late * s_y == h_early and w_late * s_x == w_early
with tf.variable_scope(name) as scope:
early_conv = tf.layers.conv2d(early_feat, filters=filters, kernel_size=(s_x * kernel_size[0], s_y * kernel_size[1]), strides=(s_x, s_y), padding="same")
late_conv = tf.layers.conv2d(late_feat, filters=filters, kernel_size=kernel_size, strides=(1, 1), padding="same")
return early_conv + late_conv | A feature passthrough layer inspired by yolo9000 and the inverse tiling layer.
It can be proven that this layer does the same as conv(concat(inverse_tile(early_feat), late_feat)).
This layer has no activation function.
:param early_feat: The early feature layer of shape [batch_size, h * s_x, w * s_y, _].
s_x and s_y are integers computed internally describing the scale between the layers.
:param late_feat: The late feature layer of shape [batch_size, h, w, _].
:param filters: The number of convolution filters.
:param name: The name of the layer.
:param kernel_size: The size of the kernel. Default (1x1).
:return: The output tensor of shape [batch_size, h, w, outputs] | entailment |
def upsampling_feature_passthrough(early_feat, late_feat, filters, name, kernel_size=(1, 1)):
"""
An upsampling feature passthrough layer inspired by yolo9000 and the tiling layer.
It can be proven that this layer does the same as conv(concat(early_feat, tile_2d(late_feat))).
This layer has no activation function.
:param early_feat: The early feature layer of shape [batch_size, h * s_x, w * s_y, _].
s_x and s_y are integers computed internally describing the scale between the layers.
:param late_feat: The late feature layer of shape [batch_size, h, w, _].
:param filters: The number of convolution filters.
:param name: The name of the layer.
:param kernel_size: The size of the kernel. Default (1x1).
:return: The output tensor of shape [batch_size, h * s_x, w * s_y, outputs]
"""
_, h_early, w_early, c_early = early_feat.get_shape().as_list()
_, h_late, w_late, c_late = late_feat.get_shape().as_list()
s_x = int(w_early / w_late)
s_y = int(h_early / h_late)
assert h_late * s_y == h_early and w_late * s_x == w_early
with tf.variable_scope(name) as scope:
tiled = tile_2d(late_feat, s_x, s_y, "tile_2d", reorder_required=False)
concated = tf.concat([early_feat, tiled], axis=-1)
return tf.layers.conv2d(concated, filters=filters, kernel_size=kernel_size, strides=(1, 1), padding="same") | An upsampling feature passthrough layer inspired by yolo9000 and the tiling layer.
It can be proven, that this layer does the same as conv(concat(early_feat, tile_2d(late_feat))).
This layer has no activation function.
:param early_feat: The early feature layer of shape [batch_size, h * s_x, w * s_y, _].
s_x and s_y are integers computed internally describing the scale between the layers.
:param late_feat: The late feature layer of shape [batch_size, h, w, _].
:param filters: The number of convolution filters.
:param name: The name of the layer.
:param kernel_size: The size of the kernel. Default (1x1).
:return: The output tensor of shape [batch_size, h * s_x, w * s_y, outputs] | entailment |
def load(self):
"""Loads the config"""
try:
with open(self._ef_site_config, 'r') as yml_file:
return yaml.safe_load(yml_file)
except (IOError, yaml.parser.ParserError) as error:
print("Error: {}".format(error), file=sys.stderr)
sys.exit(1) | Loads the config | entailment |
def interpolate_loss(labels, loss1, loss2, interpolation_values):
"""
Interpolate two losses linearly.
:param labels: A float tensor of shape [batch_size, ..., num_classes] representing the label class probabilities.
:param loss1: A float tensor of shape [batch_size, ...] representing the loss1 for interpolation.
:param loss2: A float tensor of shape [batch_size, ...] representing the loss2 for interpolation.
:param interpolation_values: Per-class values specifying how much of loss2 should be interpolated in.
:return: A tensor representing the weighted cross entropy.
"""
with tf.variable_scope("interpolate_focus_loss"):
# Select the probs or weights with the labels.
t = tf.reduce_sum(labels * interpolation_values, axis=-1)
return (1 - t) * loss1 + t * loss2 | Interpolate two losses linearly.
:param labels: A float tensor of shape [batch_size, ..., num_classes] representing the label class probabilities.
:param loss1: A float tensor of shape [batch_size, ...] representing the loss1 for interpolation.
:param loss2: A float tensor of shape [batch_size, ...] representing the loss2 for interpolation.
:param interpolation_values: Per-class values specifying how much of loss2 should be interpolated in.
:return: A tensor representing the weighted cross entropy. | entailment |
def alpha_balance_loss(labels, loss, alpha_weights):
"""
Calculate the alpha balanced cross_entropy.
This means for each sample the cross entropy is calculated and then weighted by the class specific weight.
:param labels: A float tensor of shape [batch_size, ..., num_classes] representing the label class probabilities.
:param loss: A float tensor of shape [batch_size, ...] representing the loss that should be focused.
:param alpha_weights: A float tensor of shape [1, ..., num_classes] (... is filled with ones to match number
of dimensions to labels tensor) representing the weights for each class.
:return: A tensor representing the weighted cross entropy.
"""
with tf.variable_scope("alpha_balance"):
# Broadcast multiply labels with alpha weights to select weights and then reduce them along last axis.
weights = tf.reduce_sum(labels * alpha_weights, axis=-1)
return weights * loss | Calculate the alpha balanced cross_entropy.
This means for each sample the cross entropy is calculated and then weighted by the class specific weight.
:param labels: A float tensor of shape [batch_size, ..., num_classes] representing the label class probabilities.
:param loss: A float tensor of shape [batch_size, ...] representing the loss that should be focused.
:param alpha_weights: A float tensor of shape [1, ..., num_classes] (... is filled with ones to match number
of dimensions to labels tensor) representing the weights for each class.
:return: A tensor representing the weighted cross entropy. | entailment |
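The weighting is a label-selected lookup followed by a multiply; a small NumPy illustration with invented numbers.
import numpy as np
alpha_weights = np.array([0.25, 1.0, 2.0])                   # per-class weights
labels = np.array([[1, 0, 0], [0, 0, 1]], dtype=np.float32)  # one-hot labels
loss = np.array([0.7, 0.4], dtype=np.float32)                # per-sample loss
weights = np.sum(labels * alpha_weights, axis=-1)            # [0.25, 2.0]
print(weights * loss)                                        # [0.175, 0.8]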
def batch_alpha_balance_loss(labels, loss):
"""
Calculate the alpha balanced cross_entropy.
This means for each sample the cross entropy is calculated and then weighted by the class specific weight.
There is yet no paper for this type of loss.
:param labels: A float tensor of shape [batch_size, ..., num_classes] representing the label class probabilities.
:param loss: A float tensor of shape [batch_size, ...] representing the loss that should be focused.
:return: A tensor representing the weighted cross entropy.
"""
with tf.variable_scope("batch_alpha_balance"):
# Compute the occurrence probability for each class
mu, _ = tf.nn.moments(labels, [0, 1, 2])
# For weighting a class should be down weighted by its occurrence probability.
not_mu = 1 - mu
# Select the class specific not_mu
not_mu_class = tf.reduce_sum(labels * not_mu, axis=-1)
return not_mu_class * loss | Calculate the alpha balanced cross_entropy.
This means for each sample the cross entropy is calculated and then weighted by the class specific weight.
There is yet no paper for this type of loss.
:param labels: A float tensor of shape [batch_size, ..., num_classes] representing the label class probabilities.
:param loss: A float tensor of shape [batch_size, ...] representing the loss that should be focused.
:return: A tensor representing the weighted cross entropy. | entailment |
def mask_loss(input_tensor, binary_tensor):
"""
Mask a loss by using a tensor filled with 0 or 1.
:param input_tensor: A float tensor of shape [batch_size, ...] representing the loss/cross_entropy
:param binary_tensor: A float tensor of shape [batch_size, ...] representing the mask.
:return: A float tensor of shape [batch_size, ...] representing the masked loss.
"""
with tf.variable_scope("mask_loss"):
mask = tf.cast(tf.cast(binary_tensor, tf.bool), tf.float32)
return input_tensor * mask | Mask a loss by using a tensor filled with 0 or 1.
:param input_tensor: A float tensor of shape [batch_size, ...] representing the loss/cross_entropy
:param binary_tensor: A float tensor of shape [batch_size, ...] representing the mask.
:return: A float tensor of shape [batch_size, ...] representing the masked loss. | entailment |
def mean_on_masked(loss, mask, epsilon=1e-8, axis=None):
"""
Average a loss correctly when it was masked.
:param loss: A float tensor of shape [batch_size, ...] representing the (already masked) loss to be averaged.
:param mask: A float tensor of shape [batch_size, ...] representing the mask.
:param epsilon: Small constant to avoid division by zero when the mask is empty.
:param axis: The dimensions to reduce. If None (the default), reduces all dimensions.
Must be in the range [-rank(input_tensor), rank(input_tensor)).
"""
mask = tf.cast(tf.cast(mask, tf.bool), tf.float32)
active_pixels = tf.reduce_sum(mask)
active_pixels = tf_if(tf.equal(active_pixels, 0), epsilon, active_pixels)
return tf.reduce_sum(loss, axis=axis) / active_pixels | Average a loss correctly when it was masked.
:param loss: A float tensor of shape [batch_size, ...] representing the (already masked) loss to be averaged.
:param mask: A float tensor of shape [batch_size, ...] representing the mask.
:param epsilon: Small constant to avoid division by zero when the mask is empty.
:param axis: The dimensions to reduce. If None (the default), reduces all dimensions.
Must be in the range [-rank(input_tensor), rank(input_tensor)). | entailment |
def mask_and_mean_loss(input_tensor, binary_tensor, axis=None):
"""
Mask a loss by using a tensor filled with 0 or 1 and average correctly.
:param input_tensor: A float tensor of shape [batch_size, ...] representing the loss/cross_entropy
:param binary_tensor: A float tensor of shape [batch_size, ...] representing the mask.
:return: A float tensor of shape [batch_size, ...] representing the masked loss.
:param axis: The dimensions to reduce. If None (the default), reduces all dimensions.
Must be in the range [-rank(input_tensor), rank(input_tensor)).
"""
return mean_on_masked(mask_loss(input_tensor, binary_tensor), binary_tensor, axis=axis) | Mask a loss by using a tensor filled with 0 or 1 and average correctly.
:param input_tensor: A float tensor of shape [batch_size, ...] representing the loss/cross_entropy
:param binary_tensor: A float tensor of shape [batch_size, ...] representing the mask.
:return: A float tensor of shape [batch_size, ...] representing the masked loss.
:param axis: The dimensions to reduce. If None (the default), reduces all dimensions.
Must be in the range [-rank(input_tensor), rank(input_tensor)). | entailment |
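The point of the dedicated mean is dividing by the number of active mask entries rather than the tensor size, so sparse masks do not dilute the loss; a NumPy sketch of the same arithmetic follows.
import numpy as np
loss = np.array([0.5, 1.5, 2.0, 4.0])
mask = np.array([1.0, 0.0, 1.0, 0.0])
masked = loss * mask
print(masked.mean())                          # 0.625, diluted by inactive entries
print(masked.sum() / max(mask.sum(), 1e-8))   # 1.25, averaged over active entries only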
def variance_corrected_loss(loss, sigma_2=None):
"""
Create a variance corrected loss.
When summing variance corrected losses you get the same as multiloss.
This is especially useful for Keras, where multiple losses are summed automatically.
This multi-loss implementation is inspired by the Paper "Multi-Task Learning Using Uncertainty to Weight Losses
for Scene Geometry and Semantics" by Kendall, Gal and Cipolla.
:param loss: The loss that should be variance corrected.
:param sigma_2: Optional a variance (sigma squared) to use. If none is provided it is learned.
:return: The variance corrected loss.
"""
with tf.variable_scope("variance_corrected_loss"):
sigma_cost = 0
if sigma_2 is None:
# FIXME the paper has been updated Apr 2018, check if implementation is still valid.
sigma = tf.get_variable(name="sigma", dtype=tf.float32, initializer=tf.constant(1.0), trainable=True)
sigma_2 = tf.pow(sigma, 2)
tf.summary.scalar("sigma2", sigma_2)
sigma_cost = tf.log(sigma_2 + 1.0)
return 0.5 / sigma_2 * loss + sigma_cost | Create a variance corrected loss.
When summing variance corrected losses you get the same as multiloss.
This is especially useful for Keras, where multiple losses are summed automatically.
This multi-loss implementation is inspired by the Paper "Multi-Task Learning Using Uncertainty to Weight Losses
for Scene Geometry and Semantics" by Kendall, Gal and Cipolla.
:param loss: The loss that should be variance corrected.
:param sigma_2: Optional a variance (sigma squared) to use. If none is provided it is learned.
:return: The variance corrected loss. | entailment |
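Plugging numbers into the correction (0.5 / sigma^2 * loss + log(sigma^2 + 1)) shows how a larger learned variance down-weights a loss at the cost of a penalty term; plain Python with illustrative values.
import math
loss = 2.0
print(0.5 / 1.0 * loss + math.log(1.0 + 1.0))  # sigma^2 = 1 -> ~1.69
print(0.5 / 4.0 * loss + math.log(4.0 + 1.0))  # sigma^2 = 4 -> ~1.86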
def multiloss(losses, logging_namespace="multiloss", exclude_from_weighting=[]):
"""
Create a loss from multiple losses by mixing them.
This multi-loss implementation is inspired by the Paper "Multi-Task Learning Using Uncertainty to Weight Losses
for Scene Geometry and Semantics" by Kendall, Gal and Cipolla.
:param losses: A dict containing all losses that should be merged.
:param logging_namespace: Variable scope in which multiloss lives.
:param exclude_from_weighting: A list of losses that are already weighted and should not be sigma weighted.
:return: A single loss.
"""
with tf.variable_scope(logging_namespace):
sum_loss = 0
for loss_name, loss in losses.items():
if loss_name not in exclude_from_weighting:
with tf.variable_scope(loss_name) as scope:
sum_loss += variance_corrected_loss(loss)
else:
sum_loss += loss
return sum_loss | Create a loss from multiple losses by mixing them.
This multi-loss implementation is inspired by the Paper "Multi-Task Learning Using Uncertainty to Weight Losses
for Scene Geometry and Semantics" by Kendall, Gal and Cipolla.
:param losses: A dict containing all losses that should be merged.
:param logging_namespace: Variable scope in which multiloss lives.
:param exclude_from_weighting: A list of losses that are already weighted and should not be sigma weighted.
:return: A single loss. | entailment |
def focus_loss(labels, probs, loss, gamma):
"""
Calculate the alpha balanced focal loss.
See the focal loss paper: "Focal Loss for Dense Object Detection" [by Facebook AI Research]
:param labels: A float tensor of shape [batch_size, ..., num_classes] representing the label class probabilities.
:param probs: A float tensor of shape [batch_size, ..., num_classes] representing the probs (after softmax).
:param loss: A float tensor of shape [batch_size, ...] representing the loss that should be focused.
:param gamma: The focus parameter.
:return: A tensor representing the weighted cross entropy.
"""
with tf.variable_scope("focus_loss"):
# Compute p_t that is used in paper.
# FIXME is it possible that the 1-p term does not make any sense?
p_t = tf.reduce_sum(probs * labels, axis=-1)# + tf.reduce_sum((1.0 - probs) * (1.0 - labels), axis=-1)
focal_factor = tf.pow(1.0 - p_t, gamma) if gamma > 0 else 1 # Improve stability for gamma = 0
return tf.stop_gradient(focal_factor) * loss | Calculate the alpha balanced focal loss.
See the focal loss paper: "Focal Loss for Dense Object Detection" [by Facebook AI Research]
:param labels: A float tensor of shape [batch_size, ..., num_classes] representing the label class probabilities.
:param probs: A float tensor of shape [batch_size, ..., num_classes] representing the probs (after softmax).
:param loss: A float tensor of shape [batch_size, ...] representing the loss that should be focused.
:param gamma: The focus parameter.
:return: A tensor representing the weighted cross entropy. | entailment |
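The focal factor is simply (1 - p_t)^gamma applied to the probability assigned to the true class; a NumPy illustration with invented probabilities.
import numpy as np
gamma = 2.0
labels = np.array([[1, 0], [0, 1]], dtype=np.float32)
probs = np.array([[0.9, 0.1], [0.3, 0.7]], dtype=np.float32)
p_t = np.sum(probs * labels, axis=-1)     # [0.9, 0.7]
ce = -np.log(p_t)                         # per-sample cross entropy
print(np.power(1.0 - p_t, gamma) * ce)    # easy examples are strongly down-weighted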
def layer(op):
'''Decorator for composable network layers.'''
def layer_decorated(self, *args, **kwargs):
# Automatically set a name if not provided.
name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
# Figure out the layer inputs.
if len(self.terminals) == 0:
raise RuntimeError('No input variables found for layer %s.' % name)
elif len(self.terminals) == 1:
layer_input = self.terminals[0]
else:
layer_input = list(self.terminals)
# Perform the operation and get the output.
layer_output = op(self, layer_input, *args, **kwargs)
# Add to layer LUT.
self.layers[name] = layer_output
# This output is now the input for the next layer.
self.feed(layer_output)
# Return self for chained calls.
return self
return layer_decorated | Decorator for composable network layers. | entailment |
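The decorator's effect is to register the layer output in the LUT and make it the next input, returning self so calls can be chained; below is a toy, self-contained illustration of that pattern (not the real Network class).
class TinyNet(object):
    def __init__(self, x):
        self.terminals = [x]
        self.layers = {}
    def feed(self, value):
        self.terminals = [value]
        return self
    def double(self, name):
        out = self.terminals[0] * 2   # stand-in for the wrapped op
        self.layers[name] = out       # add to layer LUT
        self.feed(out)                # output becomes the next input
        return self                   # enables chained calls
print(TinyNet(3).double('a').double('b').layers)  # {'a': 6, 'b': 12}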
def multi_output_layer(op):
'''Decorator for composable network layers.'''
def layer_decorated(self, *args, **kwargs):
# Automatically set a name if not provided.
name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
output_names = kwargs.setdefault('output_names', self.get_unique_name(op.__name__))
# Figure out the layer inputs.
if len(self.terminals) == 0:
raise RuntimeError('No input variables found for layer %s.' % name)
elif len(self.terminals) == 1:
layer_input = self.terminals[0]
else:
layer_input = list(self.terminals)
# Perform the operation and get the output.
layer_output = op(self, layer_input, *args, **kwargs)
# Add to layer LUT.
for i in range(len(output_names)):
self.layers[output_names[i]] = layer_output[i]
# This output is now the input for the next layer.
self.feed(layer_output)
# Return self for chained calls.
return self
return layer_decorated | Decorator for composable network layers. | entailment |
def _load(self, data_path, ignore_missing=False):
'''Load network weights.
data_path: The path to the numpy-serialized network weights
session: The current TensorFlow session
ignore_missing: If true, serialized weights for missing layers are ignored.
'''
if data_path.endswith(".npz"):
data_dict = np.load(data_path)
keys = sorted(data_dict.keys())
for i, k in enumerate(keys):
data = data_dict[k]
op_name = "_".join(k.split("_")[:-1])
param_name = "weights" if k.split("_")[-1] == "W" else "biases"
if self.verbose:
print("Loaded: {} {}".format(op_name, param_name))
if op_name not in self.weights:
self.weights[op_name] = {}
self.weights[op_name][param_name] = data
elif data_path.endswith(".npy"):
data_dict = np.load(data_path).item()
for op_name in data_dict:
with tf.variable_scope(op_name, reuse=True):
for param_name, data in data_dict[op_name].iteritems():
if self.verbose:
print("Loaded: {} {}".format(op_name, param_name))
if op_name not in self.weights:
self.weights[op_name] = {}
self.weights[op_name][param_name] = data
else:
raise RuntimeError("Invalid file type.") | Load network weights.
data_path: The path to the numpy-serialized network weights
session: The current TensorFlow session
ignore_missing: If true, serialized weights for missing layers are ignored. | entailment |
def feed(self, *args):
'''Set the input(s) for the next operation by replacing the terminal nodes.
The arguments can be either layer names or the actual layers.
'''
assert len(args) != 0
self.terminals = []
for fed_layer in args:
if isinstance(fed_layer, str):
try:
fed_layer = self.layers[fed_layer]
except KeyError:
raise KeyError('Unknown layer name fed: %s' % fed_layer)
self.terminals.append(fed_layer)
return self | Set the input(s) for the next operation by replacing the terminal nodes.
The arguments can be either layer names or the actual layers. | entailment |
def get_unique_name(self, prefix):
'''Returns an index-suffixed unique name for the given prefix.
This is used for auto-generating layer names based on the type-prefix.
'''
ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1
return '%s_%d' % (prefix, ident) | Returns an index-suffixed unique name for the given prefix.
This is used for auto-generating layer names based on the type-prefix. | entailment |
def make_var(self, op_name, name, shape):
'''Creates a new TensorFlow variable.'''
if op_name in self.weights and name in self.weights[op_name]:
if self.verbose:
print("Using: {} {}".format(op_name, name))
initializer = tf.constant(self.weights[op_name][name], shape=shape)
return tf.get_variable(name, initializer=initializer, trainable=self.trainable)
return tf.get_variable(name, shape, trainable=self.trainable) | Creates a new TensorFlow variable. | entailment |
def mode_to_str(mode):
"""
Converts a tf.estimator.ModeKeys into a readable string.
:param mode: The mode as a tf.estimator.ModeKeys
:return: A human readable string representing the mode.
"""
if mode == tf.estimator.ModeKeys.TRAIN:
return "train"
if mode == tf.estimator.ModeKeys.EVAL:
return "eval"
if mode == tf.estimator.ModeKeys.PREDICT:
return "predict"
return "unknown" | Converts a tf.estimator.ModeKeys in a nice readable string.
:param mode: The mdoe as a tf.estimator.ModeKeys
:return: A human readable string representing the mode. | entailment |
def tf_if(condition, a, b):
"""
Implements an if condition in tensorflow.
:param condition: A boolean condition.
:param a: Case a.
:param b: Case b.
:return: A if condition was true, b otherwise.
"""
int_condition = tf.to_float(tf.to_int64(condition))
return a * int_condition + (1 - int_condition) * b | Implements an if condition in tensorflow.
:param condition: A boolean condition.
:param a: Case a.
:param b: Case b.
:return: A if condition was true, b otherwise. | entailment |
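The trick is purely arithmetic: cast the condition to 0/1 and blend the two branches; the same expression in NumPy for clarity.
import numpy as np
condition = np.array([True, False, True])
a = np.array([10.0, 20.0, 30.0])
b = np.array([-1.0, -2.0, -3.0])
c = condition.astype(np.float32)
print(a * c + (1 - c) * b)  # [10. -2. 30.]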
def _read_data_legacy(prefix, batch_size):
"""
Loads a tf record as tensors you can use.
:param prefix: The path prefix as defined in the write data method.
:param batch_size: The batch size you want for the tensors.
:return: A feature tensor dict and a label tensor dict.
"""
prefix = prefix.replace("\\", "/")
folder = "/".join(prefix.split("/")[:-1])
phase = prefix.split("/")[-1]
config = json.load(open(prefix + '_config.json'))
num_threads = config["num_threads"]
filenames = [folder + "/" + f for f in listdir(folder) if isfile(join(folder, f)) and phase in f and not "config.json" in f]
# Create a tf object for the filename list and the readers.
filename_queue = tf.train.string_input_producer(filenames)
readers = [_read_tf_record(filename_queue, config) for _ in range(num_threads)]
batch_dict = tf.train.shuffle_batch_join(
readers,
batch_size=batch_size,
capacity=10 * batch_size,
min_after_dequeue=5 * batch_size
)
# Add batch dimension to feature and label shape
feature_batch = {}
label_batch = {}
for k in batch_dict.keys():
shape = tuple([batch_size] + list(config[k]["shape"]))
tensor = tf.reshape(batch_dict[k], shape, name="input/"+phase+"/" + k + "_reshape")
if "feature_" in k:
feature_batch["_".join(k.split("_")[1:])] = tensor
if "label_" in k:
label_batch["_".join(k.split("_")[1:])] = tensor
return feature_batch, label_batch | Loads a tf record as tensors you can use.
:param prefix: The path prefix as defined in the write data method.
:param batch_size: The batch size you want for the tensors.
:return: A feature tensor dict and a label tensor dict. | entailment |
def _read_data(prefix, batch_size, augmentation=None):
"""
Loads a dataset.
:param prefix: The path prefix as defined in the write data method.
:param batch_size: The batch size you want for the tensors.
:param augmentation: An augmentation function.
:return: A tensorflow.data.dataset object.
"""
prefix = prefix.replace("\\", "/")
folder = "/".join(prefix.split("/")[:-1])
phase = prefix.split("/")[-1]
config = json.load(open(prefix + '_config.json'))
num_threads = config["num_threads"]
filenames = [folder + "/" + f for f in listdir(folder) if isfile(join(folder, f)) and phase in f and not "config.json" in f]
dataset = tf.data.TFRecordDataset(filenames=filenames, num_parallel_reads=num_threads)
dataset = dataset.shuffle(buffer_size=10 * batch_size)
dataset = dataset.repeat()
dataset = dataset.map(map_func=_create_parser_fn(config, phase), num_parallel_calls=num_threads)
if augmentation is not None:
dataset = dataset.map(map_func=augmentation, num_parallel_calls=num_threads)
dataset = dataset.batch(batch_size=batch_size)
dataset = dataset.prefetch(buffer_size=1)
return dataset | Loads a dataset.
:param prefix: The path prefix as defined in the write data method.
:param batch_size: The batch size you want for the tensors.
:param augmentation: An augmentation function.
:return: A tensorflow.data.dataset object. | entailment |
def create_input_fn(prefix, batch_size, augmentation=None):
"""
Loads a dataset.
:param prefix: The path prefix as defined in the write data method.
:param batch_size: The batch size you want for the tensors.
:param augmentation: An augmentation function.
:return: An input function for a tf estimator.
"""
# Check if the version is too old for dataset api to work better than manually loading data.
if tf.__version__.startswith("1.6") or tf.__version__.startswith("1.5") or tf.__version__.startswith("1.4") \
or tf.__version__.startswith("1.3") or tf.__version__.startswith("1.2") \
or tf.__version__.startswith("1.1") or tf.__version__.startswith("1.0"):
def input_fn():
with tf.variable_scope("input_pipeline"):
return _read_data_legacy(prefix, batch_size)
return input_fn
else:
def input_fn():
with tf.variable_scope("input_pipeline"):
return _read_data(prefix, batch_size, augmentation)
return input_fn | Loads a dataset.
:param prefix: The path prefix as defined in the write data method.
:param batch_size: The batch size you want for the tensors.
:param augmentation: An augmentation function.
:return: An input function for a tf estimator. | entailment |
def write_data(hyper_params,
mode,
sequence,
num_threads):
"""
Write a tf record containing a feature dict and a label dict.
:param hyper_params: The hyper parameters required for writing {"problem": {"augmentation": {"steps": Int}}}
:param mode: The mode specifies the purpose of the data. Typically it is either "train" or "validation".
:param sequence: A tf.keras.utils.sequence.
:param num_threads: The number of threads. (Recommended: 4 for training and 2 for validation seems to work nicely)
:return:
"""
if not isinstance(sequence, Sequence) and not (callable(getattr(sequence, "__getitem__", None)) and callable(getattr(sequence, "__len__", None))):
raise ValueError("sequence must be tf.keras.utils.Sequence or a subtype or implement __len__(self) and __getitem__(self, idx)")
prefix = os.path.join(hyper_params.train.get("tf_records_path", "tfrecords"), mode)
prefix = prefix.replace("\\", "/")
data_tmp_folder = "/".join(prefix.split("/")[:-1])
if not os.path.exists(data_tmp_folder):
os.makedirs(data_tmp_folder)
args = [(hyper_params, sequence, num_threads, i, (prefix + "_%d.tfrecords") % i) for i in range(num_threads)]
# Retrieve a single batch
sample_feature, sample_label = sequence[0]
config = {"num_threads": num_threads}
for k in sample_feature.keys():
config["feature_" + k] = {"shape": sample_feature[k].shape[1:], "dtype": sample_feature[k].dtype.name}
for k in sample_label.keys():
config["label_" + k] = {"shape": sample_label[k].shape[1:], "dtype": sample_label[k].dtype.name}
with open(prefix + '_config.json', 'w') as outfile:
json.dump(config, outfile)
pool = Pool(processes=num_threads)
pool.map(_write_tf_record_pool_helper, args) | Write a tf record containing a feature dict and a label dict.
:param hyper_params: The hyper parameters required for writing {"problem": {"augmentation": {"steps": Int}}}
:param mode: The mode specifies the purpose of the data. Typically it is either "train" or "validation".
:param sequence: A tf.keras.utils.sequence.
:param num_threads: The number of threads. (Recommended: 4 for training and 2 for validation seems to work nicely)
:return: | entailment |
def env(self, value):
"""
Sets context.env, context.env_short, and context.account_alias if env is valid
For envs of the form "global.<account>" and "mgmt.<account_alias>",
env is captured as "global" or "mgmt" and account_alias is parsed
out of the full env rather than looked up
Args:
value: the fully-qualified env value
Raises:
ValueError if env is not valid
"""
env_valid(value)
self._env_full = value
if value.find(".") == -1:
# plain environment, e.g. prod, staging, proto<n>
self._env = value
self._account_alias = get_account_alias(value)
else:
# "<env>.<account_alias>" form, e.g. global.ellationeng or mgmt.ellationeng
self._env, self._account_alias = value.split(".")
# since we extracted an env, must reconfirm that it's legit
global_env_valid(self._env)
self._env_short = get_env_short(value) | Sets context.env, context.env_short, and context.account_alias if env is valid
For envs of the form "global.<account>" and "mgmt.<account_alias>",
env is captured as "global" or "mgmt" and account_alias is parsed
out of the full env rather than looked up
Args:
value: the fully-qualified env value
Raises:
ValueError if env is not valid | entailment |
def service_registry(self, sr):
"""
Sets service registry object in context, doesn't check it
Args:
sr: EFServiceRegistry object
"""
if type(sr) is not EFServiceRegistry:
raise TypeError("sr value must be type 'EFServiceRegistry'")
self._service_registry = sr | Sets service registry object in context, doesn't check it
Args:
sr: EFServiceRegistry object | entailment |
def account_id(self, value):
"""
Sets the current account id
Args:
value: current account id (string)
Returns:
None
"""
if type(value) is not str:
raise TypeError("account_id value must be string")
self._account_id = value | Sets the current account id
Args:
value: current account id (string)
Returns:
None | entailment |
def aws_client(self, client_id=None):
"""
Get AWS client if it exists (must have been formerly stored with set_aws_clients)
If client_id is not provided, returns the dictionary of all clients
Args:
client_id: label for the client, e.g. 'ec2'; omit to get a dictionary of all clients
Returns:
aws client if found, or None if not
"""
if client_id is None:
return self._aws_clients
elif self._aws_clients is not None and self._aws_clients.has_key(client_id):
return self._aws_clients[client_id]
else:
return None | Get AWS client if it exists (must have been formerly stored with set_aws_clients)
If client_id is not provided, returns the dictionary of all clients
Args:
client_id: label for the client, e.g. 'ec2'; omit to get a dictionary of all clients
Returns:
aws client if found, or None if not | entailment |
def set_aws_clients(self, clients):
"""
Stash a dictionary of AWS clients in the context object
Args:
clients: dictionary of clients
"""
if type(clients) is not dict:
raise TypeError("clients must be a dict")
self._aws_clients = clients | Stash a dictionary of AWS clients in the context object
Args:
clients: dictionary of clients | entailment |
def handle_args_and_set_context(args):
"""
Args:
args: the command line args, probably passed from main() as sys.argv[1:]
Returns:
a populated EFVersionContext object
"""
parser = argparse.ArgumentParser()
parser.add_argument("service_name", help="name of the service")
parser.add_argument("key", help="version key to look up for <service_name> such as 'ami-id' (list in EF_Config)")
parser.add_argument("env", help=", ".join(EFConfig.ENV_LIST))
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--get", help="get current version", action="store_true")
group.add_argument("--set", help="set current version of <key> to <value> for <service_name>")
group.add_argument("--rollback", help="set current version to most recent 'stable' version in history",
action="store_true")
group.add_argument("--rollback-to", help="rollback current version to <ami-id> in history",
action="store", metavar='<ami-id>')
group.add_argument("--history", help="Show version history for env/service/key", choices=['json', 'text'])
group.add_argument("--show", help="Show keys and values. '*' allowed for <key> and <env>",
action="store_true", default=False)
parser.add_argument("--build",
help="On --set, also set the externally defined build number associated with the version entity",
default="")
parser.add_argument("--commit_hash", help="On --set, also set the commit hash associated with the version entity",
default="")
parser.add_argument("--commit", help="Actually --set or --rollback (dry run if omitted)",
action="store_true", default=False)
parser.add_argument("--devel", help="Allow running from branch; don't refresh from origin", action="store_true",
default=False)
parser.add_argument("--force_env_full", help="Override env with env_full for account-scoped environments",
action="store_true", default=False)
parser.add_argument("--limit", help="Limit 'history', 'rollback', 'show' to first N records (default 100, max 1000)",
type=int, default=100)
parser.add_argument("--location", help="On --set, also mark the url location of the static build's version file to"
"support dist-hash precheck", default="")
if EFConfig.ALLOW_EF_VERSION_SKIP_PRECHECK:
parser.add_argument("--noprecheck", help="--set or --rollback without precheck", action="store_true", default=False)
parser.add_argument("--sr", help="optional /path/to/service_registry_file.json", default=None)
parser.add_argument("--stable", help="On --set, also mark the version 'stable'", action="store_true")
parser.add_argument("--verbose", help="Print additional info", action="store_true", default=False)
# parse
parsed_args = vars(parser.parse_args(args))
context = EFVersionContext()
# marshall the inherited context values
context._build_number = parsed_args["build"]
context._commit_hash = parsed_args["commit_hash"]
context.commit = parsed_args["commit"]
context.devel = parsed_args["devel"]
context._force_env_full = parsed_args["force_env_full"]
try:
context.env = parsed_args["env"]
except ValueError as e:
fail("Error in env: {}".format(e.message))
# marshall this module's additional context values
context._get = parsed_args["get"]
context._history = parsed_args["history"]
context._key = parsed_args["key"]
if EFConfig.ALLOW_EF_VERSION_SKIP_PRECHECK:
context._noprecheck = parsed_args["noprecheck"]
if not 1 <= parsed_args["limit"] <= 1000:
fail("Error in --limit. Valid range: 1..1000")
context._limit = parsed_args["limit"]
context._location = parsed_args["location"]
context._rollback = parsed_args["rollback"]
context._rollback_to = parsed_args["rollback_to"]
context._service_name = parsed_args["service_name"]
context._show = parsed_args["show"]
context._stable = parsed_args["stable"]
context._value = parsed_args["set"]
# Set up service registry and policy template path which depends on it
context.service_registry = EFServiceRegistry(parsed_args["sr"])
# VERBOSE is global
global VERBOSE
VERBOSE = parsed_args["verbose"]
validate_context(context)
return context | Args:
args: the command line args, probably passed from main() as sys.argv[1:]
Returns:
a populated EFVersionContext object | entailment |
def validate_context(context):
"""
Validate the current context.
Args:
context: a populated EFVersionContext object
"""
# Service must exist in service registry
if not context.service_registry.service_record(context.service_name):
fail("service: {} not found in service registry: {}".format(
context.service_name, context.service_registry.filespec))
service_type = context.service_registry.service_record(context.service_name)["type"]
# Key must be valid
if context.key not in EFConfig.VERSION_KEYS:
fail("invalid key: {}; see VERSION_KEYS in ef_config for supported keys".format(context.key))
# Lookup allowed key for service type
if "allowed_types" in EFConfig.VERSION_KEYS[context.key] and \
service_type not in EFConfig.VERSION_KEYS[context.key]["allowed_types"]:
fail("service_type: {} is not allowed for key {}; see VERSION_KEYS[KEY]['allowed_types']"
"in ef_config and validate service registry entry".format(service_type, context.key))
return True | Validate the current context.
Args:
context: a populated EFVersionContext object | entailment |