sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def add_unit(unit, **kwargs):
    """
    Add the unit described by the dict ``unit`` to the DB.

    If unit["project_id"] is None it means that the unit is global,
    otherwise it is the property of a project.
    If the unit exists emits an exception.

    A minimal example:

    .. code-block:: python

        new_unit = dict(
            name = 'Teaspoons per second',
            abbreviation = 'tsp s^-1',
            cf = 0, # Constant conversion factor
            lf = 1.47867648e-05, # Linear conversion factor
            dimension_id = 2,
            description = 'A flow of one teaspoon per second.',
        )
        add_unit(new_unit)
    """
    db_unit = Unit()
    db_unit.dimension_id = unit["dimension_id"]
    db_unit.name = unit['name']
    # 'abbreviation' is the canonical key (uniformed from 'abbr')
    db_unit.abbreviation = unit['abbreviation']
    # 'description' is the canonical key (uniformed from 'info')
    db_unit.description = unit['description']
    db_unit.lf = unit['lf']
    db_unit.cf = unit['cf']
    # A non-null project id marks the unit as project-specific rather
    # than global.
    if unit.get('project_id') is not None:
        db_unit.project_id = unit['project_id']
    # Persist the new unit.
    db.DBSession.add(db_unit)
    db.DBSession.flush()
    return JSONObject(db_unit)
If unit["project_id"] is None it means that the unit is global, otherwise is property of a project
If the unit exists emits an exception
A minimal example:
.. code-block:: python
new_unit = dict(
name = 'Teaspoons per second',
abbreviation = 'tsp s^-1',
cf = 0, # Constant conversion factor
lf = 1.47867648e-05, # Linear conversion factor
dimension_id = 2,
description = 'A flow of one teaspoon per second.',
)
add_unit(new_unit) | entailment |
def bulk_add_units(unit_list, **kwargs):
    """
    Save all the units contained in the passed list, with the name of
    their dimension, returning them wrapped in a JSONObject.
    """
    saved_units = [add_unit(unit, **kwargs) for unit in unit_list]
    return JSONObject({"units": saved_units})
def delete_unit(unit_id, **kwargs):
    """
    Delete the unit identified by ``unit_id`` from the DB.

    Raises an exception if the unit does not exist.
    """
    # Keep the try narrow: only the lookup can raise NoResultFound.
    try:
        unit_i = db.DBSession.query(Unit).filter(Unit.id == unit_id).one()
    except NoResultFound:
        raise ResourceNotFoundError("Unit (ID=%s) does not exist"%(unit_id))
    db.DBSession.delete(unit_i)
    db.DBSession.flush()
    return True
Raises an exception if the unit does not exist | entailment |
def update_unit(unit, **kwargs):
    """
    Update a unit in the DB.

    :param dict unit: must contain 'id', 'name', 'abbreviation',
        'description', 'lf' and 'cf'; may contain 'project_id'.
    :raises ResourceNotFoundError: if the unit does not exist
    :returns: the updated unit as a JSONObject
    """
    try:
        # The join ensures the unit is attached to a valid dimension.
        # The original chained a bare `.filter()` with no criterion
        # here, which was a no-op and has been removed.
        db_unit = db.DBSession.query(Unit).join(Dimension).filter(Unit.id==unit["id"]).one()
    except NoResultFound:
        raise ResourceNotFoundError("Unit (ID=%s) does not exist"%(unit["id"]))
    db_unit.name = unit["name"]
    # BUG FIX: use subscript access consistently. 'unit' is handled as
    # a dict everywhere else in this function, so attribute access
    # (unit.abbreviation / unit.description) raised AttributeError for
    # plain dict inputs.
    db_unit.abbreviation = unit["abbreviation"]
    db_unit.description = unit["description"]
    db_unit.lf = unit["lf"]
    db_unit.cf = unit["cf"]
    # Only overwrite the project ownership when a real value is given.
    if "project_id" in unit and unit['project_id'] is not None and unit['project_id'] != "":
        db_unit.project_id = unit["project_id"]
    db.DBSession.flush()
    return JSONObject(db_unit)
Raises an exception if the unit does not exist | entailment |
def convert_dataset(dataset_id, target_unit_abbreviation, **kwargs):
    """
    Convert a whole dataset (specified by 'dataset_id') to a new unit
    ('target_unit_abbreviation').

    Conversion ALWAYS creates a NEW dataset (unless an identical one
    already exists), so the function returns the dataset ID of the new
    (or matching existing) dataset.

    :raises HydraError: if the dataset has no unit, is a descriptor,
        or has an unrecognised type.
    """
    ds_i = db.DBSession.query(Dataset).filter(Dataset.id==dataset_id).one()

    dataset_type = ds_i.type
    dsval = ds_i.get_val()
    source_unit_abbreviation = get_unit(ds_i.unit_id).abbreviation

    if source_unit_abbreviation is None:
        raise HydraError('Dataset has no units.')

    if dataset_type == 'scalar':
        new_val = convert(float(dsval), source_unit_abbreviation, target_unit_abbreviation)
    elif dataset_type == 'array':
        dim = array_dim(dsval)
        vecdata = arr_to_vector(dsval)
        newvec = convert(vecdata, source_unit_abbreviation, target_unit_abbreviation)
        new_val = vector_to_arr(newvec, dim)
    elif dataset_type == 'timeseries':
        new_val = []
        for ts_time, ts_val in dsval.items():
            dim = array_dim(ts_val)
            vecdata = arr_to_vector(ts_val)
            newvec = convert(vecdata, source_unit_abbreviation, target_unit_abbreviation)
            newarr = vector_to_arr(newvec, dim)
            # BUG FIX: list.append takes a single argument; the original
            # `new_val.append(ts_time, newarr)` raised TypeError. Store
            # the (timestamp, value) pair as a tuple.
            new_val.append((ts_time, newarr))
    elif dataset_type == 'descriptor':
        raise HydraError('Cannot convert descriptor.')
    else:
        # Previously an unrecognised type fell through and raised a
        # NameError on 'new_val'; fail explicitly instead.
        raise HydraError('Cannot convert dataset of type %s.' % dataset_type)

    new_dataset = Dataset()
    new_dataset.type = ds_i.type
    new_dataset.value = str(new_val)  # The data type is TEXT!!!
    new_dataset.name = ds_i.name
    new_dataset.unit_id = get_unit_by_abbreviation(target_unit_abbreviation).id
    new_dataset.hidden = 'N'
    new_dataset.set_metadata(ds_i.get_metadata_as_dict())
    new_dataset.set_hash()

    # Reuse an existing dataset with an identical hash, if any.
    existing_ds = db.DBSession.query(Dataset).filter(Dataset.hash==new_dataset.hash).first()
    if existing_ds is not None:
        db.DBSession.expunge_all()
        return existing_ds.id

    db.DBSession.add(new_dataset)
    db.DBSession.flush()
    return new_dataset.id
Conversion ALWAYS creates a NEW dataset, so function returns the dataset ID of new dataset. | entailment |
def validate_resource_attributes(resource, attributes, template,
                                 check_unit=True, exact_match=False, **kwargs):
    """
    Validate that the resource provided matches the template.

    Only passes if the resource contains ONLY the attributes specified
    in the template.

    The template should take the form of a dictionary, as should the
    resources.

    *check_unit*:  Makes sure that if a unit is specified in the template,
                   it is the same in the data
    *exact_match*: Ensures that all the attributes in the template are in
                   the data also. By default this is false, meaning a subset
                   of the template attributes may be specified in the data.
                   An attribute specified in the data *must* be defined in
                   the template.

    @returns a list of error messages. An empty list indicates no
    errors were found.
    """
    errors = []
    # Identify whether this is a node, link, network or group from the
    # keys present on the resource.
    res_type = 'GROUP'
    if resource.get('x') is not None:
        res_type = 'NODE'
    elif resource.get('node_1_id') is not None:
        res_type = 'LINK'
    elif resource.get('nodes') is not None:
        res_type = 'NETWORK'

    # Find all the node/link/network definitions in the template
    tmpl_res = template['resources'][res_type]

    # The user-specified type of the resource
    res_user_type = resource.get('type')

    # Check the user-specified type is in the template. Without a valid
    # type the attributes cannot be validated, so return early.
    # BUG FIX: the original appended the error but fell through to
    # `tmpl_res.get(res_user_type)['attributes']`, raising TypeError
    # when subscripting None.
    if res_user_type is None:
        errors.append("No type specified on resource %s"%(resource['name']))
        return errors
    if tmpl_res.get(res_user_type) is None:
        errors.append("Resource %s is defined as having type %s but "
                      "this type is not specified in the template."%
                      (resource['name'], res_user_type))
        return errors

    # It is in the template. Now check all the attributes are correct.
    tmpl_attrs = tmpl_res.get(res_user_type)['attributes']

    # Build an attr_id -> attribute lookup; template definitions take
    # precedence over the passed-in attributes.
    attrs = {}
    for a in attributes.values():
        attrs[a['id']] = a
    for a in tmpl_attrs.values():
        if a.get('id') is not None:
            attrs[a['id']] = {'name': a['name'],
                              'unit': a.get('unit'),
                              'dimen': a.get('dimension')}

    if exact_match is True:
        # Check that all the attributes in the template are in the data,
        # comparing the two name sets in both directions.
        tmpl_attr_names = set(tmpl_attrs.keys())
        resource_attr_names = []
        for ra in resource['attributes']:
            attr_name = attrs[ra['attr_id']]['name']
            resource_attr_names.append(attr_name)
        resource_attr_names = set(resource_attr_names)

        in_tmpl_not_in_resource = tmpl_attr_names - resource_attr_names
        in_resource_not_in_tmpl = resource_attr_names - tmpl_attr_names
        if len(in_tmpl_not_in_resource) > 0:
            errors.append("Template has defined attributes %s for type %s but they are not"
                          " specified in the Data."%(','.join(in_tmpl_not_in_resource),
                                                     res_user_type ))
        if len(in_resource_not_in_tmpl) > 0:
            errors.append("Resource %s (type %s) has defined attributes %s but this is not"
                          " specified in the Template."%(resource['name'],
                                                         res_user_type,
                                                         ','.join(in_resource_not_in_tmpl)))

    # Check that each of the attributes specified on the resource is valid.
    for res_attr in resource['attributes']:
        attr = attrs.get(res_attr['attr_id'])
        if attr is None:
            errors.append("An attribute mismatch has occurred. Attr %s is not "
                          "defined in the data but is present on resource %s"
                          %(res_attr['attr_id'], resource['name']))
            continue

        # If an attribute is not specified in the template, then throw an error
        if tmpl_attrs.get(attr['name']) is None:
            errors.append("Resource %s has defined attribute %s but this is not"
                          " specified in the Template."%(resource['name'], attr['name']))
        else:
            # If the dimensions or units don't match, throw an error
            tmpl_attr = tmpl_attrs[attr['name']]

            if tmpl_attr.get('data_type') is not None:
                if res_attr.get('type') is not None:
                    if tmpl_attr.get('data_type') != res_attr.get('type'):
                        errors.append("Error in data. Template says that %s on %s is a %s, but data suggests it is a %s"%
                                      (attr['name'], resource['name'], tmpl_attr.get('data_type'), res_attr.get('type')))

            # Missing dimensions are treated as 'dimensionless' on both sides.
            attr_dimension = 'dimensionless' if attr.get('dimension') is None else attr.get('dimension')
            tmpl_attr_dimension = 'dimensionless' if tmpl_attr.get('dimension') is None else tmpl_attr.get('dimension')

            if attr_dimension.lower() != tmpl_attr_dimension.lower():
                errors.append("Dimension mismatch on resource %s for attribute %s"
                              " (template says %s on type %s, data says %s)"%
                              (resource['name'], attr.get('name'),
                               tmpl_attr.get('dimension'), res_user_type, attr_dimension))

            if check_unit is True:
                if tmpl_attr.get('unit') is not None:
                    if attr.get('unit') != tmpl_attr.get('unit'):
                        errors.append("Unit mismatch for resource %s with unit %s "
                                      "(template says %s for type %s)"
                                      %(resource['name'], attr.get('unit'),
                                        tmpl_attr.get('unit'), res_user_type))
    return errors
Only passes if the resource contains ONLY the attributes specified
in the template.
The template should take the form of a dictionary, as should the
resources.
*check_unit*: Makes sure that if a unit is specified in the template, it
is the same in the data
*exact_match*: Ensures that all the attributes in the template are in
the data also. By default this is false, meaning a subset
of the template attributes may be specified in the data.
An attribute specified in the data *must* be defined in
the template.
@returns a list of error messages. An empty list indicates no
errors were found. | entailment |
def encode(encoding, data):
    """
    Encodes the given data using the encoding that is specified

    :param str encoding: encoding to use, should be one of the supported encodings
    :param data: data to encode
    :type data: str or bytes
    :return: multibase encoded data
    :rtype: bytes
    :raises ValueError: if the encoding is not supported
    """
    data = ensure_bytes(data, 'utf8')
    try:
        # Look the codec up once. Previously the lookup was performed
        # twice and the try block also wrapped converter.encode(), so a
        # KeyError raised *inside* the converter was misreported as an
        # unsupported encoding.
        codec = ENCODINGS_LOOKUP[encoding]
    except KeyError:
        raise ValueError('Encoding {} not supported.'.format(encoding))
    return codec.code + codec.converter.encode(data)
:param str encoding: encoding to use, should be one of the supported encoding
:param data: data to encode
:type data: str or bytes
:return: multibase encoded data
:rtype: bytes
:raises ValueError: if the encoding is not supported | entailment |
def get_codec(data):
    """
    Returns the codec used to encode the given data

    :param data: multibase encoded data
    :type data: str or bytes
    :return: the :py:obj:`multibase.Encoding` object for the data's codec
    :raises ValueError: if the codec is not supported
    """
    # The codec is identified by the leading CODE_LENGTH bytes.
    prefix = ensure_bytes(data[:CODE_LENGTH], 'utf8')
    try:
        return ENCODINGS_LOOKUP[prefix]
    except KeyError:
        raise ValueError('Can not determine encoding for {}'.format(data))
:param data: multibase encoded data
:type data: str or bytes
:return: the :py:obj:`multibase.Encoding` object for the data's codec
:raises ValueError: if the codec is not supported | entailment |
def decode(data):
    """
    Decode the multibase encoded data

    :param data: multibase encoded data
    :type data: str or bytes
    :return: decoded data
    :rtype: str
    :raises ValueError: if the data is not multibase encoded
    """
    raw = ensure_bytes(data, 'utf8')
    # Strip the codec prefix before handing the payload to the converter.
    return get_codec(raw).converter.decode(raw[CODE_LENGTH:])
:param data: multibase encoded data
:type data: str or bytes
:return: decoded data
:rtype: str
:raises ValueError: if the data is not multibase encoded | entailment |
def ed25519_generate_key_pair_from_secret(secret):
    """
    Generate a new key pair from a seed.

    Args:
        secret (:class:`string`): A secret that serves as a seed

    Returns:
        A tuple of (private_key, public_key) encoded in base58.
    """
    # NOTE: if you want to do this correctly, use a key derivation
    # function instead of a bare hash!
    seed = secret if isinstance(secret, bytes) else secret.encode()
    hash_bytes = sha3.keccak_256(seed).digest()
    signing_key = Ed25519SigningKeyFromHash.generate(hash_bytes=hash_bytes)
    # Private key
    private_value_base58 = signing_key.encode(encoding='base58')
    # Public key
    public_value_compressed_base58 = \
        signing_key.get_verifying_key().encode(encoding='base58')
    return private_value_base58, public_value_compressed_base58
Args:
secret (:class:`string`): A secret that serves as a seed
Returns:
A tuple of (private_key, public_key) encoded in base58. | entailment |
def generate_key_pair(secret=None):
    """Generates a cryptographic key pair.

    Args:
        secret (:class:`string`): A secret that serves as a seed

    Returns:
        :class:`~bigchaindb.common.crypto.CryptoKeypair`: A
        :obj:`collections.namedtuple` with named fields
        :attr:`~bigchaindb.common.crypto.CryptoKeypair.private_key` and
        :attr:`~bigchaindb.common.crypto.CryptoKeypair.public_key`.
    """
    # Without a seed, delegate to the library's random keypair.
    if not secret:
        return generate_keypair()
    raw_private, raw_public = ed25519_generate_key_pair_from_secret(secret)
    return CryptoKeypair(raw_private.decode(), raw_public.decode())
Args:
secret (:class:`string`): A secret that serves as a seed
Returns:
:class:`~bigchaindb.common.crypto.CryptoKeypair`: A
:obj:`collections.namedtuple` with named fields
:attr:`~bigchaindb.common.crypto.CryptoKeypair.private_key` and
:attr:`~bigchaindb.common.crypto.CryptoKeypair.public_key`. | entailment |
def _get_role_arn():
    """
    Return the role arn from the X-Role-ARN header, else look it up
    from the request's source IP, else fall back to the command line
    default.
    """
    header_arn = bottle.request.headers.get('X-Role-ARN')
    if header_arn:
        return header_arn
    ip_arn = _lookup_ip_role_arn(bottle.request.environ.get('REMOTE_ADDR'))
    if ip_arn:
        return ip_arn
    return _role_arn
lookup role arn from source IP,
or fall back to command line default. | entailment |
def _on_dynamodb_exception(self, error):
    """Dynamically handle DynamoDB exceptions, returning HTTP error
    responses.

    :param exceptions.DynamoDBException error:
    """
    if isinstance(error, exceptions.ConditionalCheckFailedException):
        raise web.HTTPError(409, reason='Condition Check Failure')
    if isinstance(error, exceptions.NoCredentialsError):
        # Only translate credential failures into a 429 when configured
        # to do so; otherwise fall through to the generic 500 below.
        if _no_creds_should_return_429():
            raise web.HTTPError(429, reason='Instance Credentials Failure')
    elif isinstance(error, (exceptions.ThroughputExceeded,
                            exceptions.ThrottlingException)):
        raise web.HTTPError(429, reason='Too Many Requests')
    if hasattr(self, 'logger'):
        self.logger.error('DynamoDB Error: %s', error)
    raise web.HTTPError(500, reason=str(error))
responses.
:param exceptions.DynamoDBException error: | entailment |
def _chunk_with_padding(self, iterable, n, fillvalue=None):
    """Collect data into fixed-length chunks or blocks, padding the
    final chunk with *fillvalue*.

    e.g. _chunk_with_padding('ABCDEFG', 3, 'x') --> ABC DEF Gxx
    """
    # n references to the SAME iterator make zip_longest consume the
    # input n items at a time.
    iterators = [iter(iterable)] * n
    return zip_longest(*iterators, fillvalue=fillvalue)
def marshall(values):
    """
    Marshall a `dict` into something DynamoDB likes.

    Return the values in the nested dict structure that is required for
    writing the values to DynamoDB.

    :param dict values: The values to marshall
    :rtype: dict
    :raises ValueError: if an unsupported type is encountered
    """
    return {key: _marshall_value(value) for key, value in values.items()}
:param dict values: The values to marshall
:rtype: dict
:raises ValueError: if an unsupported type is encountered
Return the values in a nested dict structure that is required for
writing the values to DynamoDB. | entailment |
def unmarshall(values):
    """
    Transform a response payload from DynamoDB to a native dict

    :param dict values: The response payload from DynamoDB
    :rtype: dict
    :raises ValueError: if an unsupported type code is encountered
    """
    return {key: _unmarshall_dict(value) for key, value in values.items()}
:param dict values: The response payload from DynamoDB
:rtype: dict
:raises ValueError: if an unsupported type code is encountered | entailment |
def _marshall_value(value):
    """
    Recursively transform `value` into an AttributeValue `dict`
    :param mixed value: The value to encode
    :rtype: dict
    :raises ValueError: for unsupported types
    Return the value as dict indicating the data type and transform or
    recursively process the value if required.
    """
    # NOTE: branch order is load-bearing here. bool must be tested
    # before (int, float) because bool is a subclass of int, and the
    # PYTHON3/Python-2 string checks must precede the generic cases.
    if PYTHON3 and isinstance(value, bytes):
        return {'B': base64.b64encode(value).decode('ascii')}
    elif PYTHON3 and isinstance(value, str):
        return {'S': value}
    elif not PYTHON3 and isinstance(value, str):
        # Python 2 `str` may hold binary data; detect and encode it.
        if is_binary(value):
            return {'B': base64.b64encode(value).decode('ascii')}
        return {'S': value}
    elif not PYTHON3 and isinstance(value, unicode):
        return {'S': value.encode('utf-8')}
    elif isinstance(value, dict):
        # Maps are marshalled recursively via the module-level helper.
        return {'M': marshall(value)}
    elif isinstance(value, bool):
        return {'BOOL': value}
    elif isinstance(value, (int, float)):
        # DynamoDB transmits numbers as strings.
        return {'N': str(value)}
    elif isinstance(value, datetime.datetime):
        return {'S': value.isoformat()}
    elif isinstance(value, uuid.UUID):
        return {'S': str(value)}
    elif isinstance(value, list):
        return {'L': [_marshall_value(v) for v in value]}
    elif isinstance(value, set):
        # Sets must be homogeneous: a binary set (BS), string set (SS)
        # or number set (NS). Set members are sorted for determinism.
        if PYTHON3 and all([isinstance(v, bytes) for v in value]):
            return {'BS': _encode_binary_set(value)}
        elif PYTHON3 and all([isinstance(v, str) for v in value]):
            return {'SS': sorted(list(value))}
        elif all([isinstance(v, (int, float)) for v in value]):
            return {'NS': sorted([str(v) for v in value])}
        elif not PYTHON3 and all([isinstance(v, str) for v in value]) and \
                all([is_binary(v) for v in value]):
            return {'BS': _encode_binary_set(value)}
        elif not PYTHON3 and all([isinstance(v, str) for v in value]) and \
                all([is_binary(v) is False for v in value]):
            return {'SS': sorted(list(value))}
        else:
            raise ValueError('Can not mix types in a set')
    elif value is None:
        return {'NULL': True}
    raise ValueError('Unsupported type: %s' % type(value))
:param mixed value: The value to encode
:rtype: dict
:raises ValueError: for unsupported types
Return the value as dict indicating the data type and transform or
recursively process the value if required. | entailment |
def _unmarshall_dict(value):
    """Unmarshall a single AttributeValue dict returned from DynamoDB,
    returning the value as a normal Python value.

    :param dict value: The value to unmarshall
    :rtype: mixed
    :raises ValueError: if an unsupported type code is encountered
    """
    # An AttributeValue holds a single type-code key.
    type_code = list(value.keys()).pop()
    if type_code == 'B':
        return base64.b64decode(value[type_code].encode('ascii'))
    if type_code == 'BS':
        return set([base64.b64decode(v.encode('ascii'))
                    for v in value[type_code]])
    if type_code == 'BOOL':
        return value[type_code]
    if type_code == 'L':
        return [_unmarshall_dict(v) for v in value[type_code]]
    if type_code == 'M':
        return unmarshall(value[type_code])
    if type_code == 'NULL':
        return None
    if type_code == 'N':
        return _to_number(value[type_code])
    if type_code == 'NS':
        return set([_to_number(v) for v in value[type_code]])
    if type_code == 'S':
        return value[type_code]
    if type_code == 'SS':
        return set(value[type_code])
    raise ValueError('Unsupported value type: %s' % type_code)
DynamoDB, returning the value as a normal Python dict.
:param dict value: The value to unmarshall
:rtype: mixed
:raises ValueError: if an unsupported type code is encountered | entailment |
def _unwrap_result(action, result):
    """Unwrap a request response and return only the response data.

    :param str action: The action name
    :param result: The result of the action
    :type: result: list or dict
    :rtype: dict | None
    """
    if not result:
        return
    # Dispatch to the action-specific unwrapper; unknown actions are
    # returned unchanged.
    unwrappers = {
        'DeleteItem': _unwrap_delete_put_update_item,
        'PutItem': _unwrap_delete_put_update_item,
        'UpdateItem': _unwrap_delete_put_update_item,
        'GetItem': _unwrap_get_item,
        'Query': _unwrap_query_scan,
        'Scan': _unwrap_query_scan,
        'CreateTable': _unwrap_create_table,
        'DescribeTable': _unwrap_describe_table,
    }
    unwrapper = unwrappers.get(action)
    if unwrapper is None:
        return result
    return unwrapper(result)
:param str action: The action name
:param result: The result of the action
:type: result: list or dict
:rtype: dict | None | entailment |
def list_tables(self, exclusive_start_table_name=None, limit=None):
    """
    Invoke the `ListTables`_ function.

    Returns an array of table names associated with the current account
    and endpoint. The output from *ListTables* is paginated, with each
    page returning a maximum of ``100`` table names.

    :param str exclusive_start_table_name: The first table name that
        this operation will evaluate. Use the value that was returned
        for ``LastEvaluatedTableName`` in a previous operation, so that
        you can obtain the next page of results.
    :param int limit: A maximum number of table names to return. If
        this parameter is not specified, the limit is ``100``.

    .. _ListTables: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_ListTables.html
    """
    requested = {'ExclusiveStartTableName': exclusive_start_table_name,
                 'Limit': limit}
    # Only send the parameters the caller actually supplied.
    payload = {key: value for key, value in requested.items() if value}
    return self.execute('ListTables', payload)
Returns an array of table names associated with the current account
and endpoint. The output from *ListTables* is paginated, with each page
returning a maximum of ``100`` table names.
:param str exclusive_start_table_name: The first table name that this
operation will evaluate. Use the value that was returned for
``LastEvaluatedTableName`` in a previous operation, so that you can
obtain the next page of results.
:param int limit: A maximum number of table names to return. If this
parameter is not specified, the limit is ``100``.
.. _ListTables: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_ListTables.html | entailment |
def put_item(self, table_name, item,
             condition_expression=None,
             expression_attribute_names=None,
             expression_attribute_values=None,
             return_consumed_capacity=None,
             return_item_collection_metrics=None,
             return_values=None):
    """Invoke the `PutItem`_ function, creating a new item or replacing
    an old item with a new item. If an item with the same primary key
    already exists in the specified table, the new item completely
    replaces the existing item. A conditional put (add only if absent,
    or replace only when attributes match) can be performed via
    *condition_expression*.

    :param str table_name: The table to put the item to
    :param dict item: A map of attribute name/value pairs, one for each
        attribute. Only the primary key attributes are required; other
        attribute name-value pairs are optional. All primary key
        attributes must be provided (partition key, and sort key for a
        composite key). Attributes that are part of an index key must
        match the types in the table's attribute definition.
    :param str condition_expression: A condition that must be satisfied
        in order for a conditional *PutItem* operation to succeed.
    :param dict expression_attribute_names: One or more substitution
        tokens for attribute names in an expression.
    :param dict expression_attribute_values: One or more values that
        can be substituted in an expression.
    :param str return_consumed_capacity: Level of detail about
        provisioned throughput consumption returned in the response.
        Should be ``None`` or one of ``INDEXES`` or ``TOTAL``.
    :param str return_item_collection_metrics: Determines whether item
        collection metrics are returned.
    :param str return_values: Use ``ReturnValues`` if you want to get
        the item attributes as they appeared before they were updated
        with the ``PutItem`` request.
    :rtype: tornado.concurrent.Future

    .. _PutItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_PutItem.html
    """
    payload = {'TableName': table_name, 'Item': utils.marshall(item)}
    # Fold in the simple optional parameters that are sent verbatim.
    optional = (('ConditionExpression', condition_expression),
                ('ExpressionAttributeNames', expression_attribute_names),
                ('ExpressionAttributeValues', expression_attribute_values),
                ('ReturnConsumedCapacity', return_consumed_capacity))
    for key, value in optional:
        if value:
            payload[key] = value
    if return_item_collection_metrics:
        # NOTE(review): any truthy value is coerced to 'SIZE' here --
        # the caller-supplied string itself is never sent.
        payload['ReturnItemCollectionMetrics'] = 'SIZE'
    if return_values:
        _validate_return_values(return_values)
        payload['ReturnValues'] = return_values
    return self.execute('PutItem', payload)
old item with a new item. If an item that has the same primary key as
the new item already exists in the specified table, the new item
completely replaces the existing item. You can perform a conditional
put operation (add a new item if one with the specified primary key
doesn't exist), or replace an existing item if it has certain attribute
values.
For more information about using this API, see Working with Items in
the Amazon DynamoDB Developer Guide.
:param str table_name: The table to put the item to
:param dict item: A map of attribute name/value pairs, one for each
attribute. Only the primary key attributes are required; you can
optionally provide other attribute name-value pairs for the item.
You must provide all of the attributes for the primary key. For
example, with a simple primary key, you only need to provide a
value for the partition key. For a composite primary key, you must
provide both values for both the partition key and the sort key.
If you specify any attributes that are part of an index key, then
the data types for those attributes must match those of the schema
in the table's attribute definition.
:param str condition_expression: A condition that must be satisfied in
order for a conditional *PutItem* operation to succeed. See the
`AWS documentation for ConditionExpression <http://docs.aws.amazon.
com/amazondynamodb/latest/APIReference/API_PutItem.html#DDB-Put
Item-request-ConditionExpression>`_ for more information.
:param dict expression_attribute_names: One or more substitution tokens
for attribute names in an expression. See the `AWS documentation
for ExpressionAttributeNames <http://docs.aws.amazon.com/amazon
dynamodb/latest/APIReference/API_PutItem.html#DDB-PutItem-request-
ExpressionAttributeNames>`_ for more information.
:param dict expression_attribute_values: One or more values that can be
substituted in an expression. See the `AWS documentation
for ExpressionAttributeValues <http://docs.aws.amazon.com/amazon
dynamodb/latest/APIReference/API_PutItem.html#DDB-PutItem-request-
ExpressionAttributeValues>`_ for more information.
:param str return_consumed_capacity: Determines the level of detail
about provisioned throughput consumption that is returned in the
response. Should be ``None`` or one of ``INDEXES`` or ``TOTAL``
:param str return_item_collection_metrics: Determines whether item
collection metrics are returned.
:param str return_values: Use ``ReturnValues`` if you want to get the
item attributes as they appeared before they were updated with the
``PutItem`` request.
:rtype: tornado.concurrent.Future
.. _PutItem: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_PutItem.html | entailment |
def get_item(self, table_name, key_dict,
             consistent_read=False,
             expression_attribute_names=None,
             projection_expression=None,
             return_consumed_capacity=None):
    """
    Invoke the `GetItem`_ function.

    :param str table_name: table to retrieve the item from
    :param dict key_dict: key to use for retrieval. This will be
        marshalled for you so a native :class:`dict` works.
    :param bool consistent_read: Determines the read consistency model:
        if set to :py:data:`True`, the operation uses strongly
        consistent reads; otherwise, eventually consistent reads.
    :param dict expression_attribute_names: One or more substitution
        tokens for attribute names in an expression.
    :param str projection_expression: A comma-separated string that
        identifies one or more attributes to retrieve from the table
        (scalars, sets, or elements of a JSON document). If omitted,
        all attributes are returned; requested attributes that are not
        found will not appear in the result.
    :param str return_consumed_capacity: Level of detail about
        provisioned throughput consumption returned in the response:
        ``INDEXES`` (aggregate plus per-table/index capacity -- note
        *GetItem*/*BatchGetItem* do not access indexes, so only table
        capacity is reported), ``TOTAL`` (aggregate only) or ``NONE``.
    :rtype: tornado.concurrent.Future

    .. _GetItem: http://docs.aws.amazon.com/amazondynamodb/
       latest/APIReference/API_GetItem.html
    """
    request = {'TableName': table_name,
               'Key': utils.marshall(key_dict),
               'ConsistentRead': consistent_read}
    if expression_attribute_names:
        request['ExpressionAttributeNames'] = expression_attribute_names
    if projection_expression:
        request['ProjectionExpression'] = projection_expression
    if return_consumed_capacity:
        # Reject values other than INDEXES/TOTAL/NONE before sending.
        _validate_return_consumed_capacity(return_consumed_capacity)
        request['ReturnConsumedCapacity'] = return_consumed_capacity
    return self.execute('GetItem', request)
:param str table_name: table to retrieve the item from
:param dict key_dict: key to use for retrieval. This will
be marshalled for you so a native :class:`dict` works.
:param bool consistent_read: Determines the read consistency model: If
set to :py:data`True`, then the operation uses strongly consistent
reads; otherwise, the operation uses eventually consistent reads.
:param dict expression_attribute_names: One or more substitution tokens
for attribute names in an expression.
:param str projection_expression: A string that identifies one or more
attributes to retrieve from the table. These attributes can include
scalars, sets, or elements of a JSON document. The attributes in
the expression must be separated by commas. If no attribute names
are specified, then all attributes will be returned. If any of the
requested attributes are not found, they will not appear in the
result.
:param str return_consumed_capacity: Determines the level of detail
about provisioned throughput consumption that is returned in the
response:
- INDEXES: The response includes the aggregate consumed
capacity for the operation, together with consumed capacity for
each table and secondary index that was accessed. Note that
some operations, such as *GetItem* and *BatchGetItem*, do not
access any indexes at all. In these cases, specifying INDEXES
will only return consumed capacity information for table(s).
- TOTAL: The response includes only the aggregate consumed
capacity for the operation.
- NONE: No consumed capacity details are included in the
response.
:rtype: tornado.concurrent.Future
.. _GetItem: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_GetItem.html | entailment |
def update_item(self, table_name, key_dict,
condition_expression=None,
update_expression=None,
expression_attribute_names=None,
expression_attribute_values=None,
return_consumed_capacity=None,
return_item_collection_metrics=None,
return_values=None):
"""Invoke the `UpdateItem`_ function.
Edits an existing item's attributes, or adds a new item to the table
if it does not already exist. You can put, delete, or add attribute
values. You can also perform a conditional update on an existing item
(insert a new attribute name-value pair if it doesn't exist, or replace
an existing name-value pair if it has certain expected attribute
values).
:param str table_name: The name of the table that contains the item to
update
:param dict key_dict: A dictionary of key/value pairs that are used to
define the primary key values for the item. For the primary key,
you must provide all of the attributes. For example, with a simple
primary key, you only need to provide a value for the partition
key. For a composite primary key, you must provide values for both
the partition key and the sort key.
:param str condition_expression: A condition that must be satisfied in
order for a conditional *UpdateItem* operation to succeed. One of:
``attribute_exists``, ``attribute_not_exists``, ``attribute_type``,
``contains``, ``begins_with``, ``size``, ``=``, ``<>``, ``<``,
``>``, ``<=``, ``>=``, ``BETWEEN``, ``IN``, ``AND``, ``OR``, or
``NOT``.
:param str update_expression: An expression that defines one or more
attributes to be updated, the action to be performed on them, and
new value(s) for them.
:param dict expression_attribute_names: One or more substitution tokens
for attribute names in an expression.
:param dict expression_attribute_values: One or more values that can be
substituted in an expression.
:param str return_consumed_capacity: Determines the level of detail
about provisioned throughput consumption that is returned in the
response. See the `AWS documentation
for ReturnConsumedCapacity <http://docs.aws.amazon.com/
amazondynamodb/latest/APIReference/API_UpdateItem.html#DDB-Update
Item-request-ReturnConsumedCapacity>`_ for more information.
:param str return_item_collection_metrics: Determines whether item
collection metrics are returned.
:param str return_values: Use ReturnValues if you want to get the item
attributes as they appeared either before or after they were
updated. See the `AWS documentation for ReturnValues <http://docs.
aws.amazon.com/amazondynamodb/latest/APIReference/
API_UpdateItem.html#DDB-UpdateItem-request-ReturnValues>`_
:rtype: tornado.concurrent.Future
.. _UpdateItem: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_UpdateItem.html
"""
payload = {'TableName': table_name,
'Key': utils.marshall(key_dict),
'UpdateExpression': update_expression}
if condition_expression:
payload['ConditionExpression'] = condition_expression
if expression_attribute_names:
payload['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values:
payload['ExpressionAttributeValues'] = \
utils.marshall(expression_attribute_values)
if return_consumed_capacity:
_validate_return_consumed_capacity(return_consumed_capacity)
payload['ReturnConsumedCapacity'] = return_consumed_capacity
if return_item_collection_metrics:
_validate_return_item_collection_metrics(
return_item_collection_metrics)
payload['ReturnItemCollectionMetrics'] = \
return_item_collection_metrics
if return_values:
_validate_return_values(return_values)
payload['ReturnValues'] = return_values
return self.execute('UpdateItem', payload) | Invoke the `UpdateItem`_ function.
Edits an existing item's attributes, or adds a new item to the table
if it does not already exist. You can put, delete, or add attribute
values. You can also perform a conditional update on an existing item
(insert a new attribute name-value pair if it doesn't exist, or replace
an existing name-value pair if it has certain expected attribute
values).
:param str table_name: The name of the table that contains the item to
update
:param dict key_dict: A dictionary of key/value pairs that are used to
define the primary key values for the item. For the primary key,
you must provide all of the attributes. For example, with a simple
primary key, you only need to provide a value for the partition
key. For a composite primary key, you must provide values for both
the partition key and the sort key.
:param str condition_expression: A condition that must be satisfied in
order for a conditional *UpdateItem* operation to succeed. One of:
``attribute_exists``, ``attribute_not_exists``, ``attribute_type``,
``contains``, ``begins_with``, ``size``, ``=``, ``<>``, ``<``,
``>``, ``<=``, ``>=``, ``BETWEEN``, ``IN``, ``AND``, ``OR``, or
``NOT``.
:param str update_expression: An expression that defines one or more
attributes to be updated, the action to be performed on them, and
new value(s) for them.
:param dict expression_attribute_names: One or more substitution tokens
for attribute names in an expression.
:param dict expression_attribute_values: One or more values that can be
substituted in an expression.
:param str return_consumed_capacity: Determines the level of detail
about provisioned throughput consumption that is returned in the
response. See the `AWS documentation
for ReturnConsumedCapacity <http://docs.aws.amazon.com/
amazondynamodb/latest/APIReference/API_UpdateItem.html#DDB-Update
Item-request-ReturnConsumedCapacity>`_ for more information.
:param str return_item_collection_metrics: Determines whether item
collection metrics are returned.
:param str return_values: Use ReturnValues if you want to get the item
attributes as they appeared either before or after they were
updated. See the `AWS documentation for ReturnValues <http://docs.
aws.amazon.com/amazondynamodb/latest/APIReference/
API_UpdateItem.html#DDB-UpdateItem-request-ReturnValues>`_
:rtype: tornado.concurrent.Future
.. _UpdateItem: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_UpdateItem.html | entailment |
def query(self, table_name,
index_name=None,
consistent_read=None,
key_condition_expression=None,
filter_expression=None,
expression_attribute_names=None,
expression_attribute_values=None,
projection_expression=None,
select=None,
exclusive_start_key=None,
limit=None,
scan_index_forward=True,
return_consumed_capacity=None):
"""A `Query`_ operation uses the primary key of a table or a secondary
index to directly access items from that table or index.
:param str table_name: The name of the table containing the requested
items.
:param bool consistent_read: Determines the read consistency model: If
set to ``True``, then the operation uses strongly consistent reads;
otherwise, the operation uses eventually consistent reads. Strongly
consistent reads are not supported on global secondary indexes. If
you query a global secondary index with ``consistent_read`` set to
``True``, you will receive a
:exc:`~sprockets_dynamodb.exceptions.ValidationException`.
:param dict exclusive_start_key: The primary key of the first
item that this operation will evaluate. Use the value that was
returned for ``LastEvaluatedKey`` in the previous operation. In a
parallel scan, a *Scan* request that includes
``exclusive_start_key`` must specify the same segment whose
previous *Scan* returned the corresponding value of
``LastEvaluatedKey``.
:param dict expression_attribute_names: One or more substitution tokens
for attribute names in an expression.
:param dict expression_attribute_values: One or more values that can be
substituted in an expression.
:param str key_condition_expression: The condition that specifies the
key value(s) for items to be retrieved by the *Query* action. The
condition must perform an equality test on a single partition key
value, but can optionally perform one of several comparison tests
on a single sort key value. The partition key equality test is
required. For examples see `KeyConditionExpression
<https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/
Query.html#Query.KeyConditionExpressions>.
:param str filter_expression: A string that contains conditions that
DynamoDB applies after the *Query* operation, but before the data
is returned to you. Items that do not satisfy the criteria are not
returned. Note that a filter expression is applied after the items
have already been read; the process of filtering does not consume
any additional read capacity units. For more information, see
`Filter Expressions <http://docs.aws.amazon.com/amazondynamodb/
latest/developerguide/QueryAndScan.html#FilteringResults>`_ in the
Amazon DynamoDB Developer Guide.
:param str projection_expression:
:param str index_name: The name of a secondary index to query. This
index can be any local secondary index or global secondary index.
Note that if you use this parameter, you must also provide
``table_name``.
:param int limit: The maximum number of items to evaluate (not
necessarily the number of matching items). If DynamoDB processes
the number of items up to the limit while processing the results,
it stops the operation and returns the matching values up to that
point, and a key in ``LastEvaluatedKey`` to apply in a subsequent
operation, so that you can pick up where you left off. Also, if the
processed data set size exceeds 1 MB before DynamoDB reaches this
limit, it stops the operation and returns the matching values up to
the limit, and a key in ``LastEvaluatedKey`` to apply in a
subsequent operation to continue the operation. For more
information, see `Query and Scan <http://docs.aws.amazon.com/amazo
ndynamodb/latest/developerguide/QueryAndScan.html>`_ in the Amazon
DynamoDB Developer Guide.
:param str return_consumed_capacity: Determines the level of detail
about provisioned throughput consumption that is returned in the
response:
- ``INDEXES``: The response includes the aggregate consumed
capacity for the operation, together with consumed capacity for
each table and secondary index that was accessed. Note that
some operations, such as *GetItem* and *BatchGetItem*, do not
access any indexes at all. In these cases, specifying
``INDEXES`` will only return consumed capacity information for
table(s).
- ``TOTAL``: The response includes only the aggregate consumed
capacity for the operation.
- ``NONE``: No consumed capacity details are included in the
response.
:param bool scan_index_forward: Specifies the order for index
traversal: If ``True`` (default), the traversal is performed in
ascending order; if ``False``, the traversal is performed in
descending order. Items with the same partition key value are
stored in sorted order by sort key. If the sort key data type is
*Number*, the results are stored in numeric order. For type
*String*, the results are stored in order of ASCII character code
values. For type *Binary*, DynamoDB treats each byte of the binary
data as unsigned. If set to ``True``, DynamoDB returns the results
in the order in which they are stored (by sort key value). This is
the default behavior. If set to ``False``, DynamoDB reads the
results in reverse order by sort key value, and then returns the
results to the client.
:param str select: The attributes to be returned in the result. You can
retrieve all item attributes, specific item attributes, the count
of matching items, or in the case of an index, some or all of the
attributes projected into the index. Possible values are:
- ``ALL_ATTRIBUTES``: Returns all of the item attributes from the
specified table or index. If you query a local secondary index,
then for each matching item in the index DynamoDB will fetch
the entire item from the parent table. If the index is
configured to project all item attributes, then all of the data
can be obtained from the local secondary index, and no fetching
is required.
- ``ALL_PROJECTED_ATTRIBUTES``: Allowed only when querying an
index. Retrieves all attributes that have been projected into
the index. If the index is configured to project all
attributes, this return value is equivalent to specifying
``ALL_ATTRIBUTES``.
- ``COUNT``: Returns the number of matching items, rather than
the matching items themselves.
:rtype: dict
.. _Query: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_Query.html
"""
payload = {'TableName': table_name,
'ScanIndexForward': scan_index_forward}
if index_name:
payload['IndexName'] = index_name
if consistent_read is not None:
payload['ConsistentRead'] = consistent_read
if key_condition_expression:
payload['KeyConditionExpression'] = key_condition_expression
if filter_expression:
payload['FilterExpression'] = filter_expression
if expression_attribute_names:
payload['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values:
payload['ExpressionAttributeValues'] = \
utils.marshall(expression_attribute_values)
if projection_expression:
payload['ProjectionExpression'] = projection_expression
if select:
_validate_select(select)
payload['Select'] = select
if exclusive_start_key:
payload['ExclusiveStartKey'] = utils.marshall(exclusive_start_key)
if limit:
payload['Limit'] = limit
if return_consumed_capacity:
_validate_return_consumed_capacity(return_consumed_capacity)
payload['ReturnConsumedCapacity'] = return_consumed_capacity
return self.execute('Query', payload) | A `Query`_ operation uses the primary key of a table or a secondary
index to directly access items from that table or index.
:param str table_name: The name of the table containing the requested
items.
:param bool consistent_read: Determines the read consistency model: If
set to ``True``, then the operation uses strongly consistent reads;
otherwise, the operation uses eventually consistent reads. Strongly
consistent reads are not supported on global secondary indexes. If
you query a global secondary index with ``consistent_read`` set to
``True``, you will receive a
:exc:`~sprockets_dynamodb.exceptions.ValidationException`.
:param dict exclusive_start_key: The primary key of the first
item that this operation will evaluate. Use the value that was
returned for ``LastEvaluatedKey`` in the previous operation. In a
parallel scan, a *Scan* request that includes
``exclusive_start_key`` must specify the same segment whose
previous *Scan* returned the corresponding value of
``LastEvaluatedKey``.
:param dict expression_attribute_names: One or more substitution tokens
for attribute names in an expression.
:param dict expression_attribute_values: One or more values that can be
substituted in an expression.
:param str key_condition_expression: The condition that specifies the
key value(s) for items to be retrieved by the *Query* action. The
condition must perform an equality test on a single partition key
value, but can optionally perform one of several comparison tests
on a single sort key value. The partition key equality test is
required. For examples see `KeyConditionExpression
<https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/
Query.html#Query.KeyConditionExpressions>.
:param str filter_expression: A string that contains conditions that
DynamoDB applies after the *Query* operation, but before the data
is returned to you. Items that do not satisfy the criteria are not
returned. Note that a filter expression is applied after the items
have already been read; the process of filtering does not consume
any additional read capacity units. For more information, see
`Filter Expressions <http://docs.aws.amazon.com/amazondynamodb/
latest/developerguide/QueryAndScan.html#FilteringResults>`_ in the
Amazon DynamoDB Developer Guide.
:param str projection_expression:
:param str index_name: The name of a secondary index to query. This
index can be any local secondary index or global secondary index.
Note that if you use this parameter, you must also provide
``table_name``.
:param int limit: The maximum number of items to evaluate (not
necessarily the number of matching items). If DynamoDB processes
the number of items up to the limit while processing the results,
it stops the operation and returns the matching values up to that
point, and a key in ``LastEvaluatedKey`` to apply in a subsequent
operation, so that you can pick up where you left off. Also, if the
processed data set size exceeds 1 MB before DynamoDB reaches this
limit, it stops the operation and returns the matching values up to
the limit, and a key in ``LastEvaluatedKey`` to apply in a
subsequent operation to continue the operation. For more
information, see `Query and Scan <http://docs.aws.amazon.com/amazo
ndynamodb/latest/developerguide/QueryAndScan.html>`_ in the Amazon
DynamoDB Developer Guide.
:param str return_consumed_capacity: Determines the level of detail
about provisioned throughput consumption that is returned in the
response:
- ``INDEXES``: The response includes the aggregate consumed
capacity for the operation, together with consumed capacity for
each table and secondary index that was accessed. Note that
some operations, such as *GetItem* and *BatchGetItem*, do not
access any indexes at all. In these cases, specifying
``INDEXES`` will only return consumed capacity information for
table(s).
- ``TOTAL``: The response includes only the aggregate consumed
capacity for the operation.
- ``NONE``: No consumed capacity details are included in the
response.
:param bool scan_index_forward: Specifies the order for index
traversal: If ``True`` (default), the traversal is performed in
ascending order; if ``False``, the traversal is performed in
descending order. Items with the same partition key value are
stored in sorted order by sort key. If the sort key data type is
*Number*, the results are stored in numeric order. For type
*String*, the results are stored in order of ASCII character code
values. For type *Binary*, DynamoDB treats each byte of the binary
data as unsigned. If set to ``True``, DynamoDB returns the results
in the order in which they are stored (by sort key value). This is
the default behavior. If set to ``False``, DynamoDB reads the
results in reverse order by sort key value, and then returns the
results to the client.
:param str select: The attributes to be returned in the result. You can
retrieve all item attributes, specific item attributes, the count
of matching items, or in the case of an index, some or all of the
attributes projected into the index. Possible values are:
- ``ALL_ATTRIBUTES``: Returns all of the item attributes from the
specified table or index. If you query a local secondary index,
then for each matching item in the index DynamoDB will fetch
the entire item from the parent table. If the index is
configured to project all item attributes, then all of the data
can be obtained from the local secondary index, and no fetching
is required.
- ``ALL_PROJECTED_ATTRIBUTES``: Allowed only when querying an
index. Retrieves all attributes that have been projected into
the index. If the index is configured to project all
attributes, this return value is equivalent to specifying
``ALL_ATTRIBUTES``.
- ``COUNT``: Returns the number of matching items, rather than
the matching items themselves.
:rtype: dict
.. _Query: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_Query.html | entailment |
def scan(self,
table_name,
index_name=None,
consistent_read=None,
projection_expression=None,
filter_expression=None,
expression_attribute_names=None,
expression_attribute_values=None,
segment=None,
total_segments=None,
select=None,
limit=None,
exclusive_start_key=None,
return_consumed_capacity=None):
"""The `Scan`_ operation returns one or more items and item attributes
by accessing every item in a table or a secondary index.
If the total number of scanned items exceeds the maximum data set size
limit of 1 MB, the scan stops and results are returned to the user as a
``LastEvaluatedKey`` value to continue the scan in a subsequent
operation. The results also include the number of items exceeding the
limit. A scan can result in no table data meeting the filter criteria.
By default, Scan operations proceed sequentially; however, for faster
performance on a large table or secondary index, applications can
request a parallel *Scan* operation by providing the ``segment`` and
``total_segments`` parameters. For more information, see
`Parallel Scan <http://docs.aws.amazon.com/amazondynamodb/latest/
developerguide/QueryAndScan.html#QueryAndScanParallelScan>`_ in the
Amazon DynamoDB Developer Guide.
By default, *Scan* uses eventually consistent reads when accessing the
data in a table; therefore, the result set might not include the
changes to data in the table immediately before the operation began. If
you need a consistent copy of the data, as of the time that the *Scan*
begins, you can set the ``consistent_read`` parameter to ``True``.
:rtype: dict
.. _Scan: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_Scan.html
"""
payload = {'TableName': table_name}
if index_name:
payload['IndexName'] = index_name
if consistent_read is not None:
payload['ConsistentRead'] = consistent_read
if filter_expression:
payload['FilterExpression'] = filter_expression
if expression_attribute_names:
payload['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values:
payload['ExpressionAttributeValues'] = \
utils.marshall(expression_attribute_values)
if projection_expression:
payload['ProjectionExpression'] = projection_expression
if segment:
payload['Segment'] = segment
if total_segments:
payload['TotalSegments'] = total_segments
if select:
_validate_select(select)
payload['Select'] = select
if exclusive_start_key:
payload['ExclusiveStartKey'] = utils.marshall(exclusive_start_key)
if limit:
payload['Limit'] = limit
if return_consumed_capacity:
_validate_return_consumed_capacity(return_consumed_capacity)
payload['ReturnConsumedCapacity'] = return_consumed_capacity
return self.execute('Scan', payload) | The `Scan`_ operation returns one or more items and item attributes
by accessing every item in a table or a secondary index.
If the total number of scanned items exceeds the maximum data set size
limit of 1 MB, the scan stops and results are returned to the user as a
``LastEvaluatedKey`` value to continue the scan in a subsequent
operation. The results also include the number of items exceeding the
limit. A scan can result in no table data meeting the filter criteria.
By default, Scan operations proceed sequentially; however, for faster
performance on a large table or secondary index, applications can
request a parallel *Scan* operation by providing the ``segment`` and
``total_segments`` parameters. For more information, see
`Parallel Scan <http://docs.aws.amazon.com/amazondynamodb/latest/
developerguide/QueryAndScan.html#QueryAndScanParallelScan>`_ in the
Amazon DynamoDB Developer Guide.
By default, *Scan* uses eventually consistent reads when accessing the
data in a table; therefore, the result set might not include the
changes to data in the table immediately before the operation began. If
you need a consistent copy of the data, as of the time that the *Scan*
begins, you can set the ``consistent_read`` parameter to ``True``.
:rtype: dict
.. _Scan: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_Scan.html | entailment |
def execute(self, action, parameters):
"""
Execute a DynamoDB action with the given parameters. The method will
retry requests that failed due to OS level errors or when being
throttled by DynamoDB.
:param str action: DynamoDB action to invoke
:param dict parameters: parameters to send into the action
:rtype: tornado.concurrent.Future
This method creates a future that will resolve to the result
of calling the specified DynamoDB function. It does it's best
to unwrap the response from the function to make life a little
easier for you. It does this for the ``GetItem`` and ``Query``
functions currently.
:raises:
:exc:`~sprockets_dynamodb.exceptions.DynamoDBException`
:exc:`~sprockets_dynamodb.exceptions.ConfigNotFound`
:exc:`~sprockets_dynamodb.exceptions.NoCredentialsError`
:exc:`~sprockets_dynamodb.exceptions.NoProfileError`
:exc:`~sprockets_dynamodb.exceptions.TimeoutException`
:exc:`~sprockets_dynamodb.exceptions.RequestException`
:exc:`~sprockets_dynamodb.exceptions.InternalFailure`
:exc:`~sprockets_dynamodb.exceptions.LimitExceeded`
:exc:`~sprockets_dynamodb.exceptions.MissingParameter`
:exc:`~sprockets_dynamodb.exceptions.OptInRequired`
:exc:`~sprockets_dynamodb.exceptions.ResourceInUse`
:exc:`~sprockets_dynamodb.exceptions.RequestExpired`
:exc:`~sprockets_dynamodb.exceptions.ResourceNotFound`
:exc:`~sprockets_dynamodb.exceptions.ServiceUnavailable`
:exc:`~sprockets_dynamodb.exceptions.ThroughputExceeded`
:exc:`~sprockets_dynamodb.exceptions.ValidationException`
"""
measurements = collections.deque([], self._max_retries)
for attempt in range(1, self._max_retries + 1):
try:
result = yield self._execute(
action, parameters, attempt, measurements)
except (exceptions.InternalServerError,
exceptions.RequestException,
exceptions.ThrottlingException,
exceptions.ThroughputExceeded,
exceptions.ServiceUnavailable) as error:
if attempt == self._max_retries:
if self._instrumentation_callback:
self._instrumentation_callback(measurements)
self._on_exception(error)
duration = self._sleep_duration(attempt)
self.logger.warning('%r on attempt %i, sleeping %.2f seconds',
error, attempt, duration)
yield gen.sleep(duration)
except exceptions.DynamoDBException as error:
if self._instrumentation_callback:
self._instrumentation_callback(measurements)
self._on_exception(error)
else:
if self._instrumentation_callback:
self._instrumentation_callback(measurements)
self.logger.debug('%s result: %r', action, result)
raise gen.Return(_unwrap_result(action, result)) | Execute a DynamoDB action with the given parameters. The method will
retry requests that failed due to OS level errors or when being
throttled by DynamoDB.
:param str action: DynamoDB action to invoke
:param dict parameters: parameters to send into the action
:rtype: tornado.concurrent.Future
This method creates a future that will resolve to the result
of calling the specified DynamoDB function. It does it's best
to unwrap the response from the function to make life a little
easier for you. It does this for the ``GetItem`` and ``Query``
functions currently.
:raises:
:exc:`~sprockets_dynamodb.exceptions.DynamoDBException`
:exc:`~sprockets_dynamodb.exceptions.ConfigNotFound`
:exc:`~sprockets_dynamodb.exceptions.NoCredentialsError`
:exc:`~sprockets_dynamodb.exceptions.NoProfileError`
:exc:`~sprockets_dynamodb.exceptions.TimeoutException`
:exc:`~sprockets_dynamodb.exceptions.RequestException`
:exc:`~sprockets_dynamodb.exceptions.InternalFailure`
:exc:`~sprockets_dynamodb.exceptions.LimitExceeded`
:exc:`~sprockets_dynamodb.exceptions.MissingParameter`
:exc:`~sprockets_dynamodb.exceptions.OptInRequired`
:exc:`~sprockets_dynamodb.exceptions.ResourceInUse`
:exc:`~sprockets_dynamodb.exceptions.RequestExpired`
:exc:`~sprockets_dynamodb.exceptions.ResourceNotFound`
:exc:`~sprockets_dynamodb.exceptions.ServiceUnavailable`
:exc:`~sprockets_dynamodb.exceptions.ThroughputExceeded`
:exc:`~sprockets_dynamodb.exceptions.ValidationException` | entailment |
def set_error_callback(self, callback):
"""Assign a method to invoke when a request has encountered an
unrecoverable error in an action execution.
:param method callback: The method to invoke
"""
self.logger.debug('Setting error callback: %r', callback)
self._on_error = callback | Assign a method to invoke when a request has encountered an
unrecoverable error in an action execution.
:param method callback: The method to invoke | entailment |
def set_instrumentation_callback(self, callback):
"""Assign a method to invoke when a request has completed gathering
measurements.
:param method callback: The method to invoke
"""
self.logger.debug('Setting instrumentation callback: %r', callback)
self._instrumentation_callback = callback | Assign a method to invoke when a request has completed gathering
measurements.
:param method callback: The method to invoke | entailment |
def _execute(self, action, parameters, attempt, measurements):
"""Invoke a DynamoDB action
:param str action: DynamoDB action to invoke
:param dict parameters: parameters to send into the action
:param int attempt: Which attempt number this is
:param list measurements: A list for accumulating request measurements
:rtype: tornado.concurrent.Future
"""
future = concurrent.Future()
start = time.time()
def handle_response(request):
"""Invoked by the IOLoop when fetch has a response to process.
:param tornado.concurrent.Future request: The request future
"""
self._on_response(
action, parameters.get('TableName', 'Unknown'), attempt,
start, request, future, measurements)
ioloop.IOLoop.current().add_future(self._client.fetch(
'POST', '/',
body=json.dumps(parameters).encode('utf-8'),
headers={
'x-amz-target': 'DynamoDB_20120810.{}'.format(action),
'Content-Type': 'application/x-amz-json-1.0',
}), handle_response)
return future | Invoke a DynamoDB action
:param str action: DynamoDB action to invoke
:param dict parameters: parameters to send into the action
:param int attempt: Which attempt number this is
:param list measurements: A list for accumulating request measurements
:rtype: tornado.concurrent.Future | entailment |
def _on_response(self, action, table, attempt, start, response, future,
measurements):
"""Invoked when the HTTP request to the DynamoDB has returned and
is responsible for setting the future result or exception based upon
the HTTP response provided.
:param str action: The action that was taken
:param str table: The table name the action was made against
:param int attempt: The attempt number for the action
:param float start: When the request was submitted
:param tornado.concurrent.Future response: The HTTP request future
:param tornado.concurrent.Future future: The action execution future
:param list measurements: The measurement accumulator
"""
self.logger.debug('%s on %s request #%i = %r',
action, table, attempt, response)
now, exception = time.time(), None
try:
future.set_result(self._process_response(response))
except aws_exceptions.ConfigNotFound as error:
exception = exceptions.ConfigNotFound(str(error))
except aws_exceptions.ConfigParserError as error:
exception = exceptions.ConfigParserError(str(error))
except aws_exceptions.NoCredentialsError as error:
exception = exceptions.NoCredentialsError(str(error))
except aws_exceptions.NoProfileError as error:
exception = exceptions.NoProfileError(str(error))
except aws_exceptions.AWSError as error:
exception = exceptions.DynamoDBException(error)
except (ConnectionError, ConnectionResetError, OSError,
aws_exceptions.RequestException, ssl.SSLError,
_select.error, ssl.socket_error, socket.gaierror) as error:
exception = exceptions.RequestException(str(error))
except TimeoutError:
exception = exceptions.TimeoutException()
except httpclient.HTTPError as error:
if error.code == 599:
exception = exceptions.TimeoutException()
else:
exception = exceptions.RequestException(
getattr(getattr(error, 'response', error),
'body', str(error.code)))
except Exception as error:
exception = error
if exception:
future.set_exception(exception)
measurements.append(
Measurement(now, action, table, attempt, max(now, start) - start,
exception.__class__.__name__
if exception else exception)) | Invoked when the HTTP request to the DynamoDB has returned and
is responsible for setting the future result or exception based upon
the HTTP response provided.
:param str action: The action that was taken
:param str table: The table name the action was made against
:param int attempt: The attempt number for the action
:param float start: When the request was submitted
:param tornado.concurrent.Future response: The HTTP request future
:param tornado.concurrent.Future future: The action execution future
:param list measurements: The measurement accumulator | entailment |
def _process_response(response):
"""Process the raw AWS response, returning either the mapped exception
or deserialized response.
:param tornado.concurrent.Future response: The request future
:rtype: dict or list
:raises: sprockets_dynamodb.exceptions.DynamoDBException
"""
error = response.exception()
if error:
if isinstance(error, aws_exceptions.AWSError):
if error.args[1]['type'] in exceptions.MAP:
raise exceptions.MAP[error.args[1]['type']](
error.args[1]['message'])
raise error
http_response = response.result()
if not http_response or not http_response.body:
raise exceptions.DynamoDBException('empty response')
return json.loads(http_response.body.decode('utf-8')) | Process the raw AWS response, returning either the mapped exception
or deserialized response.
:param tornado.concurrent.Future response: The request future
:rtype: dict or list
:raises: sprockets_dynamodb.exceptions.DynamoDBException | entailment |
def write(self, obj, resource_id=None):
"""Write and obj in bdb.
:param obj: value to be written in bdb.
:param resource_id: id to make possible read and search for an resource.
:return: id of the transaction
"""
if resource_id is not None:
if self.read(resource_id):
raise ValueError("There are one object already with this id.")
obj['_id'] = resource_id
prepared_creation_tx = self.driver.instance.transactions.prepare(
operation='CREATE',
signers=self.user.public_key,
asset={
'namespace': self.namespace,
'data': obj
},
metadata={
'namespace': self.namespace,
'data': obj
}
)
signed_tx = self.driver.instance.transactions.fulfill(
prepared_creation_tx,
private_keys=self.user.private_key
)
self.logger.debug('bdb::write::{}'.format(signed_tx['id']))
self.driver.instance.transactions.send_commit(signed_tx)
return signed_tx | Write and obj in bdb.
:param obj: value to be written in bdb.
:param resource_id: id to make possible read and search for an resource.
:return: id of the transaction | entailment |
def _get(self, tx_id):
"""Read and obj in bdb using the tx_id.
:param resource_id: id of the transaction to be read.
:return: value with the data, transaction id and transaction.
"""
# tx_id=self._find_tx_id(resource_id)
value = [
{
'data': transaction['metadata'],
'id': transaction['id']
}
for transaction in self.driver.instance.transactions.get(asset_id=self.get_asset_id(tx_id))
][-1]
if value['data']['data']:
self.logger.debug('bdb::read::{}'.format(value['data']))
return value
else:
return False | Read and obj in bdb using the tx_id.
:param resource_id: id of the transaction to be read.
:return: value with the data, transaction id and transaction. | entailment |
def _update(self, metadata, tx_id, resource_id):
"""Update and obj in bdb using the tx_id.
:param metadata: new metadata for the transaction.
:param tx_id: id of the transaction to be updated.
:return: id of the transaction.
"""
try:
if not tx_id:
sent_tx = self.write(metadata, resource_id)
self.logger.debug('bdb::put::{}'.format(sent_tx['id']))
return sent_tx
else:
txs = self.driver.instance.transactions.get(asset_id=self.get_asset_id(tx_id))
unspent = txs[-1]
sent_tx = self._put(metadata, unspent, resource_id)
self.logger.debug('bdb::put::{}'.format(sent_tx))
return sent_tx
except BadRequest as e:
logging.error(e) | Update and obj in bdb using the tx_id.
:param metadata: new metadata for the transaction.
:param tx_id: id of the transaction to be updated.
:return: id of the transaction. | entailment |
def list(self, search_from=None, search_to=None, limit=None):
"""List all the objects saved in the namespace.
:param search_from: TBI
:param search_to: TBI
:param offset: TBI
:param limit: max number of values to be shows.
:return: list with transactions.
"""
l = []
for i in self._list():
l.append(i['data']['data'])
return l[0:limit] | List all the objects saved in the namespace.
:param search_from: TBI
:param search_to: TBI
:param offset: TBI
:param limit: max number of values to be shows.
:return: list with transactions. | entailment |
def _list(self):
"""List all the objects saved in the namespace.
:param search_from: TBI
:param search_to: TBI
:param offset: TBI
:param limit: max number of values to be shows.
:return: list with transactions.
"""
all = self.driver.instance.metadata.get(search=self.namespace)
list = []
for id in all:
try:
if not self._get(id['id']) in list:
list.append(self._get(id['id']))
except Exception:
pass
return list | List all the objects saved in the namespace.
:param search_from: TBI
:param search_to: TBI
:param offset: TBI
:param limit: max number of values to be shows.
:return: list with transactions. | entailment |
def query(self, search_model: QueryModel):
"""Query to bdb namespace.
:param query_string: query in string format.
:return: list of transactions that match with the query.
"""
self.logger.debug('bdb::get::{}'.format(search_model.query))
assets = json.loads(requests.post("http://localhost:4000/query", data=search_model.query).content)['data']
self.logger.debug('bdb::result::len {}'.format(len(assets)))
assets_metadata = []
for i in assets:
try:
assets_metadata.append(self._get(i['id'])['data']['data'])
except:
pass
return assets_metadata | Query to bdb namespace.
:param query_string: query in string format.
:return: list of transactions that match with the query. | entailment |
def _delete(self, tx_id):
    """Delete a transaction. Read documentation about CRAB model in https://blog.bigchaindb.com/crab-create-retrieve-append-burn-b9f6d111f460.
    :param tx_id: transaction id
    :return:
    """
    # BigchainDB is append-only: "deleting" means transferring the asset
    # to an unspendable burn address (the B in the CRAB model).
    txs = self.driver.instance.transactions.get(asset_id=self.get_asset_id(tx_id))
    unspent = txs[-1]
    output_index = 0
    output = unspent['outputs'][output_index]
    # Spend the latest unspent output of the asset.
    transfer_input = {
        'fulfillment': output['condition']['details'],
        'fulfills': {
            'output_index': output_index,
            'transaction_id': unspent['id']
        },
        'owners_before': output['public_keys']
    }
    # A CREATE tx has no asset id of its own; fall back to the tx id.
    prepared_transfer_tx = self.driver.instance.transactions.prepare(
        operation='TRANSFER',
        asset=unspent['asset'] if 'id' in unspent['asset'] else {'id': unspent['id']},
        inputs=transfer_input,
        recipients=self.BURN_ADDRESS,
        metadata={
            'namespace': 'burned',
        }
    )
    signed_tx = self.driver.instance.transactions.fulfill(
        prepared_transfer_tx,
        private_keys=self.user.private_key,
    )
    self.driver.instance.transactions.send_commit(signed_tx) | Delete a transaction. Read documentation about CRAB model in https://blog.bigchaindb.com/crab-create-retrieve-append-burn-b9f6d111f460.
:param tx_id: transaction id
:return: | entailment |
def get_asset_id(self, tx_id):
"""Return the tx_id of the first transaction.
:param tx_id: Transaction id to start the recursive search.
:return Transaction id parent.
"""
tx = self.driver.instance.transactions.retrieve(txid=tx_id)
assert tx is not None
return tx['id'] if tx['operation'] == 'CREATE' else tx['asset']['id'] | Return the tx_id of the first transaction.
:param tx_id: Transaction id to start the recursive search.
:return Transaction id parent. | entailment |
def hdr(data, filename):
"""
write ENVI header files
Parameters
----------
data: str or dict
the file or dictionary to get the info from
filename: str
the HDR file to write
Returns
-------
"""
hdrobj = data if isinstance(data, HDRobject) else HDRobject(data)
hdrobj.write(filename) | write ENVI header files
Parameters
----------
data: str or dict
the file or dictionary to get the info from
filename: str
the HDR file to write
Returns
------- | entailment |
def __hdr2dict(self):
    """
    read a HDR file into a dictionary
    http://gis.stackexchange.com/questions/48618/how-to-read-write-envi-metadata-using-gdal
    Returns
    -------
    dict
        the hdr file metadata attributes
    """
    with open(self.filename, 'r') as infile:
        lines = infile.readlines()
    i = 0
    out = dict()
    while i < len(lines):
        line = lines[i].strip('\r\n')
        if '=' in line:
            # a '{...}' value may span several lines; append following
            # lines until the closing brace is reached
            if '{' in line and '}' not in line:
                while '}' not in line:
                    i += 1
                    line += lines[i].strip('\n').lstrip()
            # split 'key = value'; brace-enclosed values become lists
            # split on commas
            line = list(filter(None, re.split(r'\s+=\s+', line)))
            line[1] = re.split(',[ ]*', line[1].strip('{}'))
            key = line[0].replace(' ', '_')
            val = line[1] if len(line[1]) > 1 else line[1][0]
            out[key] = parse_literal(val)
        i += 1
    # band_names must always be a list, even for single-band files
    if 'band_names' in out.keys() and not isinstance(out['band_names'], list):
        out['band_names'] = [out['band_names']]
    return out | read a HDR file into a dictionary
http://gis.stackexchange.com/questions/48618/how-to-read-write-envi-metadata-using-gdal
Returns
-------
dict
the hdr file metadata attributes | entailment |
def write(self, filename='same'):
"""
write object to an ENVI header file
"""
if filename == 'same':
filename = self.filename
if not filename.endswith('.hdr'):
filename += '.hdr'
with open(filename, 'w') as out:
out.write(self.__str__()) | write object to an ENVI header file | entailment |
def MaxSpeed(self, speed):
' Setup of maximum speed '
spi.SPI_write_byte(self.CS, 0x07) # Max Speed setup
spi.SPI_write_byte(self.CS, 0x00)
spi.SPI_write_byte(self.CS, speed) | Setup of maximum speed | entailment |
def GoZero(self, speed):
' Go to Zero position '
self.ReleaseSW()
spi.SPI_write_byte(self.CS, 0x82 | (self.Dir & 1)) # Go to Zero
spi.SPI_write_byte(self.CS, 0x00)
spi.SPI_write_byte(self.CS, speed)
while self.IsBusy():
pass
time.sleep(0.3)
self.ReleaseSW() | Go to Zero position | entailment |
def Move(self, units):
' Move some distance units from current position '
steps = units * self.SPU # translate units to steps
if steps > 0: # look for direction
spi.SPI_write_byte(self.CS, 0x40 | (~self.Dir & 1))
else:
spi.SPI_write_byte(self.CS, 0x40 | (self.Dir & 1))
steps = int(abs(steps))
spi.SPI_write_byte(self.CS, (steps >> 16) & 0xFF)
spi.SPI_write_byte(self.CS, (steps >> 8) & 0xFF)
spi.SPI_write_byte(self.CS, steps & 0xFF) | Move some distance units from current position | entailment |
def ReadStatusBit(self, bit):
' Report given status bit '
spi.SPI_write_byte(self.CS, 0x39) # Read from address 0x19 (STATUS)
spi.SPI_write_byte(self.CS, 0x00)
data0 = spi.SPI_read_byte() # 1st byte
spi.SPI_write_byte(self.CS, 0x00)
data1 = spi.SPI_read_byte() # 2nd byte
#print hex(data0), hex(data1)
if bit > 7: # extract requested bit
OutputBit = (data0 >> (bit - 8)) & 1
else:
OutputBit = (data1 >> bit) & 1
return OutputBit | Report given status bit | entailment |
def SPI_write_byte(self, chip_select, data):
    'Writes a data to a SPI device selected by chipselect bit. '
    # The chip-select id is passed as the I2C register byte of the bridge.
    self.bus.write_byte_data(self.address, chip_select, data) | Writes a data to a SPI device selected by chipselect bit. | entailment |
def SPI_write(self, chip_select, data):
'Writes data to SPI device selected by chipselect bit. '
dat = list(data)
dat.insert(0, chip_select)
return self.bus.write_i2c_block(self.address, dat); | Writes data to SPI device selected by chipselect bit. | entailment |
def SPI_config(self,config):
'Configure SPI interface parameters.'
self.bus.write_byte_data(self.address, 0xF0, config)
return self.bus.read_byte_data(self.address, 0xF0) | Configure SPI interface parameters. | entailment |
def GPIO_read(self):
'Reads logic state on GPIO enabled slave-selects pins.'
self.bus.write_byte_data(self.address, 0xF5, 0x0f)
status = self.bus.read_byte(self.address)
bits_values = dict([('SS0',status & 0x01 == 0x01),('SS1',status & 0x02 == 0x02),('SS2',status & 0x04 == 0x04),('SS3',status & 0x08 == 0x08)])
return bits_values | Reads logic state on GPIO enabled slave-selects pins. | entailment |
def GPIO_config(self, gpio_enable, gpio_config):
    'Enable or disable slave-select pins as gpio.'
    # 0xF6: bitmask selecting which SS pins act as GPIO;
    # 0xF7: their pin configuration.
    self.bus.write_byte_data(self.address, 0xF6, gpio_enable)
    self.bus.write_byte_data(self.address, 0xF7, gpio_config)
    return | Enable or disable slave-select pins as gpio. | entailment |
def get_address(self):
    """
    Return the sensor's I2C address as reported by its address register.
    """
    LOGGER.debug("Reading RPS01A sensor's address.",)
    # Single-byte read of the register whose id is held in self.address_reg.
    return self.bus.read_byte_data(self.address, self.address_reg) | Returns sensors I2C address. | entailment |
def get_zero_position(self):
"""
Returns programmed zero position in OTP memory.
"""
LSB = self.bus.read_byte_data(self.address, self.zero_position_MSB)
MSB = self.bus.read_byte_data(self.address, self.zero_position_LSB)
DATA = (MSB << 6) + LSB
return DATA | Returns programmed zero position in OTP memory. | entailment |
def get_agc_value(self):
    """
    Returns sensor's Automatic Gain Control actual value.
    0 - Represents high magnetic field
    0xFF - Represents low magnetic field
    """
    LOGGER.debug("Reading RPS01A sensor's AGC settings",)
    # Single-byte read of the AGC register.
    return self.bus.read_byte_data(self.address, self.AGC_reg) | Returns sensor's Automatic Gain Control actual value.
0 - Represents high magtetic field
0xFF - Represents low magnetic field | entailment |
def get_diagnostics(self):
"""
Reads diagnostic data from the sensor.
OCF (Offset Compensation Finished) - logic high indicates the finished Offset Compensation Algorithm. After power up the flag remains always to logic high.
COF (Cordic Overflow) - logic high indicates an out of range error in the CORDIC part. When this bit is set, the angle and magnitude data is invalid.
The absolute output maintains the last valid angular value.
COMP low, indicates a high magnetic field. It is recommended to monitor in addition the magnitude value.
COMP high, indicated a weak magnetic field. It is recommended to monitor the magnitude value.
"""
status = self.bus.read_byte_data(self.address, self.diagnostics_reg)
bits_values = dict([('OCF',status & 0x01 == 0x01),
('COF',status & 0x02 == 0x02),
('Comp_Low',status & 0x04 == 0x04),
('Comp_High',status & 0x08 == 0x08)])
return bits_values | Reads diagnostic data from the sensor.
OCF (Offset Compensation Finished) - logic high indicates the finished Offset Compensation Algorithm. After power up the flag remains always to logic high.
COF (Cordic Overflow) - logic high indicates an out of range error in the CORDIC part. When this bit is set, the angle and magnitude data is invalid.
The absolute output maintains the last valid angular value.
COMP low, indicates a high magnetic field. It is recommended to monitor in addition the magnitude value.
COMP high, indicated a weak magnetic field. It is recommended to monitor the magnitude value. | entailment |
def get_angle(self, verify = False):
"""
Retuns measured angle in degrees in range 0-360.
"""
LSB = self.bus.read_byte_data(self.address, self.angle_LSB)
MSB = self.bus.read_byte_data(self.address, self.angle_MSB)
DATA = (MSB << 6) + LSB
if not verify:
return (360.0 / 2**14) * DATA
else:
status = self.get_diagnostics()
if not (status['Comp_Low']) and not(status['Comp_High']) and not(status['COF']):
return (360.0 / 2**14) * DATA
else:
return None | Retuns measured angle in degrees in range 0-360. | entailment |
def main(input_bed, output_file, output_features=False, genome=None,
only_canonical=False, short=False, extended=False, high_confidence=False,
ambiguities_method=False, coding_only=False, collapse_exons=False, work_dir=False, is_debug=False):
""" Annotating BED file based on reference features annotations.
"""
logger.init(is_debug_=is_debug)
if not genome:
raise click.BadParameter('Error: please, specify genome build name with -g (e.g. `-g hg19`)', param='genome')
if short:
if extended: raise click.BadParameter('--short and --extended can\'t be set both', param='extended')
if output_features: raise click.BadParameter('--short and --output-features can\'t be set both', param='output_features')
elif output_features or extended:
extended = True
short = False
if not verify_file(input_bed):
click.BadParameter(f'Usage: {__file__} Input_BED_file -g hg19 -o Annotated_BED_file [options]', param='input_bed')
input_bed = verify_file(input_bed, is_critical=True, description=f'Input BED file for {__file__}')
if work_dir:
work_dir = join(adjust_path(work_dir), os.path.splitext(basename(input_bed))[0])
safe_mkdir(work_dir)
info(f'Created work directory {work_dir}')
else:
work_dir = mkdtemp('bed_annotate')
debug('Created temporary work directory {work_dir}')
input_bed = clean_bed(input_bed, work_dir)
input_bed = verify_bed(input_bed, is_critical=True, description=f'Input BED file for {__file__} after cleaning')
output_file = adjust_path(output_file)
output_file = annotate(
input_bed, output_file, work_dir, genome=genome,
only_canonical=only_canonical, short=short, extended=extended,
high_confidence=high_confidence, collapse_exons=collapse_exons,
output_features=output_features,
ambiguities_method=ambiguities_method, coding_only=coding_only,
is_debug=is_debug)
if not work_dir:
debug(f'Removing work directory {work_dir}')
shutil.rmtree(work_dir)
info(f'Done, saved to {output_file}') | Annotating BED file based on reference features annotations. | entailment |
def StateOfCharge(self):
""" % of Full Charge """
return (self.bus.read_byte_data(self.address, 0x02) + self.bus.read_byte_data(self.address, 0x03) * 256) | % of Full Charge | entailment |
def Chemistry(self):
''' Get cells chemistry '''
length = self.bus.read_byte_data(self.address, 0x79)
chem = []
for n in range(length):
chem.append(self.bus.read_byte_data(self.address, 0x7A + n))
return chem | Get cells chemistry | entailment |
def integrate_2d(uSin, angles, res, nm, lD=0, coords=None,
                 count=None, max_count=None, verbose=0):
    r"""(slow) 2D reconstruction with the Fourier diffraction theorem
    Two-dimensional diffraction tomography reconstruction
    algorithm for scattering of a plane wave
    :math:`u_0(\mathbf{r}) = u_0(x,z)`
    by a dielectric object with refractive index
    :math:`n(x,z)`.
    This function implements the solution by summation in real
    space, which is extremely slow.
    Parameters
    ----------
    uSin: (A,N) ndarray
        Two-dimensional sinogram of line recordings
        :math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D})`
        divided by the incident plane wave :math:`u_0(l_\mathrm{D})`
        measured at the detector.
    angles: (A,) ndarray
        Angular positions :math:`\phi_j` of `uSin` in radians.
    res: float
        Vacuum wavelength of the light :math:`\lambda` in pixels.
    nm: float
        Refractive index of the surrounding medium :math:`n_\mathrm{m}`.
    lD: float
        Distance from center of rotation to detector plane
        :math:`l_\mathrm{D}` in pixels.
    coords: None or (2,M) ndarray
        Computes only the output image at these coordinates. This
        keyword is reserved for future versions and is not
        implemented yet.
    count, max_count: multiprocessing.Value or `None`
        Can be used to monitor the progress of the algorithm.
        Initially, the value of `max_count.value` is incremented
        by the total number of steps. At each step, the value
        of `count.value` is incremented.
    verbose: int
        Increment to increase verbosity.
    Returns
    -------
    f: ndarray of shape (N,N), complex if `onlyreal` is `False`
        Reconstructed object function :math:`f(\mathbf{r})` as defined
        by the Helmholtz equation.
        :math:`f(x,z) =
        k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)`
    See Also
    --------
    backpropagate_2d: implementation by backprojection
    fourier_map_2d: implementation by Fourier interpolation
    odt_to_ri: conversion of the object function :math:`f(\mathbf{r})`
        to refractive index :math:`n(\mathbf{r})`
    Notes
    -----
    This method is not meant for production use. The computation time
    is very long and the reconstruction quality is bad. This function
    is included in the package, because of its educational value,
    exemplifying the backpropagation algorithm.
    Do not use the parameter `lD` in combination with the Rytov
    approximation - the propagation is not correctly described.
    Instead, numerically refocus the sinogram prior to converting
    it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`)
    with a numerical focusing algorithm (available in the Python
    package :py:mod:`nrefocus`).
    """
    # Default reconstruction grid: one point per sinogram pixel.
    if coords is None:
        lx = uSin.shape[1]
        x = np.linspace(-lx/2, lx/2, lx, endpoint=False)
        xv, yv = np.meshgrid(x, x)
        coords = np.zeros((2, lx**2))
        coords[0, :] = xv.flat
        coords[1, :] = yv.flat
    if max_count is not None:
        max_count.value += coords.shape[1] + 1
    # Cut-Off frequency
    km = (2 * np.pi * nm) / res
    # Fourier transform of all uB's
    # In the script we used the unitary angular frequency (uaf) Fourier
    # Transform. The discrete Fourier transform is equivalent to the
    # unitary ordinary frequency (uof) Fourier transform.
    #
    # uof: f₁(ξ) = int f(x) exp(-2πi xξ)
    #
    # uaf: f₃(ω) = (2π)^(-n/2) int f(x) exp(-i ωx)
    #
    # f₁(ω/(2π)) = (2π)^(n/2) f₃(ω)
    # ω = 2πξ
    #
    # We have a one-dimensional (n=1) Fourier transform and UB in the
    # script is equivalent to f₃(ω). Because we are working with the
    # uaf, we divide by sqrt(2π) after computing the fft with the uof.
    #
    # We calculate the fourier transform of uB further below. This is
    # necessary for memory control.
    # Corresponding sample frequencies
    fx = np.fft.fftfreq(uSin[0].shape[0])  # 1D array
    # kx is a 1D array.
    kx = 2 * np.pi * fx
    # Undersampling/oversampling?
    # Determine if the resolution of the image is too low by looking
    # at the maximum value for kx. This is no comparison between
    # Nyquist and Rayleigh frequency.
    if np.max(kx**2) <= 2 * km**2:
        # Detector is not set up properly. Higher resolution
        # can be achieved.
        if verbose:
            print("......Measurement data is undersampled.")
    else:
        if verbose:
            print("......Measurement data is oversampled.")
        raise NotImplementedError("Oversampled data not yet supported." +
                                  " Please rescale input data")
    # Differentials for integral
    dphi0 = 2 * np.pi / len(angles)
    dkx = kx[1] - kx[0]
    # We will later multiply with phi0.
    # Make sure we are using correct shapes
    kx = kx.reshape(1, kx.shape[0])
    # Low-pass filter:
    # less-than-or-equal would give us zero division error.
    filter_klp = (kx**2 < km**2)
    # a0 will be multiplied with kx
    # a0 = np.atleast_1d(a0)
    # a0 = a0.reshape(1,-1)
    # Create the integrand
    # Integrals over ϕ₀ [0,2π]; kx [-kₘ,kₘ]
    # - double coverage factor 1/2 already included
    # - unitary angular frequency to unitary ordinary frequency
    #   conversion performed in calculation of UB=FT(uB).
    #
    # f(r) = -i kₘ / ((2π)^(3/2) a₀) (prefactor)
    #        * iint dϕ₀ dkx (prefactor)
    #        * |kx| (prefactor)
    #        * exp(-i kₘ M lD ) (prefactor)
    #        * UBϕ₀(kx) (dependent on ϕ₀)
    #        * exp( i (kx t⊥ + kₘ (M - 1) s₀) r ) (dependent on ϕ₀ and r)
    #
    # (r and s₀ are vectors. In the last term we perform the dot-product)
    #
    # kₘM = sqrt( kₘ² - kx² )
    # t⊥ = ( cos(ϕ₀), sin(ϕ₀) )
    # s₀ = ( -sin(ϕ₀), cos(ϕ₀) )
    #
    #
    # everything that is not dependent on phi0:
    #
    # Filter M so there are no nans from the root
    M = 1. / km * np.sqrt((km**2 - kx**2) * filter_klp)
    prefactor = -1j * km / ((2 * np.pi)**(3. / 2))
    prefactor *= dphi0 * dkx
    # Also filter the prefactor, so nothing outside the required
    # low-pass contributes to the sum.
    prefactor *= np.abs(kx) * filter_klp
    # new in version 0.1.4:
    # We multiply by the factor (M-1) instead of just (M)
    # to take into account that we have a scattered
    # wave that is normalized by u0.
    prefactor *= np.exp(-1j * km * (M-1) * lD)
    # Initiate function f
    f = np.zeros(len(coords[0]), dtype=np.complex128)
    lenf = len(f)
    lenu0 = len(uSin[0])  # lenu0 = len(kx[0])
    # Initiate vector r that corresponds to calculating a value of f.
    r = np.zeros((2, 1, 1))
    # Everything is normal.
    # Get the angles ϕ₀.
    phi0 = angles.reshape(-1, 1)
    # Compute the Fourier transform of uB.
    # This is true: np.fft.fft(UB)[0] == np.fft.fft(UB[0])
    # because axis -1 is always used.
    #
    #
    # Furthermore, The notation in the our optical tomography script for
    # a wave propagating to the right is:
    #
    #    u0(x) = exp(ikx)
    #
    # However, in physics usually usethe other sign convention:
    #
    #    u0(x) = exp(-ikx)
    #
    # In order to be consisten with programs like Meep or our scattering
    # script for a dielectric cylinder, we want to use the latter sign
    # convention.
    # This is not a big problem. We only need to multiply the imaginary
    # part of the scattered wave by -1.
    UB = np.fft.fft(np.fft.ifftshift(uSin, axes=-1)) / np.sqrt(2 * np.pi)
    UBi = UB.reshape(len(angles), lenu0)
    if count is not None:
        count.value += 1
    for j in range(lenf):
        # Get r (We compute f(r) in this for-loop)
        r[0][:] = coords[0, j]  # x
        r[1][:] = coords[1, j]  # y
        # Integrand changes with r, so we have to create a new
        # array:
        integrand = prefactor * UBi
        # We save memory by directly applying the following to
        # the integrand:
        #
        # Vector along which we measured
        # s0 = np.zeros((2, phi0.shape[0], kx.shape[0]))
        # s0[0] = -np.sin(phi0)
        # s0[1] = +np.cos(phi0)
        # Vector perpendicular to s0
        # t_perp_kx = np.zeros((2, phi0.shape[0], kx.shape[1]))
        #
        # t_perp_kx[0] = kx*np.cos(phi0)
        # t_perp_kx[1] = kx*np.sin(phi0)
        #
        # term3 = np.exp(1j*np.sum(r*( t_perp_kx + (gamma-km)*s0 ), axis=0))
        # integrand* = term3
        #
        # Reminder:
        # f(r) = -i kₘ / ((2π)^(3/2) a₀) (prefactor)
        #        * iint dϕ₀ dkx (prefactor)
        #        * |kx| (prefactor)
        #        * exp(-i kₘ M lD ) (prefactor)
        #        * UB(kx) (dependent on ϕ₀)
        #        * exp( i (kx t⊥ + kₘ(M - 1) s₀) r ) (dependent on ϕ₀ and r)
        #
        # (r and s₀ are vectors. In the last term we perform the dot-product)
        #
        # kₘM = sqrt( kₘ² - kx² )
        # t⊥ = ( cos(ϕ₀), sin(ϕ₀) )
        # s₀ = ( -sin(ϕ₀), cos(ϕ₀) )
        integrand *= np.exp(1j * (
            r[0] * (kx * np.cos(phi0) - km * (M - 1) * np.sin(phi0)) +
            r[1] * (kx * np.sin(phi0) + km * (M - 1) * np.cos(phi0))))
        # Calculate the integral for the position r
        # integrand.sort()
        f[j] = np.sum(integrand)
        # free memory
        del integrand
        if count is not None:
            count.value += 1
    # NOTE(review): `lx` is only defined in the `coords is None` branch above,
    # so passing an explicit `coords` raises NameError here — consistent with
    # the docstring's statement that the keyword is not yet implemented.
    return f.reshape(lx, lx) | r"""(slow) 2D reconstruction with the Fourier diffraction theorem
Two-dimensional diffraction tomography reconstruction
algorithm for scattering of a plane wave
:math:`u_0(\mathbf{r}) = u_0(x,z)`
by a dielectric object with refractive index
:math:`n(x,z)`.
This function implements the solution by summation in real
space, which is extremely slow.
Parameters
----------
uSin: (A,N) ndarray
Two-dimensional sinogram of line recordings
:math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D})`
divided by the incident plane wave :math:`u_0(l_\mathrm{D})`
measured at the detector.
angles: (A,) ndarray
Angular positions :math:`\phi_j` of `uSin` in radians.
res: float
Vacuum wavelength of the light :math:`\lambda` in pixels.
nm: float
Refractive index of the surrounding medium :math:`n_\mathrm{m}`.
lD: float
Distance from center of rotation to detector plane
:math:`l_\mathrm{D}` in pixels.
coords: None or (2,M) ndarray]
Computes only the output image at these coordinates. This
keyword is reserved for future versions and is not
implemented yet.
count, max_count: multiprocessing.Value or `None`
Can be used to monitor the progress of the algorithm.
Initially, the value of `max_count.value` is incremented
by the total number of steps. At each step, the value
of `count.value` is incremented.
verbose: int
Increment to increase verbosity.
Returns
-------
f: ndarray of shape (N,N), complex if `onlyreal` is `False`
Reconstructed object function :math:`f(\mathbf{r})` as defined
by the Helmholtz equation.
:math:`f(x,z) =
k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)`
See Also
--------
backpropagate_2d: implementation by backprojection
fourier_map_2d: implementation by Fourier interpolation
odt_to_ri: conversion of the object function :math:`f(\mathbf{r})`
to refractive index :math:`n(\mathbf{r})`
Notes
-----
This method is not meant for production use. The computation time
is very long and the reconstruction quality is bad. This function
is included in the package, because of its educational value,
exemplifying the backpropagation algorithm.
Do not use the parameter `lD` in combination with the Rytov
approximation - the propagation is not correctly described.
Instead, numerically refocus the sinogram prior to converting
it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`)
with a numerical focusing algorithm (available in the Python
package :py:mod:`nrefocus`). | entailment |
def backpropagate_2d(uSin, angles, res, nm, lD=0, coords=None,
weight_angles=True,
onlyreal=False, padding=True, padval=0,
count=None, max_count=None, verbose=0):
r"""2D backpropagation with the Fourier diffraction theorem
Two-dimensional diffraction tomography reconstruction
algorithm for scattering of a plane wave
:math:`u_0(\mathbf{r}) = u_0(x,z)`
by a dielectric object with refractive index
:math:`n(x,z)`.
This method implements the 2D backpropagation algorithm
:cite:`Mueller2015arxiv`.
.. math::
f(\mathbf{r}) =
-\frac{i k_\mathrm{m}}{2\pi}
\sum_{j=1}^{N} \! \Delta \phi_0 D_{-\phi_j} \!\!
\left \{
\text{FFT}^{-1}_{\mathrm{1D}}
\left \{
\left| k_\mathrm{Dx} \right|
\frac{\text{FFT}_{\mathrm{1D}} \left \{
u_{\mathrm{B},\phi_j}(x_\mathrm{D}) \right \}
}{u_0(l_\mathrm{D})}
\exp \! \left[i k_\mathrm{m}(M - 1) \cdot
(z_{\phi_j}-l_\mathrm{D}) \right]
\right \}
\right \}
with the forward :math:`\text{FFT}_{\mathrm{1D}}` and inverse
:math:`\text{FFT}^{-1}_{\mathrm{1D}}` 1D fast Fourier transform, the
rotational operator :math:`D_{-\phi_j}`, the angular distance between the
projections :math:`\Delta \phi_0`, the ramp filter in Fourier space
:math:`|k_\mathrm{Dx}|`, and the propagation distance
:math:`(z_{\phi_j}-l_\mathrm{D})`.
Parameters
----------
uSin: (A,N) ndarray
Two-dimensional sinogram of line recordings
:math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D})`
divided by the incident plane wave :math:`u_0(l_\mathrm{D})`
measured at the detector.
angles: (A,) ndarray
Angular positions :math:`\phi_j` of `uSin` in radians.
res: float
Vacuum wavelength of the light :math:`\lambda` in pixels.
nm: float
Refractive index of the surrounding medium :math:`n_\mathrm{m}`.
lD: float
Distance from center of rotation to detector plane
:math:`l_\mathrm{D}` in pixels.
coords: None [(2,M) ndarray]
Computes only the output image at these coordinates. This
keyword is reserved for future versions and is not
implemented yet.
weight_angles: bool
If `True`, weights each backpropagated projection with a factor
proportional to the angular distance between the neighboring
projections.
.. math::
\Delta \phi_0 \longmapsto \Delta \phi_j =
\frac{\phi_{j+1} - \phi_{j-1}}{2}
.. versionadded:: 0.1.1
onlyreal: bool
If `True`, only the real part of the reconstructed image
will be returned. This saves computation time.
padding: bool
Pad the input data to the second next power of 2 before
Fourier transforming. This reduces artifacts and speeds up
the process for input image sizes that are not powers of 2.
padval: float
The value used for padding. This is important for the Rytov
approximation, where an approximate zero in the phase might
translate to 2πi due to the unwrapping algorithm. In that
case, this value should be a multiple of 2πi.
If `padval` is `None`, then the edge values are used for
padding (see documentation of :func:`numpy.pad`).
count, max_count: multiprocessing.Value or `None`
Can be used to monitor the progress of the algorithm.
Initially, the value of `max_count.value` is incremented
by the total number of steps. At each step, the value
of `count.value` is incremented.
verbose: int
Increment to increase verbosity.
Returns
-------
f: ndarray of shape (N,N), complex if `onlyreal` is `False`
Reconstructed object function :math:`f(\mathbf{r})` as defined
by the Helmholtz equation.
:math:`f(x,z) =
k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)`
See Also
--------
odt_to_ri: conversion of the object function :math:`f(\mathbf{r})`
to refractive index :math:`n(\mathbf{r})`
radontea.backproject: backprojection based on the Fourier slice
theorem
Notes
-----
Do not use the parameter `lD` in combination with the Rytov
approximation - the propagation is not correctly described.
Instead, numerically refocus the sinogram prior to converting
it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`)
with a numerical focusing algorithm (available in the Python
package :py:mod:`nrefocus`).
"""
##
##
# TODO:
# - combine the 2nd filter and the rotation in the for loop
# to save memory. However, memory is not a big issue in 2D.
##
##
A = angles.shape[0]
if max_count is not None:
max_count.value += A + 2
# Check input data
assert len(uSin.shape) == 2, "Input data `uB` must have shape (A,N)!"
assert len(uSin) == A, "`len(angles)` must be equal to `len(uSin)`!"
if coords is not None:
raise NotImplementedError("Output coordinates cannot yet be set " +
+ "for the 2D backrpopagation algorithm.")
# Cut-Off frequency
# km [1/px]
km = (2 * np.pi * nm) / res
# Here, the notation defines
# a wave propagating to the right as:
#
# u0(x) = exp(ikx)
#
# However, in physics usually we use the other sign convention:
#
# u0(x) = exp(-ikx)
#
# In order to be consistent with programs like Meep or our
# scattering script for a dielectric cylinder, we want to use the
# latter sign convention.
# This is not a big problem. We only need to multiply the imaginary
# part of the scattered wave by -1.
# Perform weighting
if weight_angles:
weights = util.compute_angle_weights_1d(angles).reshape(-1, 1)
sinogram = uSin * weights
else:
sinogram = uSin
# Size of the input data
ln = sinogram.shape[1]
# We perform padding before performing the Fourier transform.
# This gets rid of artifacts due to false periodicity and also
# speeds up Fourier transforms of the input image size is not
# a power of 2.
order = max(64., 2**np.ceil(np.log(ln * 2.1) / np.log(2)))
if padding:
pad = order - ln
else:
pad = 0
padl = np.int(np.ceil(pad / 2))
padr = np.int(pad - padl)
if padval is None:
sino = np.pad(sinogram, ((0, 0), (padl, padr)),
mode="edge")
if verbose > 0:
print("......Padding with edge values.")
else:
sino = np.pad(sinogram, ((0, 0), (padl, padr)),
mode="linear_ramp",
end_values=(padval,))
if verbose > 0:
print("......Verifying padding value: {}".format(padval))
# zero-padded length of sinogram.
lN = sino.shape[1]
# Ask for the filter. Do not include zero (first element).
#
# Integrals over ϕ₀ [0,2π]; kx [-kₘ,kₘ]
# - double coverage factor 1/2 already included
# - unitary angular frequency to unitary ordinary frequency
# conversion performed in calculation of UB=FT(uB).
#
# f(r) = -i kₘ / ((2π)^(3/2) a₀) (prefactor)
# * iint dϕ₀ dkx (prefactor)
# * |kx| (prefactor)
# * exp(-i kₘ M lD ) (prefactor)
# * UBϕ₀(kx) (dependent on ϕ₀)
# * exp( i (kx t⊥ + kₘ (M - 1) s₀) r ) (dependent on ϕ₀ and r)
#
# (r and s₀ are vectors. In the last term we perform the dot-product)
#
# kₘM = sqrt( kₘ² - kx² )
# t⊥ = ( cos(ϕ₀), sin(ϕ₀) )
# s₀ = ( -sin(ϕ₀), cos(ϕ₀) )
#
# The filter can be split into two parts
#
# 1) part without dependence on the z-coordinate
#
# -i kₘ / ((2π)^(3/2) a₀)
# * iint dϕ₀ dkx
# * |kx|
# * exp(-i kₘ M lD )
#
# 2) part with dependence of the z-coordinate
#
# exp( i (kx t⊥ + kₘ (M - 1) s₀) r )
#
# The filter (1) can be performed using the classical filter process
# as in the backprojection algorithm.
#
#
if count is not None:
count.value += 1
# Corresponding sample frequencies
fx = np.fft.fftfreq(lN) # 1D array
# kx is a 1D array.
kx = 2 * np.pi * fx
# Differentials for integral
dphi0 = 2 * np.pi / A
# We will later multiply with phi0.
# a, x
kx = kx.reshape(1, -1)
# Low-pass filter:
# less-than-or-equal would give us zero division error.
filter_klp = (kx**2 < km**2)
# Filter M so there are no nans from the root
M = 1. / km * np.sqrt((km**2 - kx**2) * filter_klp)
prefactor = -1j * km / (2 * np.pi)
prefactor *= dphi0
prefactor *= np.abs(kx) * filter_klp
# new in version 0.1.4:
# We multiply by the factor (M-1) instead of just (M)
# to take into account that we have a scattered
# wave that is normalized by u0.
prefactor *= np.exp(-1j * km * (M-1) * lD)
# Perform filtering of the sinogram
projection = np.fft.fft(sino, axis=-1) * prefactor
#
# filter (2) must be applied before rotation as well
# exp( i (kx t⊥ + kₘ (M - 1) s₀) r )
#
# t⊥ = ( cos(ϕ₀), sin(ϕ₀) )
# s₀ = ( -sin(ϕ₀), cos(ϕ₀) )
#
# This filter is effectively an inverse Fourier transform
#
# exp(i kx xD) exp(i kₘ (M - 1) yD )
#
# xD = x cos(ϕ₀) + y sin(ϕ₀)
# yD = - x sin(ϕ₀) + y cos(ϕ₀)
# Everything is in pixels
center = ln / 2.0
x = np.arange(lN) - center + .5
# Meshgrid for output array
yv = x.reshape(-1, 1)
Mp = M.reshape(1, -1)
filter2 = np.exp(1j * yv * km * (Mp - 1)) # .reshape(1,lN,lN)
projection = projection.reshape(A, 1, lN) # * filter2
# Prepare complex output image
if onlyreal:
outarr = np.zeros((ln, ln))
else:
outarr = np.zeros((ln, ln), dtype=np.dtype(complex))
if count is not None:
count.value += 1
# Calculate backpropagations
for i in np.arange(A):
# Create an interpolation object of the projection.
# interpolation of the rotated fourier transformed projection
# this is already tiled onto the entire image.
sino_filtered = np.fft.ifft(projection[i] * filter2, axis=-1)
# Resize filtered sinogram back to original size
sino = sino_filtered[:ln, padl:padl + ln]
rotated_projr = scipy.ndimage.interpolation.rotate(
sino.real, -angles[i] * 180 / np.pi,
reshape=False, mode="constant", cval=0)
# Append results
outarr += rotated_projr
if not onlyreal:
outarr += 1j * scipy.ndimage.interpolation.rotate(
sino.imag, -angles[i] * 180 / np.pi,
reshape=False, mode="constant", cval=0)
if count is not None:
count.value += 1
return outarr | r"""2D backpropagation with the Fourier diffraction theorem
Two-dimensional diffraction tomography reconstruction
algorithm for scattering of a plane wave
:math:`u_0(\mathbf{r}) = u_0(x,z)`
by a dielectric object with refractive index
:math:`n(x,z)`.
This method implements the 2D backpropagation algorithm
:cite:`Mueller2015arxiv`.
.. math::
f(\mathbf{r}) =
-\frac{i k_\mathrm{m}}{2\pi}
\sum_{j=1}^{N} \! \Delta \phi_0 D_{-\phi_j} \!\!
\left \{
\text{FFT}^{-1}_{\mathrm{1D}}
\left \{
\left| k_\mathrm{Dx} \right|
\frac{\text{FFT}_{\mathrm{1D}} \left \{
u_{\mathrm{B},\phi_j}(x_\mathrm{D}) \right \}
}{u_0(l_\mathrm{D})}
\exp \! \left[i k_\mathrm{m}(M - 1) \cdot
(z_{\phi_j}-l_\mathrm{D}) \right]
\right \}
\right \}
with the forward :math:`\text{FFT}_{\mathrm{1D}}` and inverse
:math:`\text{FFT}^{-1}_{\mathrm{1D}}` 1D fast Fourier transform, the
rotational operator :math:`D_{-\phi_j}`, the angular distance between the
projections :math:`\Delta \phi_0`, the ramp filter in Fourier space
:math:`|k_\mathrm{Dx}|`, and the propagation distance
:math:`(z_{\phi_j}-l_\mathrm{D})`.
Parameters
----------
uSin: (A,N) ndarray
Two-dimensional sinogram of line recordings
:math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D})`
divided by the incident plane wave :math:`u_0(l_\mathrm{D})`
measured at the detector.
angles: (A,) ndarray
Angular positions :math:`\phi_j` of `uSin` in radians.
res: float
Vacuum wavelength of the light :math:`\lambda` in pixels.
nm: float
Refractive index of the surrounding medium :math:`n_\mathrm{m}`.
lD: float
Distance from center of rotation to detector plane
:math:`l_\mathrm{D}` in pixels.
coords: None [(2,M) ndarray]
Computes only the output image at these coordinates. This
keyword is reserved for future versions and is not
implemented yet.
weight_angles: bool
If `True`, weights each backpropagated projection with a factor
proportional to the angular distance between the neighboring
projections.
.. math::
\Delta \phi_0 \longmapsto \Delta \phi_j =
\frac{\phi_{j+1} - \phi_{j-1}}{2}
.. versionadded:: 0.1.1
onlyreal: bool
If `True`, only the real part of the reconstructed image
will be returned. This saves computation time.
padding: bool
Pad the input data to the second next power of 2 before
Fourier transforming. This reduces artifacts and speeds up
the process for input image sizes that are not powers of 2.
padval: float
The value used for padding. This is important for the Rytov
approximation, where an approximate zero in the phase might
translate to 2πi due to the unwrapping algorithm. In that
case, this value should be a multiple of 2πi.
If `padval` is `None`, then the edge values are used for
padding (see documentation of :func:`numpy.pad`).
count, max_count: multiprocessing.Value or `None`
Can be used to monitor the progress of the algorithm.
Initially, the value of `max_count.value` is incremented
by the total number of steps. At each step, the value
of `count.value` is incremented.
verbose: int
Increment to increase verbosity.
Returns
-------
f: ndarray of shape (N,N), complex if `onlyreal` is `False`
Reconstructed object function :math:`f(\mathbf{r})` as defined
by the Helmholtz equation.
:math:`f(x,z) =
k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)`
See Also
--------
odt_to_ri: conversion of the object function :math:`f(\mathbf{r})`
to refractive index :math:`n(\mathbf{r})`
radontea.backproject: backprojection based on the Fourier slice
theorem
Notes
-----
Do not use the parameter `lD` in combination with the Rytov
approximation - the propagation is not correctly described.
Instead, numerically refocus the sinogram prior to converting
it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`)
with a numerical focusing algorithm (available in the Python
package :py:mod:`nrefocus`). | entailment |
def color(x, y):
    """Return the fill colour for the tile at grid position (x, y).

    The grid is split into four triangular quadrants around the centre
    (4, 4).  Shades taken from
    http://paletton.com/#uid=70l150klllletuehUpNoMgTsdcs (shade 2).
    """
    dx = x - 4
    dy = y - 4
    below_diagonal = dx > dy      # below the main diagonal
    above_antidiag = -dy > dx     # above the anti-diagonal
    if below_diagonal and not above_antidiag:
        return "#CDB95B"  # right triangle
    if below_diagonal and above_antidiag:
        return "#CD845B"  # top triangle
    if not below_diagonal and not above_antidiag:
        return "#57488E"  # bottom triangle
    if not below_diagonal and above_antidiag:
        return "#3B8772"  # left triangle
    # unreachable: the four cases above are exhaustive
    return "black"
def single_read(self, register):
    """Perform one single read of the addressed on-chip register.

    :param register: register address (one of the AD7730_*_REG constants)
    :returns: the raw data bytes read back from the register
    """
    # communications register: 0b00010 selects a single-read operation,
    # the low three bits address the target register
    command_byte = (0b00010 << 3) + register
    if register == self.AD7730_STATUS_REG:
        nbytes = 1
    elif register == self.AD7730_DATA_REG:
        nbytes = 3
    elif register == self.AD7730_MODE_REG:
        nbytes = 2
    elif register == self.AD7730_FILTER_REG:
        nbytes = 3
    elif register == self.AD7730_DAC_REG:
        nbytes = 1
    elif register == self.AD7730_OFFSET_REG:
        nbytes = 3
    elif register == self.AD7730_GAIN_REG:
        nbytes = 3
    elif register == self.AD7730_TEST_REG:
        nbytes = 3
    # pad with zero bytes so the device can clock the answer out
    frame = [command_byte] + [0x00] * nbytes
    spi.SPI_write(self.CS, frame)
    raw = spi.SPI_read(nbytes + 1)
    # the first byte is read back while the command byte is shifting
    # out, so it carries no register data -- discard it
    return raw[1:]
def getStatus(self):
    """Read the Status Register and decode its flag bits into a dict.

    Returned keys (all booleans):

    RDY   - Ready: mirrors the RDY output pin; a number of events set it
            high, see Table XVIII in the datasheet.
    STDY  - Steady: updated when the filter writes a result to the Data
            Register.  In FASTStep mode it stays high while the
            second-stage FIR filter is still settling and goes low
            together with RDY once the filter has fully settled; if
            FASTStep is never used it goes low at the first Data
            Register read and is not cleared by subsequent reads.
    STBY  - Standby: whether the part is in Standby Mode (via the
            STANDBY pin or writing 011 to MD2..MD0 of the Mode
            Register) rather than normal operation.
    NOREF - No Reference: set when the voltage between REF IN(+) and
            REF IN(-) is below 0.3 V or either input is open-circuit;
            conversions then read all 1s and calibration-register
            updates are inhibited.
    """
    status_byte = self.single_read(self.AD7730_STATUS_REG)[0]
    flag_masks = (('NOREF', 0x10),
                  ('STBY', 0x20),
                  ('STDY', 0x40),
                  ('RDY', 0x80))
    return dict((name, (status_byte & mask) == mask)
                for name, mask in flag_masks)
def setMode(self, mode, polarity, den, iovalue, data_length,
            reference, input_range, clock_enable, burn_out, channel):
    """Pack the two Mode Register bytes and write them to the device.

    Typical arguments are the AD7730_* constants defined on this
    class, e.g.::

        setMode(mode=self.AD7730_IDLE_MODE,
                polarity=self.AD7730_UNIPOLAR_MODE,
                den=self.AD7730_IODISABLE_MODE,
                iovalue=0b00,
                data_length=self.AD7730_24bitDATA_MODE,
                reference=self.AD7730_REFERENCE_5V,
                input_range=self.AD7730_40mVIR_MODE,
                clock_enable=self.AD7730_MCLK_ENABLE_MODE,
                burn_out=self.AD7730_BURNOUT_DISABLE,
                channel=self.AD7730_AIN1P_AIN1N)
    """
    # high byte: mode(3) | polarity | den | iovalue(2) | data_length
    msb = ((mode << 5) + (polarity << 4) + (den << 3)
           + (iovalue << 1) + data_length)
    # low byte: reference | 0 | input_range(2) | clock_enable | burn_out | channel(2)
    lsb = ((reference << 7) + (0b0 << 6) + (input_range << 4)
           + (clock_enable << 3) + (burn_out << 2) + channel)
    self.single_write(self.AD7730_MODE_REG, [msb, lsb])
def downsample(work_dir, sample_name, fastq_left_fpath, fastq_right_fpath, downsample_to, num_pairs=None):
    """ get N random headers from a fastq file without reading the
    whole thing into memory
    modified from: http://www.biostars.org/p/6544/

    :param work_dir: directory for the downsampled output and file transactions
    :param sample_name: sample label for logging; when None it is derived from
        the characters the two fastq names have in common
    :param fastq_left_fpath: left (R1) fastq file
    :param fastq_right_fpath: right (R2) fastq file, or None for single-end data
    :param downsample_to: float = fraction of pairs to keep, int = absolute
        number of pairs to keep
    :param num_pairs: total number of read pairs; counted from the left fastq
        when None
    :return: tuple of (left, right) paths of the downsampled files, or the
        original paths when no downsampling is needed
    """
    sample_name = sample_name or splitext(''.join(
        lc if lc == rc else '' for lc, rc in zip(fastq_left_fpath, fastq_right_fpath)))[0]

    l_out_fpath = make_downsampled_fpath(work_dir, fastq_left_fpath)
    r_out_fpath = make_downsampled_fpath(work_dir, fastq_right_fpath)
    if can_reuse(l_out_fpath, [fastq_left_fpath, fastq_right_fpath]):
        return l_out_fpath, r_out_fpath

    info('Processing ' + sample_name)
    if num_pairs is None:
        info(sample_name + ': counting number of reads in fastq...')
        num_pairs = _count_records_in_fastq(fastq_left_fpath)
    if num_pairs > LIMIT:
        info(sample_name + ' the number of reads is higher than ' + str(LIMIT) +
             ', sampling from only first ' + str(LIMIT))
        num_pairs = LIMIT
    info(sample_name + ': ' + str(num_pairs) + ' reads')
    num_downsample_pairs = int(downsample_to * num_pairs) if isinstance(downsample_to, float) else downsample_to
    if num_pairs <= num_downsample_pairs:
        info(sample_name + ': and it is less than ' + str(num_downsample_pairs) + ', so no downsampling.')
        return fastq_left_fpath, fastq_right_fpath
    else:
        info(sample_name + ': downsampling to ' + str(num_downsample_pairs))
        rand_records = sorted(random.sample(range(num_pairs), num_downsample_pairs))

    info('Opening ' + fastq_left_fpath)
    fh1 = open_gzipsafe(fastq_left_fpath)
    info('Opening ' + fastq_right_fpath)
    fh2 = open_gzipsafe(fastq_right_fpath) if fastq_right_fpath else None

    out_files = (l_out_fpath, r_out_fpath) if r_out_fpath else (l_out_fpath,)

    written_records = 0
    with file_transaction(work_dir, out_files) as tx_out_files:
        if isinstance(tx_out_files, six.string_types):
            tx_out_f1, tx_out_f2 = tx_out_files, None
        else:
            tx_out_f1, tx_out_f2 = tx_out_files
        info('Opening ' + str(tx_out_f1) + ' to write')
        sub1 = open_gzipsafe(tx_out_f1, "w")
        if r_out_fpath:
            # BUGFIX: this logging line used to run unconditionally and
            # raised a NameError for single-end input, where tx_out_f2
            # was never assigned
            info('Opening ' + str(tx_out_f2) + ' to write')
            sub2 = open_gzipsafe(tx_out_f2, "w")
        else:
            sub2 = None
        rec_no = -1
        # NOTE(review): the skip loop below consumes record rr itself, so the
        # 4 lines written afterwards belong to record rr+1, and rec_no is not
        # advanced past the written record -- looks off-by-one vs. the
        # biostars original; confirm intended behavior before changing.
        for rr in rand_records:
            while rec_no < rr:
                rec_no += 1
                for i in range(4): fh1.readline()
                if fh2:
                    for i in range(4): fh2.readline()
            for i in range(4):
                sub1.write(fh1.readline())
                if sub2:
                    sub2.write(fh2.readline())
            written_records += 1
            if written_records % 10000 == 0:
                info(sample_name + ': written ' + str(written_records) + ', rec_no ' + str(rec_no + 1))
            if rec_no > num_pairs:
                # BUGFIX: the two message halves were passed as two separate
                # arguments to info() (comma instead of '+')
                info(sample_name + ' reached the limit of ' + str(num_pairs) + ' read lines, stopping.')
                break
        info(sample_name + ': done, written ' + str(written_records) + ', rec_no ' + str(rec_no))
        fh1.close()
        sub1.close()
        if fastq_right_fpath:
            fh2.close()
            sub2.close()
    info(sample_name + ': done downsampling, saved to ' + l_out_fpath + ' and ' + r_out_fpath +
         ', total ' + str(written_records) + ' paired reads written')
    return l_out_fpath, r_out_fpath
def get_hum(self, raw = False):
    """Trigger a relative-humidity measurement and return it in %RH.

    The physical value corresponds to relative humidity above liquid
    water according to the World Meteorological Organization (WMO).

    :param raw: if True, return the unclamped sensor output, which is
        useful for getting an idea of sensor failure status
    """
    # start a no-hold humidity conversion, then give it time to finish
    self.bus.write_byte(self.address, self.TRIG_RH_noHOLD)
    time.sleep(0.1)
    data = self.bus.read_i2c_block(self.address, 2)
    code = (data[0] << 8) | data[1]
    code &= ~0b11  # the two lowest bits are status bits, not data
    humidity = -6.0 + 125.0 * (code / 65536.0)
    if raw:
        return humidity
    # clamp to the physically meaningful range; values outside it are a
    # sensor error state and depend on the specific sensor piece
    return min(100.0, max(0.0, humidity))
def _calculate_checksum(value):
"""4.12 Checksum Calculation from an unsigned short input"""
# CRC
polynomial = 0x131 # //P(x)=x^8+x^5+x^4+1 = 100110001
crc = 0xFF
# calculates 8-Bit checksum with given polynomial
for byteCtr in [ord(x) for x in struct.pack(">H", value)]:
crc ^= byteCtr
for bit in range(8, 0, -1):
if crc & 0x80:
crc = (crc << 1) ^ polynomial
else:
crc = (crc << 1)
return crc | 4.12 Checksum Calculation from an unsigned short input | entailment |
def run(self):
    """Estimate pi by Monte Carlo sampling; runs when the button is pressed.

    Streams intermediate estimates to the frontend every 1000 draws.
    """
    inside = 0
    for draws in range(1, self.data['samples']):
        # draw a point in the unit square and test the quarter circle
        px, py = random(), random()
        if px ** 2 + py ** 2 < 1.0:
            inside += 1
        if draws % 1000 != 0:
            continue
        # debug
        yield self.emit('log', {'draws': draws, 'inside': inside})
        # pi estimate and its binomial uncertainty for the current draws
        fraction = inside / draws
        uncertainty = 4.0 * math.sqrt(draws * fraction * (1.0 - fraction)) / draws
        yield self.set_state(pi={
            'estimate': 4.0 * inside / draws,
            'uncertainty': uncertainty,
        })
    yield self.emit('log', {'action': 'done'})
def on(f):
    """Decorator for action handlers; the action name is the function name.

    The handler is also wrapped in `tornado.gen.coroutine` so that
    `~tornado.concurrent.Future` can be yielded from it.
    """
    f.action = f.__name__
    @wrapt.decorator
    @tornado.gen.coroutine
    def _run(wrapped, instance, args, kwargs):
        return wrapped(*args, **kwargs)
    return _run(f)
def on_action(action):
    """Decorator factory for action handlers with an explicit name.

    :param str action: explicit action name

    The handler is also wrapped in `tornado.gen.coroutine` so that
    `~tornado.concurrent.Future` can be yielded from it.
    """
    @wrapt.decorator
    @tornado.gen.coroutine
    def _run(wrapped, instance, args, kwargs):
        return wrapped(*args, **kwargs)
    _run.action = action
    return _run
def init_datastores(self):
    """Initialize datastores for this analysis instance.

    Creates a :class:`.Datastore` at ``data`` (domain: this instance's
    id) and one at ``class_data`` (domain: the analysis class name),
    forwarding every change to the frontend as ``data`` /
    ``class_data`` signals.  Overwrite this method to use other
    datastore backends.
    """
    self.data = Datastore(self.id_)
    self.data.subscribe(lambda changed: self.emit('data', changed))
    self.class_data = Datastore(type(self).__name__)
    self.class_data.subscribe(lambda changed: self.emit('class_data', changed))
def emit(self, signal, message='__nomessagetoken__'):
    """Emit a signal to the frontend.

    ``log``/``warn``/``error`` signals are additionally mirrored to the
    backend logger before being forwarded.

    :param str signal: name of the signal
    :param message: message to send
    :returns: return value from frontend emit function
    :rtype: tornado.concurrent.Future
    """
    # pre-emit hooks for the backend log
    backend_hooks = {
        'log': self.log_backend.info,
        'warn': self.log_backend.warn,
        'error': self.log_backend.error,
    }
    hook = backend_hooks.get(signal)
    if hook is not None:
        hook(message)
    return self.emit_to_frontend(signal, message)
def set(self, i, value):
    """Set value at position i and return a Future.

    Returns ``self`` without triggering callbacks when the encoded
    value is unchanged.

    :rtype: tornado.concurrent.Future
    """
    encoded = encode(value, self.get_change_trigger(i))
    already_stored = i in self.data and self.data[i] == encoded
    if already_stored:
        return self
    self.data[i] = encoded
    return self.trigger_changed(i)
def set(self, key, value):
    """Set a value at key and return a Future.

    Returns ``self`` without triggering callbacks when the encoded
    value is unchanged.

    :rtype: tornado.concurrent.Future
    """
    encoded = encode(value, self.get_change_trigger(key))
    already_stored = key in self.data and self.data[key] == encoded
    if already_stored:
        return self
    self.data[key] = encoded
    return self.trigger_changed(key)
def update(self, new_data):
    """Set every key/value pair from ``new_data`` on this store.

    :returns: self, for chaining
    """
    for key, value in new_data.items():
        self[key] = value
    return self
def trigger_all_change_callbacks(self):
    """Trigger all callbacks that were set with on_change().

    :returns: flat list of all callback return values, over every key
        in this domain's store
    """
    results = []
    for key in DatastoreLegacy.store[self.domain].keys():
        results.extend(self.trigger_change_callbacks(key))
    return results
def set(self, key, value):
    """Set value at key and return a Future.

    Delegates to the shared per-domain store.

    :rtype: tornado.concurrent.Future
    """
    domain_store = DatastoreLegacy.store[self.domain]
    return domain_store.set(key, value)
def init(self, key_value_pairs):
    """Initialize datastore.

    Only sets values for keys that are not in the datastore already.

    :param dict key_value_pairs:
        A set of key value pairs to use to initialize the datastore.
    """
    domain_store = DatastoreLegacy.store[self.domain]
    for key, value in key_value_pairs.items():
        if key not in domain_store:
            domain_store[key] = value
def close(self):
    """Close and delete instance."""
    # deregister the change callbacks of this instance
    domain_instances = DatastoreLegacy.datastores[self.domain]
    domain_instances.remove(self)
    # free the stored data once the last instance of this domain is gone
    if self.release_storage and not domain_instances:
        del DatastoreLegacy.store[self.domain]
    del self  # only unbinds the local name
def AddIndex(node, index=None):
    """Recursively record each dict's position within its enclosing list.

    For every dict that is an element of a list (i.e. a repeated
    section), an ``'index'`` key with its list position is added; the
    walk then continues into all nested values.
    """
    if isinstance(node, list):
        for position, child in enumerate(node):
            AddIndex(child, index=position)
    elif isinstance(node, dict):
        if index is not None:
            node['index'] = index
        for value in node.values():
            AddIndex(value)
def watch():
    """Regenerate the HTML documentation whenever a source file changes."""
    # Start with a clean build
    sphinx_build['-b', 'html', '-E', 'docs', 'docs/_build/html'] & FG
    # rebuild on every .rst/.py change outside the build output
    rebuild_trick = ShellCommandTrick(
        shell_command='sphinx-build -b html docs docs/_build/html',
        patterns=['*.rst', '*.py'],
        ignore_patterns=['_build/*'],
        ignore_directories=['.tox'],
        drop_during_process=True)
    observe_with(Observer(), rebuild_trick, pathnames=['.'], recursive=True)
def gen(skipdirhtml=False):
    """Generate html and (unless skipped) dirhtml documentation output.

    Also refreshes docs/changelog.rst from CHANGELOG.md via pandoc.
    """
    changelog_rst = 'docs/changelog.rst'
    check_git_unchanged(changelog_rst)
    pandoc('--from=markdown', '--to=rst', '--output=' + changelog_rst, 'CHANGELOG.md')
    if not skipdirhtml:
        sphinx_build['-b', 'dirhtml', '-W', '-E', 'docs', 'docs/_build/dirhtml'] & FG
    sphinx_build['-b', 'html', '-W', '-E', 'docs', 'docs/_build/html'] & FG
def __reset_crosshair(self):
    """Redraw the cross-hair on the horizontal slice plot at the stored
    click position (``self.x_coord`` / ``self.y_coord``).
    """
    self.lver.set_xdata(self.x_coord)
    self.lhor.set_ydata(self.y_coord)
def __init_vertical_plot(self):
    """Set up (or reset) the vertical profile plot.

    Clears any previously drawn profiles, restores labels, title and
    x-limits, and draws the marker line at the current slider value.
    """
    ax = self.ax2
    # clear the plot if lines have already been drawn on it
    if len(ax.lines) > 0:
        ax.cla()
    ax.set_ylabel(self.datalabel, fontsize=self.fontsize)
    ax.set_xlabel(self.spectrumlabel, fontsize=self.fontsize)
    ax.set_title('vertical point profiles', fontsize=self.fontsize)
    ax.set_xlim([1, self.bands])
    # vertical marker at the slider position
    self.vline = ax.axvline(self.slider.value, color='black')
def __onclick(self, event):
    """Handle mouse clicks on the horizontal slice plot (``self.ax1``).

    A click stores the data coordinates, redraws the cross-hair and
    adds (or, when stacking is disabled, replaces) the corresponding
    vertical time-series profile in the second axes.

    Parameters
    ----------
    event: matplotlib.backend_bases.MouseEvent
        the click event object containing image coordinates
    """
    # ignore clicks on any axes but the horizontal slice plot
    if event.inaxes != self.ax1:
        return
    # remember where the user clicked and move the cross-hair there
    self.x_coord = event.xdata
    self.y_coord = event.ydata
    self.__reset_crosshair()
    x, y = self.__map2img(self.x_coord, self.y_coord)
    profile = self.__read_timeseries(x, y)
    # redraw/clear the vertical profile plot in case stacking is disabled
    if not self.checkbox.value:
        self.__init_vertical_plot()
    self.ax2.plot(self.timestamps, profile,
                  label='x: {0:03}; y: {1:03}'.format(x, y))
    self.ax2_legend = self.ax2.legend(loc=0, prop={'size': 7}, markerscale=1)
def LookupChain(lookup_func_list):
    """Compose formatter *lookup* functions into a single lookup function.

    Returns a function suitable for passing as the ``more_formatters``
    argument to Template: it queries each lookup function in order and
    returns the first non-None formatter found (None if none match).

    NOTE: In Java, this would be implemented using the 'Composite'
    pattern -- a *list* of formatter lookup functions behaves the same
    as a *single* formatter lookup function.  Note the distinction
    between formatter *lookup* functions and formatter functions here.
    """
    def _chained_lookup(formatter_name):
        for lookup in lookup_func_list:
            found = lookup(formatter_name)
            if found is not None:
                return found
        return None
    return _chained_lookup
def PythonPercentFormat(format_str):
    """Interpret 'printf <fmt>' specifiers as Python % format strings.

    Returns a formatter callable for specifiers starting with
    ``'printf '``, or None when the specifier is not printf-style.
    """
    prefix = 'printf '
    if not format_str.startswith(prefix):
        return None
    fmt = format_str[len(prefix):]
    def _format(value):
        return fmt % value
    return _format
def Plural(format_str):
    """Return a formatter choosing between a plural and a singular form.

    The specifier is ``'plural?<sep><plural><sep><singular>'``, where
    ``<sep>`` is the character immediately following ``'plural?'``
    (usually a space).  The formatter yields the plural form for ints
    greater than 1 and for lists longer than 1, the singular form
    otherwise.  Returns None when ``format_str`` is not a ``plural?``
    specifier; raises :class:`Error` when it is malformed.
    """
    if not format_str.startswith('plural?'):
        return None
    i = len('plural?')
    try:
        splitchar = format_str[i]  # usually a space, but could be something else
        _, plural_val, singular_val = format_str.split(splitchar)
    except (IndexError, ValueError):
        # BUGFIX: only IndexError (nothing after 'plural?') was caught
        # before; the ValueError raised when split() yields the wrong
        # number of fields escaped instead of producing this message.
        raise Error('plural? must have exactly 2 arguments')
    def Formatter(value):
        is_plural = ((isinstance(value, int) and value > 1) or
                     (isinstance(value, list) and len(value) > 1))
        return plural_val if is_plural else singular_val
    return Formatter
def get_merged_cds(genome):
    """Return all high-confidence CDS and stop-codon regions, merged.

    Used:
    - for TargQC general reports CDS coverage statistics for WGS
    - for Seq2C CNV calling when no capture BED available
    """
    features = get_all_features(genome)
    debug('Filtering BEDTool for high confidence CDS and stop codons')
    cds_like = features.filter(
        lambda r: r.fields[BedCols.FEATURE] in ['CDS', 'stop_codon'])
    return cds_like.filter(high_confidence_filter).merge()
def _get(relative_path, genome=None):
    """Resolve a repository data file, returning a BedTool for BED files.

    :param relative_path: relative path of the file inside the repository;
        may contain a '{genome}' placeholder
    :param genome: genome name. Can contain a chromosome suffix like
        'hg19-chr20'; for BED files the returned BedTool is then filtered
        to that chromosome.
    :return: BedTool object if it's a BED file, or the file path otherwise
    """
    chrom = None
    if genome:
        # Allow 'hg19-chr20'-style names: split off the chromosome part.
        if '-chr' in genome:
            genome, chrom = genome.split('-')
        check_genome(genome)
        relative_path = relative_path.format(genome=genome)

    path = abspath(join(dirname(__file__), relative_path))
    # Fall back to a gzipped copy if the plain file is absent.
    if not isfile(path) and isfile(path + '.gz'):
        path += '.gz'

    # Non-BED files are returned as plain paths.
    if not (path.endswith('.bed') or path.endswith('.bed.gz')):
        return path

    if path.endswith('.bed.gz'):
        # Reading compressed BEDs requires the bedtools binary.
        bedtools = which('bedtools')
        if not bedtools:
            critical('bedtools not found in PATH: ' + str(os.environ['PATH']))
        debug('BED is compressed, creating BedTool')
        bed = BedTool(path)
    else:
        debug('BED is uncompressed, creating BedTool')
        bed = BedTool(path)
    if chrom:
        debug('Filtering BEDTool for chrom ' + chrom)
        bed = bed.filter(lambda r: r.chrom == chrom)
    return bed
def run(cmd, output_fpath=None, input_fpath=None, checks=None, stdout_to_outputfile=True,
        stdout_tx=True, reuse=False, env_vars=None):
    """Run the provided command, logging details and checking for errors.

    :param cmd: command line to execute (string, or list of arguments)
    :param output_fpath: expected output file; used for reuse checks,
        transactional writes, and passed to the check functions
    :param input_fpath: input file, passed through to the check functions
    :param checks: list of post-run check callables;
        defaults to [file_nonempty_check]
    :param stdout_to_outputfile: if True, redirect the command's stdout into
        the (transactional) output file with ' > '
    :param stdout_tx: if True, write output through a temporary transactional
        file that replaces output_fpath on success
    :param reuse: if True and the output (or its .gz twin) verifies, skip
        running and return the existing path
    :param env_vars: mapping of environment overrides for the child process;
        a value of None removes that variable
    """
    # Reuse previously produced output if requested and it verifies.
    if output_fpath and reuse:
        if verify_file(output_fpath, silent=True):
            info(output_fpath + ' exists, reusing')
            return output_fpath
        # Also accept a gzipped version of the expected output.
        if not output_fpath.endswith('.gz') and verify_file(output_fpath + '.gz', silent=True):
            info(output_fpath + '.gz exists, reusing')
            return output_fpath

    # Build the child environment: a value of None means "unset".
    env = os.environ.copy()
    if env_vars:
        for k, v in env_vars.items():
            if v is None:
                if k in env:
                    del env[k]
            else:
                env[k] = v

    if checks is None:
        checks = [file_nonempty_check]

    def _try_run(_cmd, _output_fpath, _input_fpath):
        # Log the command, then delegate to _do_run.
        # NOTE(review): `except: raise` is a no-op re-raise kept as-is.
        try:
            info(' '.join(str(x) for x in _cmd) if not isinstance(_cmd, six.string_types) else _cmd)
            _do_run(_cmd, checks, env, _output_fpath, _input_fpath)
        except:
            raise

    # Remove any stale output before (re)running the command.
    if output_fpath:
        if isfile(output_fpath):
            os.remove(output_fpath)
    if output_fpath:
        if stdout_tx:
            # Write through a transactional temp file so a failed run does not
            # leave a partial output_fpath behind.
            # (assumes file_transaction yields a tmp path and moves it in
            # place on success -- TODO confirm against the project helper)
            with file_transaction(None, output_fpath) as tx_out_file:
                if stdout_to_outputfile:
                    cmd += ' > ' + tx_out_file
                else:
                    # The command writes output_fpath itself: rewrite every
                    # mention of it (bare, double-/single-quoted, mid-line or
                    # line-final) to point at the temp file instead.
                    # NOTE(review): the quoted replacements drop the opening
                    # quote character -- looks like a latent bug; confirm.
                    cmd += '\n'
                    cmd = cmd.replace(' ' + output_fpath + ' ', ' ' + tx_out_file + ' ') \
                             .replace(' "' + output_fpath + '" ', ' ' + tx_out_file + '" ') \
                             .replace(' \'' + output_fpath + '\' ', ' ' + tx_out_file + '\' ') \
                             .replace(' ' + output_fpath + '\n', ' ' + tx_out_file) \
                             .replace(' "' + output_fpath + '"\n', ' ' + tx_out_file + '"') \
                             .replace(' \'' + output_fpath + '\'\n', ' ' + tx_out_file + '\'') \
                             .replace('\n', '')
                _try_run(cmd, tx_out_file, input_fpath)
        else:
            _try_run(cmd, output_fpath, input_fpath)
    else:
        _try_run(cmd, None, input_fpath)
def _correct_qualimap_genome_results(samples):
    """Fix java.lang.Double.parseDouble errors on entries like "6,082.49".

    Strips thousands-separator commas from the metrics section (everything
    from the '>> Reference' line onward) of each sample's Qualimap genome
    results file, rewriting the file only when a comma is actually found.
    """
    for sample in samples:
        if not verify_file(sample.qualimap_genome_results_fpath):
            continue
        with open(sample.qualimap_genome_results_fpath, 'r') as fh:
            lines = fh.readlines()

        # First pass: detect whether any metrics line contains a comma.
        needs_fix = False
        in_metrics = False
        for ln in lines:
            if ">> Reference" in ln:
                in_metrics = True
            if in_metrics and ',' in ln:
                needs_fix = True
                break

        if needs_fix:
            # Second pass: rewrite the file with commas stripped from
            # the metrics section.
            with open(sample.qualimap_genome_results_fpath, 'w') as fh:
                in_metrics = False
                for ln in lines:
                    if ">> Reference" in ln:
                        in_metrics = True
                    if in_metrics and ',' in ln:
                        ln = ln.replace(',', '')
                    fh.write(ln)
def _correct_qualimap_insert_size_histogram(work_dir, samples):
    """Replace the Qualimap insert size histogram with the Picard one."""
    for s in samples:
        # Qualimap v.1 kept raw data under 'raw_data_qualimapReport';
        # v.2 uses 'raw_data'. Normalize any v.1 layout to the v.2 one.
        qualimap1_dirname = dirname(s.qualimap_ins_size_hist_fpath).replace('raw_data_qualimapReport', 'raw_data')
        qualimap2_dirname = dirname(s.qualimap_ins_size_hist_fpath)
        if exists(qualimap1_dirname):
            if not exists(qualimap2_dirname):
                shutil.move(qualimap1_dirname, qualimap2_dirname)
            else:
                shutil.rmtree(qualimap1_dirname)
        elif not exists(qualimap2_dirname):
            continue  # no data from both Qualimap v.1 and Qualimap v.2
        # if qualimap histogram exists and reuse_intermediate, skip
        if verify_file(s.qualimap_ins_size_hist_fpath, silent=True) and tc.reuse_intermediate:
            pass
        else:
            if verify_file(s.picard_ins_size_hist_txt_fpath):
                with open(s.picard_ins_size_hist_txt_fpath, 'r') as picard_f:
                    # Advance the file iterator past the '## HISTOGRAM' marker
                    # plus one more line (the column header), so only the
                    # histogram data rows remain to be copied below.
                    one_line_to_stop = False
                    for line in picard_f:
                        if one_line_to_stop:
                            break
                        if line.startswith('## HISTOGRAM'):
                            one_line_to_stop = True
                    # Copy the remaining rows into the Qualimap location
                    # through a transactional temp file.
                    with file_transaction(work_dir, s.qualimap_ins_size_hist_fpath) as tx:
                        with open(tx, 'w') as qualimap_f:
                            for line in picard_f:
                                qualimap_f.write(line)
def norm_vec(vector):
    """Return a copy of the 3-element `vector` scaled to unit length."""
    assert len(vector) == 3
    arr = np.asarray(vector)
    return arr / np.sqrt((arr ** 2).sum())
def rotate_points_to_axis(points, axis):
    """Rotate all points of a list such that `axis == [0, 1, 0]`.

    The rotation is composed of three steps: rotate in the x-z-plane by
    phi into the y-z-plane, rotate in the y-z-plane by theta up to
    [0, 1, 0], then rotate back in the x-z-plane by -phi.

    Parameters
    ----------
    points: list-like with elements of length 3
        The Cartesian points. These should be in the same format as
        produced by `sphere_points_from_angles_and_tilt`.
    axis: list-like, length 3
        The reference axis used to determine the rotation. The points
        are rotated about the origin such that `axis` maps to [0, 1, 0].

    Returns
    -------
    rotated_points: np.ndarray of shape (N, 3)
        The rotated points.
    """
    ref = norm_vec(axis)
    u, v, w = ref
    pts = np.array(points)
    # Rotational angle within the x-z-plane.
    phi = np.arctan2(u, w)
    # Tilt angle w.r.t. the y-axis.
    theta = np.arccos(v)
    # Negative rotation about the y-axis.
    rot_y = np.array([
        [np.cos(phi), 0, -np.sin(phi)],
        [0, 1, 0],
        [np.sin(phi), 0, np.cos(phi)],
    ])
    # Negative rotation about the x-axis.
    rot_x = np.array([
        [1, 0, 0],
        [0, np.cos(theta), np.sin(theta)],
        [0, -np.sin(theta), np.cos(theta)],
    ])
    # Rotate into the y-z-plane, tilt onto [0,1,0], then undo the initial
    # in-plane rotation so the net effect is only the tilt towards [0,1,0].
    combined = np.dot(rot_y.T, np.dot(rot_x, rot_y))
    rotated = np.zeros((len(pts), 3))
    for idx, pnt in enumerate(pts):
        rotated[idx] = np.dot(combined, pnt)
    return rotated
def rotation_matrix_from_point(point, ret_inv=False):
    """Compute the rotation matrix taking [0, 0, 1] towards `point`.

    The matrix first rotates in the polar direction, then rotates about
    the y-axis to match the azimuthal angle in the x-z-plane. It is
    required for the correct 3D orientation of the backpropagated
    projections.

    Parameters
    ----------
    point: list-like, length 3
        The coordinates of the point in 3D.
    ret_inv: bool
        Also return the inverse of the rotation matrix. The inverse is
        required by :func:`scipy.ndimage.interpolation.affine_transform`,
        which maps output coordinates to input coordinates.

    Returns
    -------
    Rmat [, Rmat_inv]: 3x3 ndarrays
        The rotation matrix and, optionally, its inverse.
    """
    px, py, pz = point
    # Azimuthal angle in the x-z-plane.
    azim = np.arctan2(px, pz)
    # Polar angle (negative), measured towards the y-axis.
    polar = -np.arctan2(py, np.sqrt(px**2 + pz**2))
    sin_t, cos_t = np.sin(polar), np.cos(polar)
    sin_p, cos_p = np.sin(azim), np.cos(azim)
    # Rotation in the polar direction.
    rot_polar = np.array([
        [1, 0, 0],
        [0, cos_t, -sin_t],
        [0, sin_t, cos_t],
    ])
    # Rotation in the x-z-plane.
    rot_azim = np.array([
        [cos_p, 0, -sin_p],
        [0, 1, 0],
        [sin_p, 0, cos_p],
    ])
    forward = np.dot(rot_azim, rot_polar)
    if not ret_inv:
        return forward
    # Inverse: transposed factors applied in reverse order.
    return forward, np.dot(rot_polar.T, rot_azim.T)
def rotation_matrix_from_point_planerot(point, plane_angle, ret_inv=False):
    """Compute the rotation matrix taking [0, 0, 1] towards `point`,
    accounting for a tilted axis of rotation.

    Extends :func:`rotation_matrix_from_point` (polar rotation followed
    by an azimuthal rotation about the y-axis) with an additional
    rotation about the z-axis by `plane_angle`, which corrects for the
    tilt of the rotational axis within the detector plane. Required for
    the correct 3D orientation of the backpropagated projections.

    Parameters
    ----------
    point: list-like, length 3
        The coordinates of the point in 3D.
    plane_angle: float
        In-plane (x-y) tilt angle of the rotational axis in radians.
    ret_inv: bool
        Also return the inverse of the rotation matrix. The inverse is
        required by :func:`scipy.ndimage.interpolation.affine_transform`,
        which maps output coordinates to input coordinates.

    Returns
    -------
    Rmat [, Rmat_inv]: 3x3 ndarrays
        The rotation matrix and, optionally, its inverse.
    """
    # Base matrices, correct when the rotational axis has no in-plane tilt.
    base, base_inv = rotation_matrix_from_point(point, ret_inv=True)
    # Additional rotation about the z-axis corrects the in-plane tilt.
    ca, sa = np.cos(plane_angle), np.sin(plane_angle)
    rot_z = np.array([
        [ca, -sa, 0],
        [sa, ca, 0],
        [0, 0, 1],
    ])
    combined = np.dot(base, rot_z)
    if not ret_inv:
        return combined
    return combined, np.dot(rot_z.T, base_inv)
def sphere_points_from_angles_and_tilt(angles, tilted_axis):
    """Distribute `angles` on the great circle about a tilted axis.

    For a given tilt of the rotational axis `tilted_axis`, compute the
    points on the unit sphere that correspond to the distribution
    `angles` along the great circle about this axis.

    Parameters
    ----------
    angles: 1d ndarray
        The angles that will be distributed on the great circle.
    tilted_axis: list of length 3
        The tilted axis of rotation that determines the great circle.

    Notes
    -----
    The reference axis is always [0, 1, 0]. `theta` is the azimuthal
    angle measured down from the y-axis; `phi` is the polar angle in the
    x-z-plane measured from z towards x.
    """
    assert len(angles.shape) == 1
    u, v, w = norm_vec(tilted_axis)
    # Initial points on the great circle in the x-z-plane. Subtracting
    # angles[0] anchors the first point at [0, 0, 1], which step (a3)
    # below relies on.
    rel = angles - angles[0]
    pts = np.zeros((angles.shape[0], 3), dtype=float)
    pts[:, 0] = np.sin(rel)
    pts[:, 2] = np.cos(rel)
    # (a) Azimuthal tilt of the axis, measured from the y-axis. This is the
    # angle that matters for reconstruction: 0 means the axis lies in the
    # imaging plane; pi/2 would make 3D reconstruction impossible.
    theta = np.arccos(v)
    # (b) Polar angle in the x-z-plane (from +z towards +x). It matches the
    # angular positions to the correct sinogram images. Guard against
    # numerical noise flipping the axis when it coincides with the y-axis.
    if np.allclose(u, 0) and np.allclose(w, 0):
        phi = 0
    else:
        phi = np.arctan2(u, w)
    # (a1) Shrink the circle to the radius of the tilted circle.
    pts *= np.cos(theta)
    # (a2) Tilt the circle about the x-axis by theta
    # (right-handed/counter-clockwise/basic/elemental rotation).
    rot_x = np.array([
        [1, 0, 0],
        [0, np.cos(theta), -np.sin(theta)],
        [0, np.sin(theta), np.cos(theta)]
    ])
    for idx in range(pts.shape[0]):
        pts[idx] = np.dot(rot_x, pts[idx])
    # (a3) Shift so that the first point lands exactly on (0, 0, 1).
    pts = pts - (pts[0] - np.array([0, 0, 1])).reshape(1, 3)
    # (b) Rotate the whole circle about the y-axis by phi
    # (right-handed/counter-clockwise/basic/elemental rotation).
    rot_y = np.array([
        [+np.cos(phi), 0, np.sin(phi)],
        [0, 1, 0],
        [-np.sin(phi), 0, np.cos(phi)]
    ])
    for idx in range(pts.shape[0]):
        pts[idx] = np.dot(rot_y, pts[idx])
    return pts
def backpropagate_3d_tilted(uSin, angles, res, nm, lD=0,
tilted_axis=[0, 1, 0],
coords=None, weight_angles=True, onlyreal=False,
padding=(True, True), padfac=1.75, padval=None,
intp_order=2, dtype=None,
num_cores=_ncores,
save_memory=False,
copy=True,
count=None, max_count=None,
verbose=0):
r"""3D backpropagation with a tilted axis of rotation
Three-dimensional diffraction tomography reconstruction
algorithm for scattering of a plane wave
:math:`u_0(\mathbf{r}) = u_0(x,y,z)`
by a dielectric object with refractive index
:math:`n(x,y,z)`.
This method implements the 3D backpropagation algorithm with
a rotational axis that is tilted by :math:`\theta_\mathrm{tilt}`
w.r.t. the imaging plane :cite:`Mueller2015tilted`.
.. math::
f(\mathbf{r}) =
-\frac{i k_\mathrm{m}}{2\pi}
\sum_{j=1}^{N} \! \Delta \phi_0 D_{-\phi_j}^\mathrm{tilt} \!\!
\left \{
\text{FFT}^{-1}_{\mathrm{2D}}
\left \{
\left| k_\mathrm{Dx} \cdot \cos \theta_\mathrm{tilt}\right|
\frac{\text{FFT}_{\mathrm{2D}} \left \{
u_{\mathrm{B},\phi_j}(x_\mathrm{D}, y_\mathrm{D}) \right \}}
{u_0(l_\mathrm{D})}
\exp \! \left[i k_\mathrm{m}(M - 1) \cdot
(z_{\phi_j}-l_\mathrm{D}) \right]
\right \}
\right \}
with a modified rotational operator :math:`D_{-\phi_j}^\mathrm{tilt}`
and a different filter in Fourier space
:math:`|k_\mathrm{Dx} \cdot \cos \theta_\mathrm{tilt}|` when compared
to :func:`backpropagate_3d`.
.. versionadded:: 0.1.2
Parameters
----------
uSin: (A, Ny, Nx) ndarray
Three-dimensional sinogram of plane recordings
:math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D}, y_\mathrm{D})`
divided by the incident plane wave :math:`u_0(l_\mathrm{D})`
measured at the detector.
angles: ndarray of shape (A,3) or 1D array of length A
If the shape is (A,3), then `angles` consists of vectors
on the unit sphere that correspond to the direction
of illumination and acquisition (s₀). If the shape is (A,),
then `angles` is a one-dimensional array of angles in radians
that determines the angular position :math:`\phi_j`.
In both cases, `tilted_axis` must be set according to the
tilt of the rotational axis.
res: float
Vacuum wavelength of the light :math:`\lambda` in pixels.
nm: float
Refractive index of the surrounding medium :math:`n_\mathrm{m}`.
lD: float
Distance from center of rotation to detector plane
:math:`l_\mathrm{D}` in pixels.
tilted_axis: list of floats
The coordinates [x, y, z] on a unit sphere representing the
tilted axis of rotation. The default is (0,1,0),
which corresponds to a rotation about the y-axis and
follows the behavior of :func:`odtbrain.backpropagate_3d`.
coords: None [(3, M) ndarray]
Only compute the output image at these coordinates. This
keyword is reserved for future versions and is not
implemented yet.
weight_angles: bool
If `True`, weights each backpropagated projection with a factor
proportional to the angular distance between the neighboring
projections.
.. math::
\Delta \phi_0 \longmapsto \Delta \phi_j =
\frac{\phi_{j+1} - \phi_{j-1}}{2}
This currently only works when `angles` has the shape (A,).
onlyreal: bool
If `True`, only the real part of the reconstructed image
will be returned. This saves computation time.
padding: tuple of bool
Pad the input data to the second next power of 2 before
Fourier transforming. This reduces artifacts and speeds up
the process for input image sizes that are not powers of 2.
The default is padding in x and y: `padding=(True, True)`.
For padding only in x-direction (e.g. for cylindrical
symmetries), set `padding` to `(True, False)`. To turn off
padding, set it to `(False, False)`.
padfac: float
Increase padding size of the input data. A value greater
than one will trigger padding to the second-next power of
two. For example, a value of 1.75 will lead to a padded
size of 256 for an initial size of 144, whereas for it will
lead to a padded size of 512 for an initial size of 150.
Values geater than 2 are allowed. This parameter may
greatly increase memory usage!
padval: float
The value used for padding. This is important for the Rytov
approximation, where an approximat zero in the phase might
translate to 2πi due to the unwrapping algorithm. In that
case, this value should be a multiple of 2πi.
If `padval` is `None`, then the edge values are used for
padding (see documentation of :func:`numpy.pad`).
intp_order: int between 0 and 5
Order of the interpolation for rotation.
See :func:`scipy.ndimage.interpolation.affine_transform` for details.
dtype: dtype object or argument for :func:`numpy.dtype`
The data type that is used for calculations (float or double).
Defaults to `numpy.float_`.
num_cores: int
The number of cores to use for parallel operations. This value
defaults to the number of cores on the system.
save_memory: bool
Saves memory at the cost of longer computation time.
.. versionadded:: 0.1.5
copy: bool
Copy input sinogram `uSin` for data processing. If `copy`
is set to `False`, then `uSin` will be overridden.
.. versionadded:: 0.1.5
count, max_count: multiprocessing.Value or `None`
Can be used to monitor the progress of the algorithm.
Initially, the value of `max_count.value` is incremented
by the total number of steps. At each step, the value
of `count.value` is incremented.
verbose: int
Increment to increase verbosity.
Returns
-------
f: ndarray of shape (Nx, Ny, Nx), complex if `onlyreal==False`
Reconstructed object function :math:`f(\mathbf{r})` as defined
by the Helmholtz equation.
:math:`f(x,z) =
k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)`
See Also
--------
odt_to_ri: conversion of the object function :math:`f(\mathbf{r})`
to refractive index :math:`n(\mathbf{r})`
Notes
-----
This implementation can deal with projection angles that are not
distributed along a circle about the rotational axis. If there are
slight deviations from this circle, simply pass the 3D rotational
positions instead of the 1D angles to the `angles` argument. In
principle, this should improve the reconstruction. The general
problem here is that the backpropagation algorithm requires a
ramp filter in Fourier space that is oriented perpendicular to the
rotational axis. If the sample does not rotate about a single axis,
then a 1D parametric representation of this rotation must be found
to correctly determine the filter in Fourier space. Such a
parametric representation could e.g. be a spiral between the poles
of the unit sphere (but this kind of rotation is probably difficult
to implement experimentally).
If you have input images with rectangular shape, e.g. Nx!=Ny and
the rotational axis deviates by approximately PI/2 from the axis
(0,1,0), then data might get cropped in the reconstruction volume.
You can avoid that by rotating your input data and the rotational
axis by PI/2. For instance, change`tilted_axis` from [1,0,0] to
[0,1,0] and `np.rot90` the sinogram images.
Do not use the parameter `lD` in combination with the Rytov
approximation - the propagation is not correctly described.
Instead, numerically refocus the sinogram prior to converting
it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`)
with a numerical focusing algorithm (available in the Python
package :py:mod:`nrefocus`).
"""
A = angles.shape[0]
if angles.shape not in [(A,), (A, 1), (A, 3)]:
raise ValueError("`angles` must have shape (A,) or (A,3)!")
if len(uSin.shape) != 3:
raise ValueError("Input data `uSin` must have shape (A,Ny,Nx).")
if len(uSin) != A:
raise ValueError("`len(angles)` must be equal to `len(uSin)`.")
if len(list(padding)) != 2:
raise ValueError("`padding` must be boolean tuple of length 2!")
if np.array(padding).dtype is not np.dtype(bool):
raise ValueError("Parameter `padding` must be boolean tuple.")
if coords is not None:
raise NotImplementedError("Setting coordinates is not yet supported.")
if num_cores > _ncores:
raise ValueError("`num_cores` must not exceed number "
+ "of physical cores: {}".format(_ncores))
# setup dtype
if dtype is None:
dtype = np.float_
dtype = np.dtype(dtype)
if dtype.name not in ["float32", "float64"]:
raise ValueError("dtype must be float32 or float64!")
dtype_complex = np.dtype("complex{}".format(
2 * int(dtype.name.strip("float"))))
# progess monitoring
if max_count is not None:
max_count.value += A + 2
ne.set_num_threads(num_cores)
uSin = np.array(uSin, copy=copy)
angles = np.array(angles, copy=copy)
angles = np.squeeze(angles) # support shape (A,1)
# lengths of the input data
lny, lnx = uSin.shape[1], uSin.shape[2]
ln = lnx
# We perform zero-padding before performing the Fourier transform.
# This gets rid of artifacts due to false periodicity and also
# speeds up Fourier transforms of the input image size is not
# a power of 2.
orderx = np.int(max(64., 2**np.ceil(np.log(lnx * padfac) / np.log(2))))
ordery = np.int(max(64., 2**np.ceil(np.log(lny * padfac) / np.log(2))))
if padding[0]:
padx = orderx - lnx
else:
padx = 0
if padding[1]:
pady = ordery - lny
else:
pady = 0
padyl = np.int(np.ceil(pady / 2))
padyr = pady - padyl
padxl = np.int(np.ceil(padx / 2))
padxr = padx - padxl
# zero-padded length of sinogram.
lNx, lNy = lnx + padx, lny + pady
lNz = ln
if verbose > 0:
print("......Image size (x,y): {}x{}, padded: {}x{}".format(
lnx, lny, lNx, lNy))
# `tilted_axis` is required for several things:
# 1. the filter |kDx*v + kDy*u| with (u,v,w)==tilted_axis
# 2. the alignment of the rotational axis with the y-axis
# 3. the determination of the point coordinates if only
# angles in radians are given.
# For (1) we need the exact axis that corresponds to our input data.
# For (2) and (3) we need `tilted_axis_yz` (see below) which is the
# axis `tilted_axis` rotated in the detector plane such that its
# projection onto the detector coincides with the y-axis.
# Normalize input axis
tilted_axis = norm_vec(tilted_axis)
# `tilted_axis_yz` is computed by performing the inverse rotation in
# the x-y plane with `angz`. We will again use `angz` in the transform
# within the for-loop to rotate each projection according to its
# acquisition angle.
angz = np.arctan2(tilted_axis[0], tilted_axis[1])
rotmat = np.array([
[np.cos(angz), -np.sin(angz), 0],
[np.sin(angz), np.cos(angz), 0],
[0, 0, 1],
])
# rotate `tilted_axis` onto the y-z plane.
tilted_axis_yz = norm_vec(np.dot(rotmat, tilted_axis))
if len(angles.shape) == 1:
if weight_angles:
weights = util.compute_angle_weights_1d(angles).reshape(-1, 1, 1)
# compute the 3D points from tilted axis
angles = sphere_points_from_angles_and_tilt(angles, tilted_axis_yz)
else:
if weight_angles:
warnings.warn("3D angular weighting not yet supported!")
weights = 1
# normalize and rotate angles
for ii in range(angles.shape[0]):
# angles[ii] = norm_vec(angles[ii]) #-> not correct
# instead rotate like `tilted_axis` onto the y-z plane.
angles[ii] = norm_vec(np.dot(rotmat, angles[ii]))
if weight_angles:
uSin *= weights
# Cut-Off frequency
# km [1/px]
km = (2 * np.pi * nm) / res
# The notation in the our optical tomography script for
# a wave propagating to the right is:
#
# u0(x) = exp(ikx)
#
# However, in physics usually we use the other sign convention:
#
# u0(x) = exp(-ikx)
#
# In order to be consistent with programs like Meep or our
# scattering script for a dielectric cylinder, we want to use the
# latter sign convention.
# This is not a big problem. We only need to multiply the imaginary
# part of the scattered wave by -1.
# Ask for the filter. Do not include zero (first element).
#
# Integrals over ϕ₀ [0,2π]; kx [-kₘ,kₘ]
# - double coverage factor 1/2 already included
# - unitary angular frequency to unitary ordinary frequency
# conversion performed in calculation of UB=FT(uB).
#
# f(r) = -i kₘ / ((2π)² a₀) (prefactor)
# * iiint dϕ₀ dkx dky (prefactor)
# * |kx| (prefactor)
# * exp(-i kₘ M lD ) (prefactor)
# * UBϕ₀(kx) (dependent on ϕ₀)
# * exp( i (kx t⊥ + kₘ (M - 1) s₀) r ) (dependent on ϕ₀ and r)
# (r and s₀ are vectors. The last term contains a dot-product)
#
# kₘM = sqrt( kₘ² - kx² - ky² )
# t⊥ = ( cos(ϕ₀), ky/kx, sin(ϕ₀) )
# s₀ = ( -sin(ϕ₀), 0 , cos(ϕ₀) )
#
# The filter can be split into two parts
#
# 1) part without dependence on the z-coordinate
#
# -i kₘ / ((2π)² a₀)
# * iiint dϕ₀ dkx dky
# * |kx|
# * exp(-i kₘ M lD )
#
# 2) part with dependence of the z-coordinate
#
# exp( i (kx t⊥ + kₘ (M - 1) s₀) r )
#
# The filter (1) can be performed using the classical filter process
# as in the backprojection algorithm.
#
#
# if lNx != lNy:
# raise NotImplementedError("Input data must be square shaped!")
# Corresponding sample frequencies
fx = np.fft.fftfreq(lNx) # 1D array
fy = np.fft.fftfreq(lNy) # 1D array
# kx is a 1D array.
kx = 2 * np.pi * fx
ky = 2 * np.pi * fy
# Differentials for integral
dphi0 = 2 * np.pi / A
# We will later multiply with phi0.
# a, y, x
kx = kx.reshape(1, -1)
ky = ky.reshape(-1, 1)
# Low-pass filter:
# less-than-or-equal would give us zero division error.
filter_klp = (kx**2 + ky**2 < km**2)
# Filter M so there are no nans from the root
M = 1. / km * np.sqrt((km**2 - kx**2 - ky**2) * filter_klp)
prefactor = -1j * km / (2 * np.pi)
prefactor *= dphi0
# Also filter the prefactor, so nothing outside the required
# low-pass contributes to the sum.
# The filter is now dependent on the rotational position of the
# specimen. We have to include information from the angles.
# We want to estimate the rotational axis for every frame. We
# do that by computing the cross-product of the vectors in
# angles from the current and previous image.
u, v, _w = tilted_axis
filterabs = np.abs(kx*v+ky*u) * filter_klp
# new in version 0.1.4:
# We multiply by the factor (M-1) instead of just (M)
# to take into account that we have a scattered
# wave that is normalized by u0.
prefactor *= np.exp(-1j * km * (M-1) * lD)
if count is not None:
count.value += 1
#
#
# filter (2) must be applied before rotation as well
# exp( i (kx t⊥ + kₘ (M - 1) s₀) r )
#
# kₘM = sqrt( kₘ² - kx² - ky² )
# t⊥ = ( cos(ϕ₀), ky/kx, sin(ϕ₀) )
# s₀ = ( -sin(ϕ₀), 0 , cos(ϕ₀) )
#
# This filter is effectively an inverse Fourier transform
#
# exp(i kx xD) exp(i ky yD) exp(i kₘ (M - 1) zD )
#
# xD = x cos(ϕ₀) + z sin(ϕ₀)
# zD = - x sin(ϕ₀) + z cos(ϕ₀)
# Everything is in pixels
center = lNz / 2.0
# x = np.linspace(-centerx, centerx, lNx, endpoint=False)
# x = np.arange(lNx) - center + .5
# Meshgrid for output array
# zv, yv, xv = np.meshgrid(x,x,x)
# z, y, x
# xv = x.reshape( 1, 1,-1)
# yv = x.reshape( 1,-1, 1)
# z = np.arange(ln) - center + .5
z = np.linspace(-center, center, lNz, endpoint=False)
zv = z.reshape(-1, 1, 1)
# y, x
Mp = M.reshape(lNy, lNx)
# filter2 = np.exp(1j * zv * km * (Mp - 1))
f2_exp_fac = 1j * km * (Mp - 1)
if save_memory:
# compute filter2 later
pass
else:
# compute filter2 now
# (this requires more RAM but is faster)
filter2 = ne.evaluate("exp(factor * zv)",
local_dict={"factor": f2_exp_fac,
"zv": zv})
if count is not None:
count.value += 1
# Prepare complex output image
if onlyreal:
outarr = np.zeros((ln, lny, lnx), dtype=dtype)
else:
outarr = np.zeros((ln, lny, lnx), dtype=dtype_complex)
# Create plan for FFTW:
# Flag is "estimate":
# specifies that, instead of actual measurements of different
# algorithms, a simple heuristic is used to pick a (probably
# sub-optimal) plan quickly. With this flag, the input/output
# arrays are not overwritten during planning.
# Byte-aligned arrays
oneslice = pyfftw.empty_aligned((lNy, lNx), dtype_complex)
myfftw_plan = pyfftw.FFTW(oneslice, oneslice, threads=num_cores,
flags=["FFTW_ESTIMATE"], axes=(0, 1))
# Create plan for IFFTW:
inarr = pyfftw.empty_aligned((lNy, lNx), dtype_complex)
# plan is "patient":
# FFTW_PATIENT is like FFTW_MEASURE, but considers a wider range
# of algorithms and often produces a “more optimal” plan
# (especially for large transforms), but at the expense of
# several times longer planning time (especially for large
# transforms).
# print(inarr.flags)
myifftw_plan = pyfftw.FFTW(inarr, inarr, threads=num_cores,
axes=(0, 1),
direction="FFTW_BACKWARD",
flags=["FFTW_MEASURE"])
# filtered projections in loop
filtered_proj = np.zeros((ln, lny, lnx), dtype=dtype_complex)
# Rotate all points such that we are effectively rotating everything
# about the y-axis.
angles = rotate_points_to_axis(points=angles, axis=tilted_axis_yz)
for aa in np.arange(A):
if padval is None:
oneslice[:] = np.pad(uSin[aa],
((padyl, padyr), (padxl, padxr)),
mode="edge")
else:
oneslice[:] = np.pad(uSin[aa],
((padyl, padyr), (padxl, padxr)),
mode="linear_ramp",
end_values=(padval,))
myfftw_plan.execute()
# normalize to (lNx * lNy) for FFTW and multiply with prefactor, filter
oneslice *= filterabs * prefactor / (lNx * lNy)
for p in range(len(zv)):
if save_memory:
# compute filter2 here;
# this is comparatively slower than the other case
ne.evaluate("exp(factor * zvp) * projectioni",
local_dict={"zvp": zv[p],
"projectioni": oneslice,
"factor": f2_exp_fac},
out=inarr)
else:
# use universal functions
np.multiply(filter2[p], oneslice, out=inarr)
myifftw_plan.execute()
filtered_proj[p, :, :] = inarr[padyl:padyl+lny, padxl:padxl+lnx]
# The Cartesian axes in our array are ordered like this: [z,y,x]
# However, the rotation matrix requires [x,y,z]. Therefore, we
# need to np.transpose the first and last axis and also invert the
# y-axis.
fil_p_t = filtered_proj.transpose(2, 1, 0)[:, ::-1, :]
# get rotation matrix for this point and also rotate in plane
_drot, drotinv = rotation_matrix_from_point_planerot(angles[aa],
plane_angle=angz,
ret_inv=True)
# apply offset required by affine_transform
# The offset is only required for the rotation in
# the x-z-plane.
# This could be achieved like so:
# The offset "-.5" assures that we are rotating about
# the center of the image and not the value at the center
# of the array (this is also what `scipy.ndimage.rotate` does).
c = 0.5 * np.array(fil_p_t.shape) - .5
offset = c - np.dot(drotinv, c)
# Perform rotation
# We cannot split the inplace-rotation into multiple subrotations
# as we did in _Back_3d_tilted.backpropagate_3d, because the rotation
# axis is arbitrarily placed in the 3d array. Rotating single
# slices does not yield the same result as rotating the entire
# array. Instead of using affine_transform, map_coordinates might
# be faster for multiple cores.
# Also undo the axis transposition that we performed previously.
outarr.real += scipy.ndimage.interpolation.affine_transform(
fil_p_t.real, drotinv,
offset=offset,
mode="constant",
cval=0,
order=intp_order).transpose(2, 1, 0)[:, ::-1, :]
if not onlyreal:
outarr.imag += scipy.ndimage.interpolation.affine_transform(
fil_p_t.imag, drotinv,
offset=offset,
mode="constant",
cval=0,
order=intp_order).transpose(2, 1, 0)[:, ::-1, :]
if count is not None:
count.value += 1
return outarr | r"""3D backpropagation with a tilted axis of rotation
Three-dimensional diffraction tomography reconstruction
algorithm for scattering of a plane wave
:math:`u_0(\mathbf{r}) = u_0(x,y,z)`
by a dielectric object with refractive index
:math:`n(x,y,z)`.
This method implements the 3D backpropagation algorithm with
a rotational axis that is tilted by :math:`\theta_\mathrm{tilt}`
w.r.t. the imaging plane :cite:`Mueller2015tilted`.
.. math::
f(\mathbf{r}) =
-\frac{i k_\mathrm{m}}{2\pi}
\sum_{j=1}^{N} \! \Delta \phi_0 D_{-\phi_j}^\mathrm{tilt} \!\!
\left \{
\text{FFT}^{-1}_{\mathrm{2D}}
\left \{
\left| k_\mathrm{Dx} \cdot \cos \theta_\mathrm{tilt}\right|
\frac{\text{FFT}_{\mathrm{2D}} \left \{
u_{\mathrm{B},\phi_j}(x_\mathrm{D}, y_\mathrm{D}) \right \}}
{u_0(l_\mathrm{D})}
\exp \! \left[i k_\mathrm{m}(M - 1) \cdot
(z_{\phi_j}-l_\mathrm{D}) \right]
\right \}
\right \}
with a modified rotational operator :math:`D_{-\phi_j}^\mathrm{tilt}`
and a different filter in Fourier space
:math:`|k_\mathrm{Dx} \cdot \cos \theta_\mathrm{tilt}|` when compared
to :func:`backpropagate_3d`.
.. versionadded:: 0.1.2
Parameters
----------
uSin: (A, Ny, Nx) ndarray
Three-dimensional sinogram of plane recordings
:math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D}, y_\mathrm{D})`
divided by the incident plane wave :math:`u_0(l_\mathrm{D})`
measured at the detector.
angles: ndarray of shape (A,3) or 1D array of length A
If the shape is (A,3), then `angles` consists of vectors
on the unit sphere that correspond to the direction
of illumination and acquisition (s₀). If the shape is (A,),
then `angles` is a one-dimensional array of angles in radians
that determines the angular position :math:`\phi_j`.
In both cases, `tilted_axis` must be set according to the
tilt of the rotational axis.
res: float
Vacuum wavelength of the light :math:`\lambda` in pixels.
nm: float
Refractive index of the surrounding medium :math:`n_\mathrm{m}`.
lD: float
Distance from center of rotation to detector plane
:math:`l_\mathrm{D}` in pixels.
tilted_axis: list of floats
The coordinates [x, y, z] on a unit sphere representing the
tilted axis of rotation. The default is (0,1,0),
which corresponds to a rotation about the y-axis and
follows the behavior of :func:`odtbrain.backpropagate_3d`.
coords: None [(3, M) ndarray]
Only compute the output image at these coordinates. This
keyword is reserved for future versions and is not
implemented yet.
weight_angles: bool
If `True`, weights each backpropagated projection with a factor
proportional to the angular distance between the neighboring
projections.
.. math::
\Delta \phi_0 \longmapsto \Delta \phi_j =
\frac{\phi_{j+1} - \phi_{j-1}}{2}
This currently only works when `angles` has the shape (A,).
onlyreal: bool
If `True`, only the real part of the reconstructed image
will be returned. This saves computation time.
padding: tuple of bool
Pad the input data to the second next power of 2 before
Fourier transforming. This reduces artifacts and speeds up
the process for input image sizes that are not powers of 2.
The default is padding in x and y: `padding=(True, True)`.
For padding only in x-direction (e.g. for cylindrical
symmetries), set `padding` to `(True, False)`. To turn off
padding, set it to `(False, False)`.
padfac: float
Increase padding size of the input data. A value greater
than one will trigger padding to the second-next power of
two. For example, a value of 1.75 will lead to a padded
size of 256 for an initial size of 144, whereas it will
lead to a padded size of 512 for an initial size of 150.
Values greater than 2 are allowed. This parameter may
greatly increase memory usage!
padval: float
The value used for padding. This is important for the Rytov
approximation, where an approximate zero in the phase might
translate to 2πi due to the unwrapping algorithm. In that
case, this value should be a multiple of 2πi.
If `padval` is `None`, then the edge values are used for
padding (see documentation of :func:`numpy.pad`).
intp_order: int between 0 and 5
Order of the interpolation for rotation.
See :func:`scipy.ndimage.interpolation.affine_transform` for details.
dtype: dtype object or argument for :func:`numpy.dtype`
The data type that is used for calculations (float or double).
Defaults to `numpy.float_`.
num_cores: int
The number of cores to use for parallel operations. This value
defaults to the number of cores on the system.
save_memory: bool
Saves memory at the cost of longer computation time.
.. versionadded:: 0.1.5
copy: bool
Copy input sinogram `uSin` for data processing. If `copy`
is set to `False`, then `uSin` will be overridden.
.. versionadded:: 0.1.5
count, max_count: multiprocessing.Value or `None`
Can be used to monitor the progress of the algorithm.
Initially, the value of `max_count.value` is incremented
by the total number of steps. At each step, the value
of `count.value` is incremented.
verbose: int
Increment to increase verbosity.
Returns
-------
f: ndarray of shape (Nx, Ny, Nx), complex if `onlyreal==False`
Reconstructed object function :math:`f(\mathbf{r})` as defined
by the Helmholtz equation.
:math:`f(x,z) =
k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)`
See Also
--------
odt_to_ri: conversion of the object function :math:`f(\mathbf{r})`
to refractive index :math:`n(\mathbf{r})`
Notes
-----
This implementation can deal with projection angles that are not
distributed along a circle about the rotational axis. If there are
slight deviations from this circle, simply pass the 3D rotational
positions instead of the 1D angles to the `angles` argument. In
principle, this should improve the reconstruction. The general
problem here is that the backpropagation algorithm requires a
ramp filter in Fourier space that is oriented perpendicular to the
rotational axis. If the sample does not rotate about a single axis,
then a 1D parametric representation of this rotation must be found
to correctly determine the filter in Fourier space. Such a
parametric representation could e.g. be a spiral between the poles
of the unit sphere (but this kind of rotation is probably difficult
to implement experimentally).
If you have input images with rectangular shape, e.g. Nx!=Ny and
the rotational axis deviates by approximately PI/2 from the axis
(0,1,0), then data might get cropped in the reconstruction volume.
You can avoid that by rotating your input data and the rotational
axis by PI/2. For instance, change `tilted_axis` from [1,0,0] to
[0,1,0] and `np.rot90` the sinogram images.
Do not use the parameter `lD` in combination with the Rytov
approximation - the propagation is not correctly described.
Instead, numerically refocus the sinogram prior to converting
it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`)
with a numerical focusing algorithm (available in the Python
package :py:mod:`nrefocus`). | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.