| sentence1 (string, 52 to 3.87M chars) | sentence2 (string, 1 to 47.2k chars) | label (1 class) |
|---|---|---|
def _value_ref(self, column, value, *, dumped=False, inner=False):
"""inner=True uses column.typedef.inner_type instead of column.typedef"""
ref = ":v{}".format(self.next_index)
# Need to dump this value
if not dumped:
typedef = column.typedef
for segment in path_of(column):
typedef = typedef[segment]
if inner:
typedef = typedef.inner_typedef
value = self.engine._dump(typedef, value)
self.attr_values[ref] = value
self.counts[ref] += 1
return ref, value
|
inner=True uses column.typedef.inner_typedef instead of column.typedef
|
entailment
|
def any_ref(self, *, column, value=missing, dumped=False, inner=False):
"""Returns a NamedTuple of (name, type, value) for any type of reference.
.. code-block:: python
# Name ref
>>> tracker.any_ref(column=User.email)
Reference(name='email', type='name', value=None)
# Value ref
>>> tracker.any_ref(column=User.email, value='user@domain')
Reference(name='email', type='value', value={'S': 'user@domain'})
# Passed as value ref, but value is another column
>>> tracker.any_ref(column=User.email, value=User.other_column)
Reference(name='other_column', type='name', value=None)
:param column: The column to reference. If ``value`` is not provided, this will render a name ref for this column.
:type column: :class:`~bloop.conditions.ComparisonMixin`
:param value: *(Optional)* If provided, this is likely a value ref. If ``value`` is also a column,
this will render a name ref for that column (not the ``column`` parameter).
:param bool dumped: *(Optional)* True if the value has already been dumped and should not be dumped
through the column's typedef again. Commonly used with atomic conditions (which store the object's dumped
representation). Default is False.
:param bool inner: *(Optional)* True if this is a value ref and it should be dumped through a collection's
inner type, and not the collection type itself. Default is False.
:return: A name or value reference
:rtype: :class:`bloop.conditions.Reference`
"""
# Can't use None since it's a legal value for comparisons (attribute_not_exists)
if value is missing:
# Simple path ref to the column.
name = self._path_ref(column=column)
ref_type = "name"
value = None
elif isinstance(value, ComparisonMixin):
# value is also a column! Also a path ref.
name = self._path_ref(column=value)
ref_type = "name"
value = None
else:
# Simple value ref.
name, value = self._value_ref(column=column, value=value, dumped=dumped, inner=inner)
ref_type = "value"
return Reference(name=name, type=ref_type, value=value)
|
Returns a NamedTuple of (name, type, value) for any type of reference.
.. code-block:: python
# Name ref
>>> tracker.any_ref(column=User.email)
Reference(name='email', type='name', value=None)
# Value ref
>>> tracker.any_ref(column=User.email, value='user@domain')
Reference(name='email', type='value', value={'S': 'user@domain'})
# Passed as value ref, but value is another column
>>> tracker.any_ref(column=User.email, value=User.other_column)
Reference(name='other_column', type='name', value=None)
:param column: The column to reference. If ``value`` is not provided, this will render a name ref for this column.
:type column: :class:`~bloop.conditions.ComparisonMixin`
:param value: *(Optional)* If provided, this is likely a value ref. If ``value`` is also a column,
this will render a name ref for that column (not the ``column`` parameter).
:param bool dumped: *(Optional)* True if the value has already been dumped and should not be dumped
through the column's typedef again. Commonly used with atomic conditions (which store the object's dumped
representation). Default is False.
:param bool inner: *(Optional)* True if this is a value ref and it should be dumped through a collection's
inner type, and not the collection type itself. Default is False.
:return: A name or value reference
:rtype: :class:`bloop.conditions.Reference`
|
entailment
|
def pop_refs(self, *refs):
"""Decrement the usage of each ref by 1.
If this was the last use of a ref, remove it from attr_names or attr_values.
"""
for ref in refs:
name = ref.name
count = self.counts[name]
# Not tracking this ref
if count < 1:
continue
# Someone else is using this ref
elif count > 1:
self.counts[name] -= 1
# Last reference
else:
logger.debug("popping last usage of {}".format(ref))
self.counts[name] -= 1
if ref.type == "value":
del self.attr_values[name]
else:
# Clean up both name indexes
path_segment = self.attr_names[name]
del self.attr_names[name]
del self.name_attr_index[path_segment]
|
Decrement the usage of each ref by 1.
If this was the last use of a ref, remove it from attr_names or attr_values.
|
entailment
|
def render(self, obj=None, condition=None, atomic=False, update=False, filter=None, projection=None, key=None):
"""Main entry point for rendering multiple expressions. All parameters are optional, except obj when
atomic or update are True.
:param obj: *(Optional)* An object to render an atomic condition or update expression for. Required if
update or atomic are True. Default is None.
:param condition: *(Optional)* Rendered as a "ConditionExpression" for a conditional operation.
If atomic is True, the two are rendered in an AND condition. Default is None.
:type condition: :class:`~bloop.conditions.BaseCondition`
:param bool atomic: *(Optional)* True if an atomic condition should be created for ``obj`` and rendered as
a "ConditionExpression". Default is False.
:param bool update: *(Optional)* True if an "UpdateExpression" should be rendered for ``obj``.
Default is False.
:param filter: *(Optional)* A filter condition for a query or scan, rendered as a "FilterExpression".
Default is None.
:type filter: :class:`~bloop.conditions.BaseCondition`
:param projection: *(Optional)* A set of Columns to include in a query or scan, rendered as a
"ProjectionExpression". Default is None.
:type projection: set :class:`~bloop.models.Column`
:param key: *(Optional)* A key condition for queries, rendered as a "KeyConditionExpression". Default is None.
:type key: :class:`~bloop.conditions.BaseCondition`
"""
if (atomic or update) and not obj:
raise InvalidCondition("An object is required to render atomic conditions or updates.")
if filter:
self.render_filter_expression(filter)
if projection:
self.render_projection_expression(projection)
if key:
self.render_key_expression(key)
# Condition requires a bit of work, because either one can be empty/false
condition = (condition or Condition()) & (get_snapshot(obj) if atomic else Condition())
if condition:
self.render_condition_expression(condition)
if update:
self.render_update_expression(obj)
|
Main entry point for rendering multiple expressions. All parameters are optional, except obj when
atomic or update are True.
:param obj: *(Optional)* An object to render an atomic condition or update expression for. Required if
update or atomic are True. Default is None.
:param condition: *(Optional)* Rendered as a "ConditionExpression" for a conditional operation.
If atomic is True, the two are rendered in an AND condition. Default is None.
:type condition: :class:`~bloop.conditions.BaseCondition`
:param bool atomic: *(Optional)* True if an atomic condition should be created for ``obj`` and rendered as
a "ConditionExpression". Default is False.
:param bool update: *(Optional)* True if an "UpdateExpression" should be rendered for ``obj``.
Default is False.
:param filter: *(Optional)* A filter condition for a query or scan, rendered as a "FilterExpression".
Default is None.
:type filter: :class:`~bloop.conditions.BaseCondition`
:param projection: *(Optional)* A set of Columns to include in a query or scan, rendered as a
"ProjectionExpression". Default is None.
:type projection: set :class:`~bloop.models.Column`
:param key: *(Optional)* A key condition for queries, rendered as a "KeyConditionExpression". Default is None.
:type key: :class:`~bloop.conditions.BaseCondition`
|
entailment
|
def rendered(self):
"""The rendered wire format for all conditions that have been rendered. Rendered conditions are never
cleared. A new :class:`~bloop.conditions.ConditionRenderer` should be used for each operation."""
expressions = {k: v for (k, v) in self.expressions.items() if v is not None}
if self.refs.attr_names:
expressions["ExpressionAttributeNames"] = self.refs.attr_names
if self.refs.attr_values:
expressions["ExpressionAttributeValues"] = self.refs.attr_values
return expressions
|
The rendered wire format for all conditions that have been rendered. Rendered conditions are never
cleared. A new :class:`~bloop.conditions.ConditionRenderer` should be used for each operation.
|
entailment
|
def _unpack(self, record, key, expected):
"""Replaces the attr dict at the given key with an instance of a Model"""
attrs = record.get(key)
if attrs is None:
return
obj = unpack_from_dynamodb(
attrs=attrs,
expected=expected,
model=self.model,
engine=self.engine
)
object_loaded.send(self.engine, engine=self.engine, obj=obj)
record[key] = obj
|
Replaces the attr dict at the given key with an instance of a Model
|
entailment
|
def reformat_record(record):
"""Repack a record into a cleaner structure for consumption."""
return {
"key": record["dynamodb"].get("Keys", None),
"new": record["dynamodb"].get("NewImage", None),
"old": record["dynamodb"].get("OldImage", None),
"meta": {
"created_at": record["dynamodb"]["ApproximateCreationDateTime"],
"event": {
"id": record["eventID"],
"type": record["eventName"].lower(),
"version": record["eventVersion"]
},
"sequence_number": record["dynamodb"]["SequenceNumber"],
}
}
|
Repack a record into a cleaner structure for consumption.
|
entailment
|
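Because the reshaping above is purely structural, it can be pinned down with a small sketch. The raw record below is a hypothetical DynamoDB Streams entry shaped like the ones ``reformat_record`` consumes; only the fields the function reads are filled in.

.. code-block:: python

    import datetime

    created_at = datetime.datetime(2016, 10, 23, tzinfo=datetime.timezone.utc)
    raw = {
        "eventID": "3fe6d339b7cb19a1474b3d853972c12a",
        "eventName": "INSERT",
        "eventVersion": "1.1",
        "dynamodb": {
            "ApproximateCreationDateTime": created_at,
            "SequenceNumber": "700000000007366876916",
            "NewImage": {"id": {"N": "3"}},
        },
    }
    record = reformat_record(raw)
    assert record["key"] is None and record["old"] is None
    assert record["new"] == {"id": {"N": "3"}}
    assert record["meta"]["event"]["type"] == "insert"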
def unpack_shards(shards, stream_arn, session):
"""List[Dict] -> Dict[shard_id, Shard].
Each Shard's parent/children are hooked up with the other Shards in the list.
"""
if not shards:
return {}
# When unpacking tokens, shard id key is "shard_id"
# When unpacking DescribeStream responses, shard id key is "ShardId"
if "ShardId" in shards[0]:
shards = _translate_shards(shards)
by_id = {shard_token["shard_id"]:
Shard(stream_arn=stream_arn, shard_id=shard_token["shard_id"],
iterator_type=shard_token.get("iterator_type"), sequence_number=shard_token.get("sequence_number"),
parent=shard_token.get("parent"), session=session)
for shard_token in shards}
for shard in by_id.values():
if shard.parent:
shard.parent = by_id[shard.parent]
shard.parent.children.append(shard)
return by_id
|
List[Dict] -> Dict[shard_id, Shard].
Each Shard's parent/children are hooked up with the other Shards in the list.
|
entailment
|
def token(self):
"""JSON-serializable representation of the current Shard state.
The token is enough to rebuild the Shard as part of rebuilding a Stream.
:returns: Shard state as a json-friendly dict
:rtype: dict
"""
if self.iterator_type in RELATIVE_ITERATORS:
logger.warning("creating shard token at non-exact location \"{}\"".format(self.iterator_type))
token = {
"stream_arn": self.stream_arn,
"shard_id": self.shard_id,
"iterator_type": self.iterator_type,
"sequence_number": self.sequence_number,
}
if self.parent:
token["parent"] = self.parent.shard_id
if not self.iterator_type:
del token["iterator_type"]
if not self.sequence_number:
del token["sequence_number"]
return token
|
JSON-serializable representation of the current Shard state.
The token is enough to rebuild the Shard as part of rebuilding a Stream.
:returns: Shard state as a json-friendly dict
:rtype: dict
|
entailment
|
def walk_tree(self):
"""Generator that yields each :class:`~bloop.stream.shard.Shard` by walking the shard's children in order."""
shards = collections.deque([self])
while shards:
shard = shards.popleft()
yield shard
shards.extend(shard.children)
|
Generator that yields each :class:`~bloop.stream.shard.Shard` by walking the shard's children in order.
|
entailment
|
def jump_to(self, *, iterator_type, sequence_number=None):
"""Move to a new position in the shard using the standard parameters to GetShardIterator.
:param str iterator_type: "trim_horizon", "at_sequence", "after_sequence", "latest"
:param str sequence_number: *(Optional)* Sequence number to use with at/after sequence. Default is None.
"""
# Just a simple wrapper; let the caller handle RecordsExpired
self.iterator_id = self.session.get_shard_iterator(
stream_arn=self.stream_arn,
shard_id=self.shard_id,
iterator_type=iterator_type,
sequence_number=sequence_number)
self.iterator_type = iterator_type
self.sequence_number = sequence_number
self.empty_responses = 0
|
Move to a new position in the shard using the standard parameters to GetShardIterator.
:param str iterator_type: "trim_horizon", "at_sequence", "after_sequence", "latest"
:param str sequence_number: *(Optional)* Sequence number to use with at/after sequence. Default is None.
|
entailment
|
def seek_to(self, position):
"""Move the Shard's iterator to the earliest record after the :class:`~datetime.datetime` time.
Returns the first records at or past ``position``. If the list is empty,
the seek failed to find records, either because the Shard is exhausted or it
reached the HEAD of an open Shard.
:param position: The position in time to move to.
:type position: :class:`~datetime.datetime`
:returns: A list of the first records found after ``position``. May be empty.
"""
# 0) We have no way to associate the date with a position,
# so we have to scan the shard from the beginning.
self.jump_to(iterator_type="trim_horizon")
position = int(position.timestamp())
while (not self.exhausted) and (self.empty_responses < CALLS_TO_REACH_HEAD):
records = self.get_records()
# We can skip the whole record set if the newest (last) record isn't new enough.
if records and records[-1]["meta"]["created_at"].timestamp() >= position:
# Looking for the first number *below* the position.
for offset, record in enumerate(reversed(records)):
if record["meta"]["created_at"].timestamp() < position:
index = len(records) - offset
return records[index:]
return records
# Either exhausted the Shard or caught up to HEAD.
return []
|
Move the Shard's iterator to the earliest record after the :class:`~datetime.datetime` time.
Returns the first records at or past ``position``. If the list is empty,
the seek failed to find records, either because the Shard is exhausted or it
reached the HEAD of an open Shard.
:param position: The position in time to move to.
:type position: :class:`~datetime.datetime`
:returns: A list of the first records found after ``position``. May be empty.
|
entailment
|
def load_children(self):
"""If the Shard doesn't have any children, tries to find some from DescribeStream.
If the Shard is open this won't find any children, so an empty response doesn't
mean the Shard will **never** have children.
"""
# Child count is fixed the first time any of the following happen:
# 0 :: stream closed or throughput decreased
# 1 :: shard was open for ~4 hours
# 2 :: throughput increased
if self.children:
return self.children
# ParentShardId -> [Shard, ...]
by_parent = collections.defaultdict(list)
# ShardId -> Shard
by_id = {}
for shard in self.session.describe_stream(
stream_arn=self.stream_arn,
first_shard=self.shard_id)["Shards"]:
parent_list = by_parent[shard.get("ParentShardId")]
shard = Shard(
stream_arn=self.stream_arn,
shard_id=shard["ShardId"],
parent=shard.get("ParentShardId"),
session=self.session)
parent_list.append(shard)
by_id[shard.shard_id] = shard
# Find this shard when looking up shards by ParentShardId
by_id[self.shard_id] = self
# Insert this shard's children, then handle its child's descendants etc.
to_insert = collections.deque(by_parent[self.shard_id])
while to_insert:
shard = to_insert.popleft()
# ParentShardId -> Shard
shard.parent = by_id[shard.parent]
shard.parent.children.append(shard)
# Continue for any shards that have this shard as their parent
to_insert.extend(by_parent[shard.shard_id])
return self.children
|
If the Shard doesn't have any children, tries to find some from DescribeStream.
If the Shard is open this won't find any children, so an empty response doesn't
mean the Shard will **never** have children.
|
entailment
|
def get_records(self):
"""Get the next set of records in this shard. An empty list doesn't guarantee the shard is exhausted.
:returns: A list of reformatted records. May be empty.
"""
# Won't be able to find new records.
if self.exhausted:
return []
# Already caught up, just the one call please.
if self.empty_responses >= CALLS_TO_REACH_HEAD:
return self._apply_get_records_response(self.session.get_stream_records(self.iterator_id))
# Up to 5 calls to try and find a result
while self.empty_responses < CALLS_TO_REACH_HEAD and not self.exhausted:
records = self._apply_get_records_response(self.session.get_stream_records(self.iterator_id))
if records:
return records
return []
|
Get the next set of records in this shard. An empty list doesn't guarantee the shard is exhausted.
:returns: A list of reformatted records. May be empty.
|
entailment
|
def bind(self, model, *, skip_table_setup=False):
"""Create backing tables for a model and its non-abstract subclasses.
:param model: Base model to bind. Can be abstract.
:param skip_table_setup: Don't create or verify the table in DynamoDB. Default is False.
:raises bloop.exceptions.InvalidModel: if ``model`` is not a subclass of :class:`~bloop.models.BaseModel`.
"""
# Make sure we're looking at models
validate_is_model(model)
concrete = set(filter(lambda m: not m.Meta.abstract, walk_subclasses(model)))
if not model.Meta.abstract:
concrete.add(model)
logger.debug("binding non-abstract models {}".format(
sorted(c.__name__ for c in concrete)
))
# create_table doesn't block until ACTIVE or validate.
# It also doesn't throw when the table already exists, making it safe
# to call multiple times for the same unbound model.
if skip_table_setup:
logger.info("skip_table_setup is True; not trying to create tables or validate models during bind")
else:
self.session.clear_cache()
is_creating = {}
for model in concrete:
table_name = self._compute_table_name(model)
before_create_table.send(self, engine=self, model=model)
if not skip_table_setup:
if table_name in is_creating:
continue
creating = self.session.create_table(table_name, model)
is_creating[table_name] = creating
for model in concrete:
if not skip_table_setup:
table_name = self._compute_table_name(model)
if is_creating[table_name]:
# polls until table is active
self.session.describe_table(table_name)
if model.Meta.ttl:
self.session.enable_ttl(table_name, model)
if model.Meta.backups and model.Meta.backups["enabled"]:
self.session.enable_backups(table_name, model)
self.session.validate_table(table_name, model)
model_validated.send(self, engine=self, model=model)
model_bound.send(self, engine=self, model=model)
logger.info("successfully bound {} models to the engine".format(len(concrete)))
|
Create backing tables for a model and its non-abstract subclasses.
:param model: Base model to bind. Can be abstract.
:param skip_table_setup: Don't create or verify the table in DynamoDB. Default is False.
:raises bloop.exceptions.InvalidModel: if ``model`` is not a subclass of :class:`~bloop.models.BaseModel`.
|
entailment
|
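A rough usage sketch, assuming an ``Engine`` and a ``User`` model like the ones in the other docstring examples; ``skip_table_setup`` is only appropriate when the tables are already known to exist and have been validated elsewhere.

.. code-block:: python

    engine = Engine()
    # Create/verify the table, enable TTL/backups if configured, then validate the model
    engine.bind(User)
    # Bind again elsewhere without touching DynamoDB at all
    engine.bind(User, skip_table_setup=True)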
def delete(self, *objs, condition=None, atomic=False):
"""Delete one or more objects.
:param objs: objects to delete.
:param condition: only perform each delete if this condition holds.
:param bool atomic: only perform each delete if the local and DynamoDB versions of the object match.
:raises bloop.exceptions.ConstraintViolation: if the condition (or atomic) is not met.
"""
objs = set(objs)
validate_not_abstract(*objs)
for obj in objs:
self.session.delete_item({
"TableName": self._compute_table_name(obj.__class__),
"Key": dump_key(self, obj),
**render(self, obj=obj, atomic=atomic, condition=condition)
})
object_deleted.send(self, engine=self, obj=obj)
logger.info("successfully deleted {} objects".format(len(objs)))
|
Delete one or more objects.
:param objs: objects to delete.
:param condition: only perform each delete if this condition holds.
:param bool atomic: only perform each delete if the local and DynamoDB versions of the object match.
:raises bloop.exceptions.ConstraintViolation: if the condition (or atomic) is not met.
|
entailment
|
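A brief sketch of both flavors, assuming the ``User`` model from the surrounding examples is bound to this engine.

.. code-block:: python

    user = User(id=3, email="user@domain.com")
    # Unconditional delete
    engine.delete(user)
    # Only delete if the local object still matches what DynamoDB has
    engine.delete(user, atomic=True)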
def load(self, *objs, consistent=False):
"""Populate objects from DynamoDB.
:param objs: objects to load.
:param bool consistent: Use `strongly consistent reads`__ if True. Default is False.
:raises bloop.exceptions.MissingKey: if any object doesn't provide a value for a key column.
:raises bloop.exceptions.MissingObjects: if one or more objects aren't loaded.
__ http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html
"""
get_table_name = self._compute_table_name
objs = set(objs)
validate_not_abstract(*objs)
table_index, object_index, request = {}, {}, {}
for obj in objs:
table_name = get_table_name(obj.__class__)
key = dump_key(self, obj)
index = index_for(key)
if table_name not in object_index:
table_index[table_name] = list(sorted(key.keys()))
object_index[table_name] = {}
request[table_name] = {"Keys": [], "ConsistentRead": consistent}
if index not in object_index[table_name]:
request[table_name]["Keys"].append(key)
object_index[table_name][index] = set()
object_index[table_name][index].add(obj)
response = self.session.load_items(request)
for table_name, list_of_attrs in response.items():
for attrs in list_of_attrs:
key_shape = table_index[table_name]
key = extract_key(key_shape, attrs)
index = index_for(key)
for obj in object_index[table_name].pop(index):
unpack_from_dynamodb(
attrs=attrs, expected=obj.Meta.columns, engine=self, obj=obj)
object_loaded.send(self, engine=self, obj=obj)
if not object_index[table_name]:
object_index.pop(table_name)
if object_index:
not_loaded = set()
for index in object_index.values():
for index_set in index.values():
not_loaded.update(index_set)
logger.info("loaded {} of {} objects".format(len(objs) - len(not_loaded), len(objs)))
raise MissingObjects("Failed to load some objects.", objects=not_loaded)
logger.info("successfully loaded {} objects".format(len(objs)))
|
Populate objects from DynamoDB.
:param objs: objects to load.
:param bool consistent: Use `strongly consistent reads`__ if True. Default is False.
:raises bloop.exceptions.MissingKey: if any object doesn't provide a value for a key column.
:raises bloop.exceptions.MissingObjects: if one or more objects aren't loaded.
__ http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html
|
entailment
|
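A hedged sketch of loading a mixed batch, again assuming the ``User`` and ``Tweet`` models from the other examples; only key columns need to be set before calling ``load``.

.. code-block:: python

    user = User(id=3)
    tweet = Tweet(id=42)
    # One call can span multiple tables; raises MissingObjects if anything isn't found
    engine.load(user, tweet, consistent=True)
    print(user.email, tweet.data)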
def query(self, model_or_index, key, filter=None, projection="all", consistent=False, forward=True):
"""Create a reusable :class:`~bloop.search.QueryIterator`.
:param model_or_index: A model or index to query. For example, ``User`` or ``User.by_email``.
:param key:
Key condition. This must include an equality against the hash key, and optionally one
of a restricted set of conditions on the range key.
:param filter: Filter condition. Only matching objects will be included in the results.
:param projection:
"all", "count", a list of column names, or a list of :class:`~bloop.models.Column`. When projection is
"count", you must advance the iterator to retrieve the count.
:param bool consistent: Use `strongly consistent reads`__ if True. Default is False.
:param bool forward: Query in ascending or descending order. Default is True (ascending).
:return: A reusable query iterator with helper methods.
:rtype: :class:`~bloop.search.QueryIterator`
__ http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html
"""
if isinstance(model_or_index, Index):
model, index = model_or_index.model, model_or_index
else:
model, index = model_or_index, None
validate_not_abstract(model)
q = Search(
mode="query", engine=self, model=model, index=index, key=key, filter=filter,
projection=projection, consistent=consistent, forward=forward)
return iter(q.prepare())
|
Create a reusable :class:`~bloop.search.QueryIterator`.
:param model_or_index: A model or index to query. For example, ``User`` or ``User.by_email``.
:param key:
Key condition. This must include an equality against the hash key, and optionally one
of a restricted set of conditions on the range key.
:param filter: Filter condition. Only matching objects will be included in the results.
:param projection:
"all", "count", a list of column names, or a list of :class:`~bloop.models.Column`. When projection is
"count", you must advance the iterator to retrieve the count.
:param bool consistent: Use `strongly consistent reads`__ if True. Default is False.
:param bool forward: Query in ascending or descending order. Default is True (ascending).
:return: A reusable query iterator with helper methods.
:rtype: :class:`~bloop.search.QueryIterator`
__ http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html
|
entailment
|
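A minimal sketch, assuming the ``User`` model with a ``by_email`` GSI as in the example above, and that key conditions are built with the usual column comparison operators.

.. code-block:: python

    newest_first = engine.query(
        User.by_email,
        key=User.email == "user@domain.com",
        forward=False)
    for user in newest_first:
        print(user.id)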
def save(self, *objs, condition=None, atomic=False):
"""Save one or more objects.
:param objs: objects to save.
:param condition: only perform each save if this condition holds.
:param bool atomic: only perform each save if the local and DynamoDB versions of the object match.
:raises bloop.exceptions.ConstraintViolation: if the condition (or atomic) is not met.
"""
objs = set(objs)
validate_not_abstract(*objs)
for obj in objs:
self.session.save_item({
"TableName": self._compute_table_name(obj.__class__),
"Key": dump_key(self, obj),
**render(self, obj=obj, atomic=atomic, condition=condition, update=True)
})
object_saved.send(self, engine=self, obj=obj)
logger.info("successfully saved {} objects".format(len(objs)))
|
Save one or more objects.
:param objs: objects to save.
:param condition: only perform each save if this condition holds.
:param bool atomic: only perform each save if the local and DynamoDB versions of the object match.
:raises bloop.exceptions.ConstraintViolation: if the condition (or atomic) is not met.
|
entailment
|
def scan(self, model_or_index, filter=None, projection="all", consistent=False, parallel=None):
"""Create a reusable :class:`~bloop.search.ScanIterator`.
:param model_or_index: A model or index to scan. For example, ``User`` or ``User.by_email``.
:param filter: Filter condition. Only matching objects will be included in the results.
:param projection:
"all", "count", a list of column names, or a list of :class:`~bloop.models.Column`. When projection is
"count", you must exhaust the iterator to retrieve the count.
:param bool consistent: Use `strongly consistent reads`__ if True. Default is False.
:param tuple parallel: Perform a `parallel scan`__. A tuple of (Segment, TotalSegments)
for this portion of the scan. Default is None.
:return: A reusable scan iterator with helper methods.
:rtype: :class:`~bloop.search.ScanIterator`
__ http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html
__ http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#QueryAndScanParallelScan
"""
if isinstance(model_or_index, Index):
model, index = model_or_index.model, model_or_index
else:
model, index = model_or_index, None
validate_not_abstract(model)
s = Search(
mode="scan", engine=self, model=model, index=index, filter=filter,
projection=projection, consistent=consistent, parallel=parallel)
return iter(s.prepare())
|
Create a reusable :class:`~bloop.search.ScanIterator`.
:param model_or_index: A model or index to scan. For example, ``User`` or ``User.by_email``.
:param filter: Filter condition. Only matching objects will be included in the results.
:param projection:
"all", "count", a list of column names, or a list of :class:`~bloop.models.Column`. When projection is
"count", you must exhaust the iterator to retrieve the count.
:param bool consistent: Use `strongly consistent reads`__ if True. Default is False.
:param tuple parallel: Perform a `parallel scan`__. A tuple of (Segment, TotalSegments)
for this portion of the scan. Default is None.
:return: A reusable scan iterator with helper methods.
:rtype: :class:`~bloop.search.ScanIterator`
__ http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html
__ http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#QueryAndScanParallelScan
|
entailment
|
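A sketch of one worker in a four-segment parallel scan. The ``count`` attribute read at the end is an assumption based on the note above that a "count" projection must be exhausted before the total is available.

.. code-block:: python

    # Segment 0 of 4; run the other three segments in separate workers
    segment = engine.scan(User, projection="count", parallel=(0, 4))
    for _ in segment:
        pass
    print(segment.count)  # assumption: the exhausted iterator exposes the final count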
def stream(self, model, position):
"""Create a :class:`~bloop.stream.Stream` that provides approximate chronological ordering.
.. code-block:: pycon
# Create a user so we have a record
>>> engine = Engine()
>>> user = User(id=3, email="user@domain.com")
>>> engine.save(user)
>>> user.email = "admin@domain.com"
>>> engine.save(user)
# First record lacks an "old" value since it's an insert
>>> stream = engine.stream(User, "trim_horizon")
>>> next(stream)
{'key': None,
'old': None,
'new': User(email='user@domain.com', id=3, verified=None),
'meta': {
'created_at': datetime.datetime(2016, 10, 23, ...),
'event': {
'id': '3fe6d339b7cb19a1474b3d853972c12a',
'type': 'insert',
'version': '1.1'},
'sequence_number': '700000000007366876916'}
}
:param model: The model to stream records from.
:param position: "trim_horizon", "latest", a stream token, or a :class:`datetime.datetime`.
:return: An iterator for records in all shards.
:rtype: :class:`~bloop.stream.Stream`
:raises bloop.exceptions.InvalidStream: if the model does not have a stream.
"""
validate_not_abstract(model)
if not model.Meta.stream or not model.Meta.stream.get("arn"):
raise InvalidStream("{!r} does not have a stream arn".format(model))
stream = Stream(model=model, engine=self)
stream.move_to(position=position)
return stream
|
Create a :class:`~bloop.stream.Stream` that provides approximate chronological ordering.
.. code-block:: pycon
# Create a user so we have a record
>>> engine = Engine()
>>> user = User(id=3, email="user@domain.com")
>>> engine.save(user)
>>> user.email = "admin@domain.com"
>>> engine.save(user)
# First record lacks an "old" value since it's an insert
>>> stream = engine.stream(User, "trim_horizon")
>>> next(stream)
{'key': None,
'old': None,
'new': User(email='user@domain.com', id=3, verified=None),
'meta': {
'created_at': datetime.datetime(2016, 10, 23, ...),
'event': {
'id': '3fe6d339b7cb19a1474b3d853972c12a',
'type': 'insert',
'version': '1.1'},
'sequence_number': '700000000007366876916'}
}
:param model: The model to stream records from.
:param position: "trim_horizon", "latest", a stream token, or a :class:`datetime.datetime`.
:return: An iterator for records in all shards.
:rtype: :class:`~bloop.stream.Stream`
:raises bloop.exceptions.InvalidStream: if the model does not have a stream.
|
entailment
|
def transaction(self, mode="w"):
"""
Create a new :class:`~bloop.transactions.ReadTransaction` or :class:`~bloop.transactions.WriteTransaction`.
As a context manager, commit is called when the block exits:
.. code-block:: pycon
>>> engine = Engine()
>>> user = User(id=3, email="user@domain.com")
>>> tweet = Tweet(id=42, data="hello, world")
>>> with engine.transaction("w") as tx:
... tx.delete(user)
... tx.save(tweet, condition=Tweet.id.is_(None))
Or manually calling prepare and commit:
.. code-block:: pycon
>>> engine = Engine()
>>> user = User(id=3, email="user@domain.com")
>>> tweet = Tweet(id=42, data="hello, world")
>>> tx = engine.transaction("w")
>>> tx.delete(user)
>>> tx.save(tweet, condition=Tweet.id.is_(None))
>>> tx.prepare().commit()
:param str mode: Either "r" or "w" to create a ReadTransaction or WriteTransaction. Default is "w"
:return: A new transaction that can be committed.
:rtype: :class:`~bloop.transactions.ReadTransaction` or :class:`~bloop.transactions.WriteTransaction`
"""
if mode == "r":
cls = ReadTransaction
elif mode == "w":
cls = WriteTransaction
else:
raise ValueError(f"unknown mode {mode}")
return cls(self)
|
Create a new :class:`~bloop.transactions.ReadTransaction` or :class:`~bloop.transactions.WriteTransaction`.
As a context manager, commit is called when the block exits:
.. code-block:: pycon
>>> engine = Engine()
>>> user = User(id=3, email="user@domain.com")
>>> tweet = Tweet(id=42, data="hello, world")
>>> with engine.transaction("w") as tx:
... tx.delete(user)
... tx.save(tweet, condition=Tweet.id.is_(None))
Or manually calling prepare and commit:
.. code-block:: pycon
>>> engine = Engine()
>>> user = User(id=3, email="user@domain.com")
>>> tweet = Tweet(id=42, data="hello, world")
>>> tx = engine.transaction("w")
>>> tx.delete(user)
>>> tx.save(tweet, condition=Tweet.id.is_(None))
>>> tx.prepare().commit()
:param str mode: Either "r" or "w" to create a ReadTransaction or WriteTransaction. Default is "w"
:return: A new transaction that can be committed.
:rtype: :class:`~bloop.transactions.ReadTransaction` or :class:`~bloop.transactions.WriteTransaction`
|
entailment
|
def _dump(self, value, **kwargs):
"""Entry point for serializing values. Most custom types should use :func:`~bloop.types.Type.dynamo_dump`.
This wraps the return value of :func:`~bloop.types.Type.dynamo_dump` in DynamoDB's wire format.
For example, serializing a string enum to an int:
.. code-block:: python
value = "green"
# dynamo_dump("green") = 2
_dump(value) == {"N": 2}
If a complex type calls this function with ``None``, it will forward ``None`` to
:func:`~bloop.types.Type.dynamo_dump`. This can happen when dumping eg. a sparse
:class:`~.bloop.types.Map`, or a missing (not set) value.
"""
value = self.dynamo_dump(value, **kwargs)
if value is None:
return value
return {self.backing_type: value}
|
Entry point for serializing values. Most custom types should use :func:`~bloop.types.Type.dynamo_dump`.
This wraps the return value of :func:`~bloop.types.Type.dynamo_dump` in DynamoDB's wire format.
For example, serializing a string enum to an int:
.. code-block:: python
value = "green"
# dynamo_dump("green") = 2
_dump(value) == {"N": 2}
If a complex type calls this function with ``None``, it will forward ``None`` to
:func:`~bloop.types.Type.dynamo_dump`. This can happen when dumping eg. a sparse
:class:`~.bloop.types.Map`, or a missing (not set) value.
|
entailment
|
def _load(self, value, **kwargs):
"""Entry point for deserializing values. Most custom types should use :func:`~bloop.types.Type.dynamo_load`.
This unpacks DynamoDB's wire format and calls :func:`~bloop.types.Type.dynamo_load` on the inner value.
For example, deserializing an int to a string enum:
.. code-block:: python
value = {"N": 2}
# dynamo_load(2) = "green"
_load(value) == "green"
If a complex type calls this function with ``None``, it will forward ``None`` to
:func:`~bloop.types.Type.dynamo_load`. This can happen when loading eg. a sparse :class:`~bloop.types.Map`.
"""
if value is not None:
value = next(iter(value.values()))
return self.dynamo_load(value, **kwargs)
|
Entry point for deserializing values. Most custom types should use :func:`~bloop.types.Type.dynamo_load`.
This unpacks DynamoDB's wire format and calls :func:`~bloop.types.Type.dynamo_load` on the inner value.
For example, deserializing an int to a string enum:
.. code-block:: python
value = {"N": 2}
# dynamo_load(2) = "green"
_load(value) == "green"
If a complex type calls this function with ``None``, it will forward ``None`` to
:func:`~bloop.types.Type.dynamo_load`. This can happen when loading eg. a sparse :class:`~bloop.types.Map`.
|
entailment
|
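The string-enum example in the two docstrings above can be made concrete with a small custom type. This is a hedged sketch: it assumes ``bloop.Integer`` is importable and that ``dynamo_dump``/``dynamo_load`` take a keyword-only ``context`` as they do elsewhere in this codebase.

.. code-block:: python

    from bloop import Integer

    COLORS = ["red", "blue", "green"]

    class Color(Integer):
        """Stored as "N" on the wire: "green" <-> 2."""
        def dynamo_dump(self, value, *, context, **kwargs):
            # Forward None untouched, as the note about sparse values above describes
            index = None if value is None else COLORS.index(value)
            return super().dynamo_dump(index, context=context, **kwargs)

        def dynamo_load(self, value, *, context, **kwargs):
            index = super().dynamo_load(value, context=context, **kwargs)
            return None if index is None else COLORS[index]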
def backing_type_for(value):
"""Returns the DynamoDB backing type for a given python value's type
::
4 -> 'N'
['x', 3] -> 'L'
{2, 4} -> 'SS'
"""
if isinstance(value, str):
vtype = "S"
elif isinstance(value, bytes):
vtype = "B"
# NOTE: numbers.Number check must come **AFTER** bool check since isinstance(True, numbers.Number)
elif isinstance(value, bool):
vtype = "BOOL"
elif isinstance(value, numbers.Number):
vtype = "N"
elif isinstance(value, dict):
vtype = "M"
elif isinstance(value, list):
vtype = "L"
elif isinstance(value, set):
if not value:
vtype = "SS" # doesn't matter, Set(x) should dump an empty set the same for all x
else:
inner = next(iter(value))
if isinstance(inner, str):
vtype = "SS"
elif isinstance(inner, bytes):
vtype = "BS"
elif isinstance(inner, numbers.Number):
vtype = "NS"
else:
raise ValueError(f"Unknown set type for inner value {inner!r}")
else:
raise ValueError(f"Can't dump unexpected type {type(value)!r} for value {value!r}")
return vtype
|
Returns the DynamoDB backing type for a given Python value's type
::
4 -> 'N'
['x', 3] -> 'L'
{2, 4} -> 'NS'
|
entailment
|
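Since every branch is visible above, the behavior can be pinned down with a few assertions; note that ``bool`` is checked before ``numbers.Number``, and a set's type is decided by one arbitrary inner element.

.. code-block:: python

    assert backing_type_for("hello") == "S"
    assert backing_type_for(b"\x00") == "B"
    assert backing_type_for(True) == "BOOL"      # bool wins over numbers.Number
    assert backing_type_for(4) == "N"
    assert backing_type_for({"key": "value"}) == "M"
    assert backing_type_for(["x", 3]) == "L"
    assert backing_type_for({"x", "y"}) == "SS"
    assert backing_type_for({2, 4}) == "NS"
    assert backing_type_for(set()) == "SS"       # empty sets default to "SS"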
def stream_replicate():
"""Monitor changes in approximately real-time and replicate them"""
stream = primary.stream(SomeDataBlob, "trim_horizon")
next_heartbeat = pendulum.now()
while True:
now = pendulum.now()
if now >= next_heartbeat:
stream.heartbeat()
next_heartbeat = now.add(minutes=10)
record = next(stream)
if record is None:
continue
if record["new"] is not None:
replica.save(record["new"])
else:
replica.delete(record["old"])
|
Monitor changes in approximately real-time and replicate them
|
entailment
|
def _move_stream_endpoint(coordinator, position):
"""Move to the "trim_horizon" or "latest" of the entire stream."""
# 0) Everything will be rebuilt from DescribeStream.
stream_arn = coordinator.stream_arn
coordinator.roots.clear()
coordinator.active.clear()
coordinator.buffer.clear()
# 1) Build a Dict[str, Shard] of the current Stream from a DescribeStream call
current_shards = coordinator.session.describe_stream(stream_arn=stream_arn)["Shards"]
current_shards = unpack_shards(current_shards, stream_arn, coordinator.session)
# 2) Roots are any shards without parents.
coordinator.roots.extend(shard for shard in current_shards.values() if not shard.parent)
# 3.0) Stream trim_horizon is the combined trim_horizon of all roots.
if position == "trim_horizon":
for shard in coordinator.roots:
shard.jump_to(iterator_type="trim_horizon")
coordinator.active.extend(coordinator.roots)
# 3.1) Stream latest is the combined latest of all shards without children.
else:
for root in coordinator.roots:
for shard in root.walk_tree():
if not shard.children:
shard.jump_to(iterator_type="latest")
coordinator.active.append(shard)
|
Move to the "trim_horizon" or "latest" of the entire stream.
|
entailment
|
def _move_stream_time(coordinator, time):
"""Scan through the *entire* Stream for the first record after ``time``.
This is an extremely expensive, naive algorithm that starts at trim_horizon and simply
dumps records into the void until the first hit. General improvements in performance are
tough; we can use the fact that Shards have a max life of 24hr to pick a pretty-good starting
point for any Shard trees with 6 generations. Even then we can't know how close the oldest one
is to rolling off so we either hit trim_horizon, or iterate an extra Shard more than we need to.
The corner cases are worse; short trees, recent splits, trees with different branch heights.
"""
if time > datetime.datetime.now(datetime.timezone.utc):
_move_stream_endpoint(coordinator, "latest")
return
_move_stream_endpoint(coordinator, "trim_horizon")
shard_trees = collections.deque(coordinator.roots)
while shard_trees:
shard = shard_trees.popleft()
records = shard.seek_to(time)
# Success! This section of some Shard tree is at the desired time.
if records:
coordinator.buffer.push_all((record, shard) for record in records)
# Closed shard, keep searching its children.
elif shard.exhausted:
coordinator.remove_shard(shard, drop_buffered_records=True)
shard_trees.extend(shard.children)
|
Scan through the *entire* Stream for the first record after ``time``.
This is an extremely expensive, naive algorithm that starts at trim_horizon and simply
dumps records into the void until the first hit. General improvements in performance are
tough; we can use the fact that Shards have a max life of 24hr to pick a pretty-good starting
point for any Shard trees with 6 generations. Even then we can't know how close the oldest one
is to rolling off so we either hit trim_horizon, or iterate an extra Shard more than we need to.
The corner cases are worse; short trees, recent splits, trees with different branch heights.
|
entailment
|
def _move_stream_token(coordinator, token):
"""Move to the Stream position described by the token.
The following rules are applied when interpolation is required:
- If a shard does not exist (past the trim_horizon) it is ignored. If that
shard had children, its children are also checked against the existing shards.
- If none of the shards in the token exist, then InvalidStream is raised.
- If a Shard expects its iterator to point to a SequenceNumber that is now past
that Shard's trim_horizon, the Shard instead points to trim_horizon.
"""
stream_arn = coordinator.stream_arn = token["stream_arn"]
# 0) Everything will be rebuilt from the DescribeStream masked by the token.
coordinator.roots.clear()
coordinator.active.clear()
coordinator.closed.clear()
coordinator.buffer.clear()
# Injecting the token gives us access to the standard shard management functions
token_shards = unpack_shards(token["shards"], stream_arn, coordinator.session)
coordinator.roots = [shard for shard in token_shards.values() if not shard.parent]
coordinator.active.extend(token_shards[shard_id] for shard_id in token["active"])
# 1) Build a Dict[str, Shard] of the current Stream from a DescribeStream call
current_shards = coordinator.session.describe_stream(stream_arn=stream_arn)["Shards"]
current_shards = unpack_shards(current_shards, stream_arn, coordinator.session)
# 2) Trying to find an intersection with the actual Stream by walking each root shard's tree.
# Prune any Shard with no children that's not part of the actual Stream.
# Raise InvalidStream if the entire token is pruned.
unverified = collections.deque(coordinator.roots)
while unverified:
shard = unverified.popleft()
if shard.shard_id not in current_shards:
logger.info("Unknown or expired shard \"{}\" - pruning from stream token".format(shard.shard_id))
coordinator.remove_shard(shard, drop_buffered_records=True)
unverified.extend(shard.children)
# 3) Everything was pruned, so the token describes an unknown stream.
if not coordinator.roots:
raise InvalidStream("This token has no relation to the actual Stream.")
# 4) Now that everything's verified, grab new iterators for the coordinator's active Shards.
for shard in coordinator.active:
try:
if shard.iterator_type is None:
# Descendant of an unknown shard
shard.iterator_type = "trim_horizon"
# Move back to the token's specified position
shard.jump_to(iterator_type=shard.iterator_type, sequence_number=shard.sequence_number)
except RecordsExpired:
# This token shard's sequence_number is beyond the trim_horizon.
# The next closest record is at trim_horizon.
msg = "SequenceNumber \"{}\" in shard \"{}\" beyond trim horizon: jumping to trim_horizon"
logger.info(msg.format(shard.sequence_number, shard.shard_id))
shard.jump_to(iterator_type="trim_horizon")
|
Move to the Stream position described by the token.
The following rules are applied when interpolation is required:
- If a shard does not exist (past the trim_horizon) it is ignored. If that
shard had children, its children are also checked against the existing shards.
- If none of the shards in the token exist, then InvalidStream is raised.
- If a Shard expects its iterator to point to a SequenceNumber that is now past
that Shard's trim_horizon, the Shard instead points to trim_horizon.
|
entailment
|
def advance_shards(self):
"""Poll active shards for records and insert them into the buffer. Rotate exhausted shards.
Returns immediately if the buffer isn't empty.
"""
# Don't poll shards when there are pending records.
if self.buffer:
return
# 0) Collect new records from all active shards.
record_shard_pairs = []
for shard in self.active:
records = next(shard)
if records:
record_shard_pairs.extend((record, shard) for record in records)
self.buffer.push_all(record_shard_pairs)
self.migrate_closed_shards()
|
Poll active shards for records and insert them into the buffer. Rotate exhausted shards.
Returns immediately if the buffer isn't empty.
|
entailment
|
def heartbeat(self):
"""Keep active shards with "trim_horizon", "latest" iterators alive by advancing their iterators."""
for shard in self.active:
if shard.sequence_number is None:
records = next(shard)
# Success! This shard now has an ``at_sequence`` iterator
if records:
self.buffer.push_all((record, shard) for record in records)
self.migrate_closed_shards()
|
Keep active shards with "trim_horizon", "latest" iterators alive by advancing their iterators.
|
entailment
|
def token(self):
"""JSON-serializable representation of the current Stream state.
Use :func:`Engine.stream(YourModel, token) <bloop.engine.Engine.stream>` to create an identical stream,
or :func:`stream.move_to(token) <bloop.stream.Stream.move_to>` to move an existing stream to this position.
:returns: Stream state as a json-friendly dict
:rtype: dict
"""
# 0) Trace roots and active shards
active_ids = []
shard_tokens = []
for root in self.roots:
for shard in root.walk_tree():
shard_tokens.append(shard.token)
# dedupe, stream_arn will be in the root token
shard_tokens[-1].pop("stream_arn")
active_ids.extend((shard.shard_id for shard in self.active))
# 1) Inject closed shards
for shard in self.closed.keys():
active_ids.append(shard.shard_id)
shard_tokens.append(shard.token)
shard_tokens[-1].pop("stream_arn")
return {
"stream_arn": self.stream_arn,
"active": active_ids,
"shards": shard_tokens
}
|
JSON-serializable representation of the current Stream state.
Use :func:`Engine.stream(YourModel, token) <bloop.engine.Engine.stream>` to create an identical stream,
or :func:`stream.move_to(token) <bloop.stream.Stream.move_to>` to move an existing stream to this position.
:returns: Stream state as a json-friendly dict
:rtype: dict
|
entailment
|
def remove_shard(self, shard, drop_buffered_records=False):
"""Remove a Shard from the Coordinator. Drops all buffered records from the Shard.
If the Shard is active or a root, it is removed and any children promoted to those roles.
:param shard: The shard to remove
:type shard: :class:`~bloop.stream.shard.Shard`
:param bool drop_buffered_records:
Whether records from this shard should be removed.
Default is False.
"""
try:
self.roots.remove(shard)
except ValueError:
# Wasn't a root Shard
pass
else:
self.roots.extend(shard.children)
try:
self.active.remove(shard)
except ValueError:
# Wasn't an active Shard
pass
else:
self.active.extend(shard.children)
if drop_buffered_records:
# TODO can this be improved? Gets expensive for high-volume streams with large buffers
heap = self.buffer.heap
# Clear buffered records from the shard. Each record is (ordering, record, shard)
to_remove = [x for x in heap if x[2] is shard]
for x in to_remove:
heap.remove(x)
|
Remove a Shard from the Coordinator. Drops all buffered records from the Shard.
If the Shard is active or a root, it is removed and any children promoted to those roles.
:param shard: The shard to remove
:type shard: :class:`~bloop.stream.shard.Shard`
:param bool drop_buffered_records:
Whether records from this shard should be removed.
Default is False.
|
entailment
|
def move_to(self, position):
"""Set the Coordinator to a specific endpoint or time, or load state from a token.
:param position: "trim_horizon", "latest", :class:`~datetime.datetime`, or a
:attr:`Coordinator.token <bloop.stream.coordinator.Coordinator.token>`
"""
if isinstance(position, collections.abc.Mapping):
move = _move_stream_token
elif hasattr(position, "timestamp") and callable(position.timestamp):
move = _move_stream_time
elif isinstance(position, str) and position.lower() in ["latest", "trim_horizon"]:
move = _move_stream_endpoint
else:
raise InvalidPosition("Don't know how to move to position {!r}".format(position))
move(self, position)
|
Set the Coordinator to a specific endpoint or time, or load state from a token.
:param position: "trim_horizon", "latest", :class:`~datetime.datetime`, or a
:attr:`Coordinator.token <bloop.stream.coordinator.Coordinator.token>`
|
entailment
|
def heap_item(clock, record, shard):
"""Create a tuple of (ordering, (record, shard)) for use in a RecordBuffer."""
# Primary ordering is by event creation time.
# However, creation time is *approximate* and has whole-second resolution.
# This means two events in the same shard within one second can't be ordered.
ordering = record["meta"]["created_at"]
# From testing, SequenceNumber isn't a guaranteed ordering either. However,
# it is guaranteed to be unique within a shard. This will be tie-breaker
# for multiple records within the same shard, within the same second.
second_ordering = int(record["meta"]["sequence_number"])
# It's possible though unlikely, that sequence numbers will collide across
# multiple shards, within the same second. The final tie-breaker is
# a monotonically increasing integer from the buffer.
total_ordering = (ordering, second_ordering, clock())
return total_ordering, record, shard
|
Create a tuple of (ordering, record, shard) for use in a RecordBuffer.
|
entailment
|
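A small sketch of the resulting heap item, using ``itertools.count`` as the monotonic clock and a stub object in place of a real Shard.

.. code-block:: python

    import datetime
    import itertools

    clock = itertools.count().__next__
    created = datetime.datetime(2016, 10, 23, tzinfo=datetime.timezone.utc)
    record = {"meta": {"created_at": created, "sequence_number": "700000000007366876916"}}
    shard = object()  # stand-in for a real Shard

    item = heap_item(clock, record, shard)
    assert item == ((created, 700000000007366876916, 0), record, shard)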
def push(self, record, shard):
"""Push a new record into the buffer
:param dict record: new record
:param shard: Shard the record came from
:type shard: :class:`~bloop.stream.shard.Shard`
"""
heapq.heappush(self.heap, heap_item(self.clock, record, shard))
|
Push a new record into the buffer
:param dict record: new record
:param shard: Shard the record came from
:type shard: :class:`~bloop.stream.shard.Shard`
|
entailment
|
def push_all(self, record_shard_pairs):
"""Push multiple (record, shard) pairs at once, with only one :meth:`heapq.heapify` call to maintain order.
:param record_shard_pairs: list of ``(record, shard)`` tuples
(see :func:`~bloop.stream.buffer.RecordBuffer.push`).
"""
# Faster than inserting one at a time; the heap is sorted once after all inserts.
for record, shard in record_shard_pairs:
item = heap_item(self.clock, record, shard)
self.heap.append(item)
heapq.heapify(self.heap)
|
Push multiple (record, shard) pairs at once, with only one :meth:`heapq.heapify` call to maintain order.
:param record_shard_pairs: list of ``(record, shard)`` tuples
(see :func:`~bloop.stream.buffer.RecordBuffer.push`).
|
entailment
|
def loaded_columns(obj: BaseModel):
"""Yields each (name, value) tuple for all columns in an object that aren't missing"""
for column in sorted(obj.Meta.columns, key=lambda c: c.name):
value = getattr(obj, column.name, missing)
if value is not missing:
yield column.name, value
|
Yields each (name, value) tuple for all columns in an object that aren't missing
|
entailment
|
def unpack_from_dynamodb(*, attrs, expected, model=None, obj=None, engine=None, context=None, **kwargs):
"""Push values by dynamo_name into an object"""
context = context or {"engine": engine}
engine = engine or context.get("engine", None)
if not engine:
raise ValueError("You must provide engine or a context with an engine.")
if model is None and obj is None:
raise ValueError("You must provide a model or obj to unpack.")
if model is not None and obj is not None:
raise ValueError("Only specify model or obj.")
if model:
obj = model.Meta.init()
for column in expected:
value = attrs.get(column.dynamo_name, None)
value = engine._load(column.typedef, value, context=context, **kwargs)
setattr(obj, column.name, value)
return obj
|
Push values by dynamo_name into an object
|
entailment
|
def setdefault(obj, field, default):
"""Set an object's field to default if it doesn't have a value"""
setattr(obj, field, getattr(obj, field, default))
|
Set an object's field to default if it doesn't have a value
|
entailment
|
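For clarity, the helper behaves like ``dict.setdefault`` but for attributes.

.. code-block:: python

    class Options:
        pass

    opts = Options()
    setdefault(opts, "retries", 3)
    assert opts.retries == 3
    setdefault(opts, "retries", 10)
    assert opts.retries == 3  # existing value is kept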
def bind_column(model, name, column, force=False, recursive=False, copy=False) -> Column:
"""Bind a column to the model with the given name.
This method is primarily used during BaseModel.__init_subclass__, although it can be used to easily
attach a new column to an existing model:
.. code-block:: python
import bloop.models
class User(BaseModel):
id = Column(String, hash_key=True)
email = Column(String, dynamo_name="e")
bound = bloop.models.bind_column(User, "email", email)
assert bound is email
# rebind with force, and use a copy
bound = bloop.models.bind_column(User, "email", email, force=True, copy=True)
assert bound is not email
If an existing index refers to this column, it will be updated to point to the new column
using :meth:`~bloop.models.refresh_index`, including recalculating the index projection.
Meta attributes including ``Meta.columns``, ``Meta.hash_key``, etc. will be updated if necessary.
If ``name`` or the column's ``dynamo_name`` conflicts with an existing column or index on the model, raises
:exc:`~bloop.exceptions.InvalidModel` unless ``force`` is True. If ``recursive`` is ``True`` and there are
existing subclasses of ``model``, a copy of the column will attempt to bind to each subclass. The recursive
calls will not force the bind, and will always use a new copy. If ``copy`` is ``True`` then a copy of the
provided column is used. This uses a shallow copy via :meth:`~bloop.models.Column.__copy__`.
:param model:
The model to bind the column to.
:param name:
The name to bind the column as. In effect, used for ``setattr(model, name, column)``
:param column:
The column to bind to the model.
:param force:
Unbind existing columns or indexes with the same name or dynamo_name. Default is False.
:param recursive:
Bind to each subclass of this model. Default is False.
:param copy:
Use a copy of the column instead of the column directly. Default is False.
:return:
The bound column. This is a new column when ``copy`` is True, otherwise the input column.
"""
if not subclassof(model, BaseModel):
raise InvalidModel(f"{model} is not a subclass of BaseModel")
meta = model.Meta
if copy:
column = copyfn(column)
# TODO elif column.model is not None: logger.warning(f"Trying to rebind column bound to {column.model}")
column._name = name
safe_repr = unbound_repr(column)
# Guard against name, dynamo_name collisions; if force=True, unbind any matches
same_dynamo_name = (
util.index(meta.columns, "dynamo_name").get(column.dynamo_name) or
util.index(meta.indexes, "dynamo_name").get(column.dynamo_name)
)
same_name = (
meta.columns_by_name.get(column.name) or
util.index(meta.indexes, "name").get(column.name)
)
if column.hash_key and column.range_key:
raise InvalidModel(f"Tried to bind {safe_repr} as both a hash and range key.")
if force:
if same_name:
unbind(meta, name=column.name)
if same_dynamo_name:
unbind(meta, dynamo_name=column.dynamo_name)
else:
if same_name:
raise InvalidModel(
f"The column {safe_repr} has the same name as an existing column "
f"or index {same_name}. Did you mean to bind with force=True?")
if same_dynamo_name:
raise InvalidModel(
f"The column {safe_repr} has the same dynamo_name as an existing "
f"column or index {same_name}. Did you mean to bind with force=True?")
if column.hash_key and meta.hash_key:
raise InvalidModel(
f"Tried to bind {safe_repr} but {meta.model} "
f"already has a different hash_key: {meta.hash_key}")
if column.range_key and meta.range_key:
raise InvalidModel(
f"Tried to bind {safe_repr} but {meta.model} "
f"already has a different range_key: {meta.range_key}")
# success!
# --------------------------------
column.model = meta.model
meta.columns.add(column)
meta.columns_by_name[name] = column
setattr(meta.model, name, column)
if column.hash_key:
meta.hash_key = column
meta.keys.add(column)
if column.range_key:
meta.range_key = column
meta.keys.add(column)
try:
for index in meta.indexes:
refresh_index(meta, index)
except KeyError as e:
raise InvalidModel(
f"Binding column {column} removed a required column for index {unbound_repr(index)}") from e
if recursive:
for subclass in util.walk_subclasses(meta.model):
try:
bind_column(subclass, name, column, force=False, recursive=False, copy=True)
except InvalidModel:
pass
return column
|
Bind a column to the model with the given name.
This method is primarily used during BaseModel.__init_subclass__, although it can be used to easily
attach a new column to an existing model:
.. code-block:: python
import bloop.models
class User(BaseModel):
id = Column(String, hash_key=True)
email = Column(String, dynamo_name="e")
bound = bloop.models.bind_column(User, "email", email)
assert bound is email
# rebind with force, and use a copy
bound = bloop.models.bind_column(User, "email", email, force=True, copy=True)
assert bound is not email
If an existing index refers to this column, it will be updated to point to the new column
using :meth:`~bloop.models.refresh_index`, including recalculating the index projection.
Meta attributes including ``Meta.columns``, ``Meta.hash_key``, etc. will be updated if necessary.
If ``name`` or the column's ``dynamo_name`` conflicts with an existing column or index on the model, raises
:exc:`~bloop.exceptions.InvalidModel` unless ``force`` is True. If ``recursive`` is ``True`` and there are
existing subclasses of ``model``, a copy of the column will attempt to bind to each subclass. The recursive
calls will not force the bind, and will always use a new copy. If ``copy`` is ``True`` then a copy of the
provided column is used. This uses a shallow copy via :meth:`~bloop.models.Column.__copy__`.
:param model:
The model to bind the column to.
:param name:
The name to bind the column as. In effect, used for ``setattr(model, name, column)``
:param column:
The column to bind to the model.
:param force:
Unbind existing columns or indexes with the same name or dynamo_name. Default is False.
:param recursive:
Bind to each subclass of this model. Default is False.
:param copy:
Use a copy of the column instead of the column directly. Default is False.
:return:
The bound column. This is a new column when ``copy`` is True, otherwise the input column.
|
entailment
|
def bind_index(model, name, index, force=False, recursive=True, copy=False) -> Index:
"""Bind an index to the model with the given name.
This method is primarily used during BaseModel.__init_subclass__, although it can be used to easily
attach a new index to an existing model:
.. code-block:: python
import bloop.models
class User(BaseModel):
id = Column(String, hash_key=True)
email = Column(String, dynamo_name="e")
by_email = GlobalSecondaryIndex(projection="keys", hash_key="email")
bound = bloop.models.bind_index(User, "by_email", by_email)
assert bound is by_email
# rebind with force, and use a copy
bound = bloop.models.bind_index(User, "by_email", by_email, force=True, copy=True)
assert bound is not by_email
If ``name`` or the index's ``dynamo_name`` conflicts with an existing column or index on the model, raises
:exc:`~bloop.exceptions.InvalidModel` unless ``force`` is True. If ``recursive`` is ``True`` and there are
existing subclasses of ``model``, a copy of the index will attempt to bind to each subclass. The recursive
calls will not force the bind, and will always use a new copy. If ``copy`` is ``True`` then a copy of the
provided index is used. This uses a shallow copy via :meth:`~bloop.models.Index.__copy__`.
:param model:
The model to bind the index to.
:param name:
The name to bind the index as. In effect, used for ``setattr(model, name, index)``
:param index:
The index to bind to the model.
:param force:
Unbind existing columns or indexes with the same name or dynamo_name. Default is False.
:param recursive:
Bind to each subclass of this model. Default is False.
:param copy:
Use a copy of the index instead of the index directly. Default is False.
:return:
The bound index. This is a new index when ``copy`` is True, otherwise the input index.
"""
if not subclassof(model, BaseModel):
raise InvalidModel(f"{model} is not a subclass of BaseModel")
meta = model.Meta
if copy:
index = copyfn(index)
# TODO elif index.model is not None: logger.warning(f"Trying to rebind index bound to {index.model}")
index._name = name
safe_repr = unbound_repr(index)
# Guard against name, dynamo_name collisions; if force=True, unbind any matches
same_dynamo_name = (
util.index(meta.columns, "dynamo_name").get(index.dynamo_name) or
util.index(meta.indexes, "dynamo_name").get(index.dynamo_name)
)
same_name = (
meta.columns_by_name.get(index.name) or
util.index(meta.indexes, "name").get(index.name)
)
if isinstance(index, LocalSecondaryIndex) and not meta.range_key:
raise InvalidModel("An LSI requires the Model to have a range key.")
if force:
if same_name:
unbind(meta, name=index.name)
if same_dynamo_name:
unbind(meta, dynamo_name=index.dynamo_name)
else:
if same_name:
raise InvalidModel(
f"The index {safe_repr} has the same name as an existing index "
f"or column {same_name}. Did you mean to bind with force=True?")
if same_dynamo_name:
raise InvalidModel(
f"The index {safe_repr} has the same dynamo_name as an existing "
f"index or column {same_name}. Did you mean to bind with force=True?")
# success!
# --------------------------------
index.model = meta.model
meta.indexes.add(index)
setattr(meta.model, name, index)
if isinstance(index, LocalSecondaryIndex):
meta.lsis.add(index)
if isinstance(index, GlobalSecondaryIndex):
meta.gsis.add(index)
try:
refresh_index(meta, index)
except KeyError as e:
raise InvalidModel("Index expected a hash or range key that does not exist") from e
if recursive:
for subclass in util.walk_subclasses(meta.model):
try:
bind_index(subclass, name, index, force=False, recursive=False, copy=True)
except InvalidModel:
pass
return index
|
Bind an index to the model with the given name.
This method is primarily used during BaseModel.__init_subclass__, although it can be used to easily
attach a new index to an existing model:
.. code-block:: python
import bloop.models
class User(BaseModel):
id = Column(String, hash_key=True)
email = Column(String, dynamo_name="e")
by_email = GlobalSecondaryIndex(projection="keys", hash_key="email")
bound = bloop.models.bind_index(User, "by_email", by_email)
assert bound is by_email
# rebind with force, and use a copy
bound = bloop.models.bind_index(User, "by_email", by_email, force=True, copy=True)
assert bound is not by_email
If ``name`` or the index's ``dynamo_name`` conflicts with an existing column or index on the model, raises
:exc:`~bloop.exceptions.InvalidModel` unless ``force`` is True. If ``recursive`` is ``True`` and there are
existing subclasses of ``model``, a copy of the index will attempt to bind to each subclass. The recursive
calls will not force the bind, and will always use a new copy. If ``copy`` is ``True`` then a copy of the
provided index is used. This uses a shallow copy via :meth:`~bloop.models.Index.__copy__`.
:param model:
The model to bind the index to.
:param name:
The name to bind the index as. In effect, used for ``setattr(model, name, index)``
:param index:
The index to bind to the model.
:param force:
Unbind existing columns or indexes with the same name or dynamo_name. Default is False.
:param recursive:
Bind to each subclass of this model. Default is False.
:param copy:
Use a copy of the index instead of the index directly. Default is False.
:return:
The bound index. This is a new index when ``copy`` is True, otherwise the input index.
|
entailment
|
def refresh_index(meta, index) -> None:
"""Recalculate the projection, hash_key, and range_key for the given index.
:param meta: model.Meta to find columns by name
:param index: The index to refresh
"""
# All projections include model + index keys
projection_keys = set.union(meta.keys, index.keys)
proj = index.projection
mode = proj["mode"]
if mode == "keys":
proj["included"] = projection_keys
elif mode == "all":
proj["included"] = meta.columns
elif mode == "include": # pragma: no branch
if all(isinstance(p, str) for p in proj["included"]):
proj["included"] = set(meta.columns_by_name[n] for n in proj["included"])
else:
proj["included"] = set(proj["included"])
proj["included"].update(projection_keys)
if proj["strict"]:
proj["available"] = proj["included"]
else:
proj["available"] = meta.columns
|
Recalculate the projection, hash_key, and range_key for the given index.
:param meta: model.Meta to find columns by name
:param index: The index to refresh
|
entailment
|
def unbind(meta, name=None, dynamo_name=None) -> None:
"""Unconditionally remove any columns or indexes bound to the given name or dynamo_name.
.. code-block:: python
import bloop.models
class User(BaseModel):
id = Column(String, hash_key=True)
email = Column(String, dynamo_name="e")
by_email = GlobalSecondaryIndex(projection="keys", hash_key=email)
for dynamo_name in ("id", "e", "by_email"):
bloop.models.unbind(User.Meta, dynamo_name=dynamo_name)
assert not User.Meta.columns
assert not User.Meta.indexes
assert not User.Meta.keys
.. warning::
This method does not pre- or post- validate the model with the requested changes. You are responsible
for ensuring the model still has a hash key, that required columns exist for each index, etc.
:param meta: model.Meta to remove the columns or indexes from
:param name: column or index name to unbind by. Default is None.
:param dynamo_name: column or index name to unbind by. Default is None.
"""
if name is not None:
columns = {x for x in meta.columns if x.name == name}
indexes = {x for x in meta.indexes if x.name == name}
elif dynamo_name is not None:
columns = {x for x in meta.columns if x.dynamo_name == dynamo_name}
indexes = {x for x in meta.indexes if x.dynamo_name == dynamo_name}
else:
raise RuntimeError("Must provide name= or dynamo_name= to unbind from meta")
# Nothing in bloop should allow name or dynamo_name
# collisions to exist, so this is either a bug or
# the user manually hacked up meta.
assert len(columns) <= 1
assert len(indexes) <= 1
assert not (columns and indexes)
if columns:
[column] = columns
meta.columns.remove(column)
# If these don't line up, there's likely a bug in bloop
# or the user manually hacked up columns_by_name
expect_same = meta.columns_by_name[column.name]
assert expect_same is column
meta.columns_by_name.pop(column.name)
if column in meta.keys:
meta.keys.remove(column)
if meta.hash_key is column:
meta.hash_key = None
if meta.range_key is column:
meta.range_key = None
delattr(meta.model, column.name)
if indexes:
[index] = indexes
meta.indexes.remove(index)
if index in meta.gsis:
meta.gsis.remove(index)
if index in meta.lsis:
meta.lsis.remove(index)
delattr(meta.model, index.name)
|
Unconditionally remove any columns or indexes bound to the given name or dynamo_name.
.. code-block:: python
import bloop.models
class User(BaseModel):
id = Column(String, hash_key=True)
email = Column(String, dynamo_name="e")
by_email = GlobalSecondaryIndex(projection="keys", hash_key=email)
for dynamo_name in ("id", "e", "by_email"):
bloop.models.unbind(User.Meta, dynamo_name=dynamo_name)
assert not User.Meta.columns
assert not User.Meta.indexes
assert not User.Meta.keys
.. warning::
This method does not pre- or post- validate the model with the requested changes. You are responsible
for ensuring the model still has a hash key, that required columns exist for each index, etc.
:param meta: model.Meta to remove the columns or indexes from
:param name: column or index name to unbind by. Default is None.
:param dynamo_name: column or index name to unbind by. Default is None.
|
entailment
|
def _load(cls, attrs, *, context, **kwargs):
""" dict (dynamo name) -> obj """
return unpack_from_dynamodb(
model=cls,
attrs=attrs or {},
expected=cls.Meta.columns,
context=context, **kwargs)
|
dict (dynamo name) -> obj
|
entailment
|
def _dump(cls, obj, *, context, **kwargs):
""" obj -> dict """
if obj is None:
return None
dump = context["engine"]._dump
filtered = filter(
lambda item: item[1] is not None,
((
column.dynamo_name,
dump(column.typedef, getattr(obj, column.name, None), context=context, **kwargs)
) for column in cls.Meta.columns))
return dict(filtered) or None
|
obj -> dict
|
entailment
|
def is_valid_superset(actual_projection, index):
"""Returns True if the actual index is a valid superset of the expected index"""
projection_type = actual_projection["ProjectionType"]
if projection_type == "ALL":
return True
meta = index.model.Meta
# all index types provide index keys and model keys
provides = set.union(meta.keys, index.keys)
if projection_type == "KEYS_ONLY":
pass
elif projection_type == "INCLUDE": # pragma: no branch (unknown projections break loud)
by_dynamo_name = {column.dynamo_name: column for column in meta.columns}
provides.update(
by_dynamo_name[name]
for name in actual_projection["NonKeyAttributes"]
if name in by_dynamo_name # ignore columns the projection provides if the model doesn't care about them
)
else:
logger.info(f"unexpected index ProjectionType '{projection_type}'")
return False
expects = index.projection["included"]
return provides.issuperset(expects)
|
Returns True if the actual index is a valid superset of the expected index
|
entailment
|
def save_item(self, item):
"""Save an object to DynamoDB.
:param item: Unpacked into kwargs for :func:`boto3.DynamoDB.Client.update_item`.
:raises bloop.exceptions.ConstraintViolation: if the condition (or atomic) is not met.
"""
try:
self.dynamodb_client.update_item(**item)
except botocore.exceptions.ClientError as error:
handle_constraint_violation(error)
|
Save an object to DynamoDB.
:param item: Unpacked into kwargs for :func:`boto3.DynamoDB.Client.update_item`.
:raises bloop.exceptions.ConstraintViolation: if the condition (or atomic) is not met.
|
entailment
|
def delete_item(self, item):
"""Delete an object in DynamoDB.
:param item: Unpacked into kwargs for :func:`boto3.DynamoDB.Client.delete_item`.
:raises bloop.exceptions.ConstraintViolation: if the condition (or atomic) is not met.
"""
try:
self.dynamodb_client.delete_item(**item)
except botocore.exceptions.ClientError as error:
handle_constraint_violation(error)
|
Delete an object in DynamoDB.
:param item: Unpacked into kwargs for :func:`boto3.DynamoDB.Client.delete_item`.
:raises bloop.exceptions.ConstraintViolation: if the condition (or atomic) is not met.
|
entailment
|
def load_items(self, items):
"""Loads any number of items in chunks, handling continuation tokens.
:param items: Unpacked in chunks into "RequestItems" for :func:`boto3.DynamoDB.Client.batch_get_item`.
"""
loaded_items = {}
requests = collections.deque(create_batch_get_chunks(items))
while requests:
request = requests.pop()
try:
response = self.dynamodb_client.batch_get_item(RequestItems=request)
except botocore.exceptions.ClientError as error:
raise BloopException("Unexpected error while loading items.") from error
# Accumulate results
for table_name, table_items in response.get("Responses", {}).items():
loaded_items.setdefault(table_name, []).extend(table_items)
# Push additional request onto the deque.
# "UnprocessedKeys" is {} if this request is done
if response["UnprocessedKeys"]:
requests.append(response["UnprocessedKeys"])
return loaded_items
|
Loads any number of items in chunks, handling continuation tokens.
:param items: Unpacked in chunks into "RequestItems" for :func:`boto3.DynamoDB.Client.batch_get_item`.
|
entailment
|
def search_items(self, mode, request):
"""Invoke query/scan by name.
Response always includes "Count" and "ScannedCount"
:param str mode: "query" or "scan"
:param request: Unpacked into :func:`boto3.DynamoDB.Client.query` or :func:`boto3.DynamoDB.Client.scan`
"""
validate_search_mode(mode)
method = getattr(self.dynamodb_client, mode)
try:
response = method(**request)
except botocore.exceptions.ClientError as error:
raise BloopException("Unexpected error during {}.".format(mode)) from error
standardize_query_response(response)
return response
|
Invoke query/scan by name.
Response always includes "Count" and "ScannedCount"
:param str mode: "query" or "scan"
:param request: Unpacked into :func:`boto3.DynamoDB.Client.query` or :func:`boto3.DynamoDB.Client.scan`
|
entailment
|
def create_table(self, table_name, model):
"""Create the model's table. Returns True if the table is being created, False otherwise.
Does not wait for the table to create, and does not validate an existing table.
Will not raise "ResourceInUseException" if the table exists or is being created.
:param str table_name: The name of the table to create for the model.
:param model: The :class:`~bloop.models.BaseModel` to create the table for.
:return: True if the table is being created, False if the table exists
:rtype: bool
"""
table = create_table_request(table_name, model)
try:
self.dynamodb_client.create_table(**table)
is_creating = True
except botocore.exceptions.ClientError as error:
handle_table_exists(error, model)
is_creating = False
return is_creating
|
Create the model's table. Returns True if the table is being created, False otherwise.
Does not wait for the table to create, and does not validate an existing table.
Will not raise "ResourceInUseException" if the table exists or is being created.
:param str table_name: The name of the table to create for the model.
:param model: The :class:`~bloop.models.BaseModel` to create the table for.
:return: True if the table is being created, False if the table exists
:rtype: bool
|
entailment
|
def describe_table(self, table_name):
"""
Polls until the table is ready, then returns the description from the call that first found it ready.
The returned dict is standardized to ensure all fields are present, even when empty or across different
DynamoDB API versions.
TTL information is also inserted.
:param table_name: The name of the table to describe
:return: The (sanitized) result of DescribeTable["Table"]
:rtype: dict
"""
if table_name in self._tables:
return self._tables[table_name]
status, description = None, {}
calls = 0
while status is not ready:
calls += 1
try:
description = self.dynamodb_client.describe_table(TableName=table_name)["Table"]
except botocore.exceptions.ClientError as error:
raise BloopException("Unexpected error while describing table.") from error
status = simple_table_status(description)
logger.debug("describe_table: table \"{}\" was in ACTIVE state after {} calls".format(table_name, calls))
try:
ttl = self.dynamodb_client.describe_time_to_live(TableName=table_name)
except botocore.exceptions.ClientError as error:
raise BloopException("Unexpected error while describing ttl.") from error
try:
backups = self.dynamodb_client.describe_continuous_backups(TableName=table_name)
except botocore.exceptions.ClientError as error:
raise BloopException("Unexpected error while describing continuous backups.") from error
description["TimeToLiveDescription"] = {
"AttributeName": _read_field(ttl, None, "TimeToLiveDescription", "AttributeName"),
"TimeToLiveStatus": _read_field(ttl, None, "TimeToLiveDescription", "TimeToLiveStatus"),
}
description["ContinuousBackupsDescription"] = {
"ContinuousBackupsStatus": _read_field(
backups, None, "ContinuousBackupsDescription", "ContinuousBackupsStatus"),
}
table = self._tables[table_name] = sanitize_table_description(description)
return table
|
Polls until the table is ready, then returns the description from the call that first found it ready.
The returned dict is standardized to ensure all fields are present, even when empty or across different
DynamoDB API versions.
TTL information is also inserted.
:param table_name: The name of the table to describe
:return: The (sanitized) result of DescribeTable["Table"]
:rtype: dict
|
entailment
|
def validate_table(self, table_name, model):
"""Polls until a creating table is ready, then verifies the description against the model's requirements.
The model may have a subset of all GSIs and LSIs on the table, but the key structure must be exactly
the same. The table must have a stream if the model expects one, but not the other way around. When read or
write units are not specified for the model or any GSI, the existing values will always pass validation.
:param str table_name: The name of the table to validate the model against.
:param model: The :class:`~bloop.models.BaseModel` to validate the table of.
:raises bloop.exceptions.TableMismatch: When the table does not meet the constraints of the model.
"""
actual = self.describe_table(table_name)
if not compare_tables(model, actual):
raise TableMismatch("The expected and actual tables for {!r} do not match.".format(model.__name__))
# Fill in values that Meta doesn't know ahead of time (such as arns).
# These won't be populated unless Meta explicitly cares about the value
if model.Meta.stream:
stream_arn = model.Meta.stream["arn"] = actual["LatestStreamArn"]
logger.debug(f"Set {model.__name__}.Meta.stream['arn'] to '{stream_arn}' from DescribeTable response")
if model.Meta.ttl:
ttl_enabled = actual["TimeToLiveDescription"]["TimeToLiveStatus"].lower() == "enabled"
model.Meta.ttl["enabled"] = ttl_enabled
logger.debug(f"Set {model.__name__}.Meta.ttl['enabled'] to '{ttl_enabled}' from DescribeTable response")
# Fill in meta values that the table didn't care about (eg. billing=None)
if model.Meta.encryption is None:
sse_enabled = actual["SSEDescription"]["Status"].lower() == "enabled"
model.Meta.encryption = {"enabled": sse_enabled}
logger.debug(
f"Set {model.__name__}.Meta.encryption['enabled'] to '{sse_enabled}' from DescribeTable response")
if model.Meta.backups is None:
backups = actual["ContinuousBackupsDescription"]["ContinuousBackupsStatus"] == "ENABLED"
model.Meta.backups = {"enabled": backups}
logger.debug(f"Set {model.__name__}.Meta.backups['enabled'] to '{backups}' from DescribeTable response")
if model.Meta.billing is None:
billing_mode = {
"PAY_PER_REQUEST": "on_demand",
"PROVISIONED": "provisioned"
}[actual["BillingModeSummary"]["BillingMode"]]
model.Meta.billing = {"mode": billing_mode}
logger.debug(f"Set {model.__name__}.Meta.billing['mode'] to '{billing_mode}' from DescribeTable response")
if model.Meta.read_units is None:
read_units = model.Meta.read_units = actual["ProvisionedThroughput"]["ReadCapacityUnits"]
logger.debug(
f"Set {model.__name__}.Meta.read_units to {read_units} from DescribeTable response")
if model.Meta.write_units is None:
write_units = model.Meta.write_units = actual["ProvisionedThroughput"]["WriteCapacityUnits"]
logger.debug(
f"Set {model.__name__}.Meta.write_units to {write_units} from DescribeTable response")
# Replace any ``None`` values for read_units, write_units in GSIs with their actual values
gsis = {index["IndexName"]: index for index in actual["GlobalSecondaryIndexes"]}
for index in model.Meta.gsis:
read_units = gsis[index.dynamo_name]["ProvisionedThroughput"]["ReadCapacityUnits"]
write_units = gsis[index.dynamo_name]["ProvisionedThroughput"]["WriteCapacityUnits"]
if index.read_units is None:
index.read_units = read_units
logger.debug(
f"Set {model.__name__}.{index.name}.read_units to {read_units} from DescribeTable response")
if index.write_units is None:
index.write_units = write_units
logger.debug(
f"Set {model.__name__}.{index.name}.write_units to {write_units} from DescribeTable response")
|
Polls until a creating table is ready, then verifies the description against the model's requirements.
The model may have a subset of all GSIs and LSIs on the table, but the key structure must be exactly
the same. The table must have a stream if the model expects one, but not the other way around. When read or
write units are not specified for the model or any GSI, the existing values will always pass validation.
:param str table_name: The name of the table to validate the model against.
:param model: The :class:`~bloop.models.BaseModel` to validate the table of.
:raises bloop.exceptions.TableMismatch: When the table does not meet the constraints of the model.
|
entailment
|
def enable_ttl(self, table_name, model):
"""Calls UpdateTimeToLive on the table according to model.Meta["ttl"]
:param table_name: The name of the table to enable the TTL setting on
:param model: The model to get TTL settings from
"""
self._tables.pop(table_name, None)
ttl_name = model.Meta.ttl["column"].dynamo_name
request = {
"TableName": table_name,
"TimeToLiveSpecification": {"AttributeName": ttl_name, "Enabled": True}
}
try:
self.dynamodb_client.update_time_to_live(**request)
except botocore.exceptions.ClientError as error:
raise BloopException("Unexpected error while setting TTL.") from error
|
Calls UpdateTimeToLive on the table according to model.Meta["ttl"]
:param table_name: The name of the table to enable the TTL setting on
:param model: The model to get TTL settings from
|
entailment
|
def enable_backups(self, table_name, model):
"""Calls UpdateContinuousBackups on the table according to model.Meta["continuous_backups"]
:param table_name: The name of the table to enable Continuous Backups on
:param model: The model to get Continuous Backups settings from
"""
self._tables.pop(table_name, None)
request = {
"TableName": table_name,
"PointInTimeRecoverySpecification": {"PointInTimeRecoveryEnabled": True}
}
try:
self.dynamodb_client.update_continuous_backups(**request)
except botocore.exceptions.ClientError as error:
raise BloopException("Unexpected error while setting Continuous Backups.") from error
|
Calls UpdateContinuousBackups on the table according to model.Meta["continuous_backups"]
:param table_name: The name of the table to enable Continuous Backups on
:param model: The model to get Continuous Backups settings from
|
entailment
|
def describe_stream(self, stream_arn, first_shard=None):
"""Wraps :func:`boto3.DynamoDBStreams.Client.describe_stream`, handling continuation tokens.
:param str stream_arn: Stream arn, usually from the model's ``Meta.stream["arn"]``.
:param str first_shard: *(Optional)* If provided, only shards after this shard id will be returned.
:return: All shards in the stream, or a subset if ``first_shard`` is provided.
:rtype: dict
"""
description = {"Shards": []}
request = {"StreamArn": stream_arn, "ExclusiveStartShardId": first_shard}
# boto3 isn't down with literal Nones.
if first_shard is None:
request.pop("ExclusiveStartShardId")
while request.get("ExclusiveStartShardId") is not missing:
try:
response = self.stream_client.describe_stream(**request)["StreamDescription"]
except botocore.exceptions.ClientError as error:
if error.response["Error"]["Code"] == "ResourceNotFoundException":
raise InvalidStream(f"The stream arn {stream_arn!r} does not exist.") from error
raise BloopException("Unexpected error while describing stream.") from error
# Docs aren't clear if the terminal value is null, or won't exist.
# Since we don't terminate the loop on None, the "or missing" here
# will ensure we stop on a falsey value.
request["ExclusiveStartShardId"] = response.pop("LastEvaluatedShardId", None) or missing
description["Shards"].extend(response.pop("Shards", []))
description.update(response)
return description
|
Wraps :func:`boto3.DynamoDBStreams.Client.describe_stream`, handling continuation tokens.
:param str stream_arn: Stream arn, usually from the model's ``Meta.stream["arn"]``.
:param str first_shard: *(Optional)* If provided, only shards after this shard id will be returned.
:return: All shards in the stream, or a subset if ``first_shard`` is provided.
:rtype: dict
|
entailment
|
def get_shard_iterator(self, *, stream_arn, shard_id, iterator_type, sequence_number=None):
"""Wraps :func:`boto3.DynamoDBStreams.Client.get_shard_iterator`.
:param str stream_arn: Stream arn. Usually :data:`Shard.stream_arn <bloop.stream.shard.Shard.stream_arn>`.
:param str shard_id: Shard identifier. Usually :data:`Shard.shard_id <bloop.stream.shard.Shard.shard_id>`.
:param str iterator_type: "sequence_at", "sequence_after", "trim_horizon", or "latest"
:param sequence_number:
:return: Iterator id, valid for 15 minutes.
:rtype: str
:raises bloop.exceptions.RecordsExpired: Tried to get an iterator beyond the Trim Horizon.
"""
real_iterator_type = validate_stream_iterator_type(iterator_type)
request = {
"StreamArn": stream_arn,
"ShardId": shard_id,
"ShardIteratorType": real_iterator_type,
"SequenceNumber": sequence_number
}
# boto3 isn't down with literal Nones.
if sequence_number is None:
request.pop("SequenceNumber")
try:
return self.stream_client.get_shard_iterator(**request)["ShardIterator"]
except botocore.exceptions.ClientError as error:
if error.response["Error"]["Code"] == "TrimmedDataAccessException":
raise RecordsExpired from error
raise BloopException("Unexpected error while creating shard iterator") from error
|
Wraps :func:`boto3.DynamoDBStreams.Client.get_shard_iterator`.
:param str stream_arn: Stream arn. Usually :data:`Shard.stream_arn <bloop.stream.shard.Shard.stream_arn>`.
:param str shard_id: Shard identifier. Usually :data:`Shard.shard_id <bloop.stream.shard.Shard.shard_id>`.
:param str iterator_type: "sequence_at", "sequence_after", "trim_horizon", or "latest"
:param sequence_number:
:return: Iterator id, valid for 15 minutes.
:rtype: str
:raises bloop.exceptions.RecordsExpired: Tried to get an iterator beyond the Trim Horizon.
|
entailment
|
def get_stream_records(self, iterator_id):
"""Wraps :func:`boto3.DynamoDBStreams.Client.get_records`.
:param iterator_id: Iterator id. Usually :data:`Shard.iterator_id <bloop.stream.shard.Shard.iterator_id>`.
:return: Dict with "Records" list (may be empty) and "NextShardIterator" str (may not exist).
:rtype: dict
:raises bloop.exceptions.RecordsExpired: The iterator moved beyond the Trim Horizon since it was created.
:raises bloop.exceptions.ShardIteratorExpired: The iterator was created more than 15 minutes ago.
"""
try:
return self.stream_client.get_records(ShardIterator=iterator_id)
except botocore.exceptions.ClientError as error:
if error.response["Error"]["Code"] == "TrimmedDataAccessException":
raise RecordsExpired from error
elif error.response["Error"]["Code"] == "ExpiredIteratorException":
raise ShardIteratorExpired from error
raise BloopException("Unexpected error while getting records.") from error
|
Wraps :func:`boto3.DynamoDBStreams.Client.get_records`.
:param iterator_id: Iterator id. Usually :data:`Shard.iterator_id <bloop.stream.shard.Shard.iterator_id>`.
:return: Dict with "Records" list (may be empty) and "NextShardIterator" str (may not exist).
:rtype: dict
:raises bloop.exceptions.RecordsExpired: The iterator moved beyond the Trim Horizon since it was created.
:raises bloop.exceptions.ShardIteratorExpired: The iterator was created more than 15 minutes ago.
|
entailment
|
def transaction_read(self, items):
"""
Wraps :func:`boto3.DynamoDB.Client.transact_get_items`.
:param items: Unpacked into "TransactItems" for :func:`boto3.DynamoDB.Client.transact_get_items`
:raises bloop.exceptions.TransactionCanceled: if the transaction was canceled.
:return: Dict with "Responses" list
"""
try:
return self.dynamodb_client.transact_get_items(TransactItems=items)
except botocore.exceptions.ClientError as error:
if error.response["Error"]["Code"] == "TransactionCanceledException":
raise TransactionCanceled from error
raise BloopException("Unexpected error during transaction read.") from error
|
Wraps :func:`boto3.DynamoDB.Client.transact_get_items`.
:param items: Unpacked into "TransactItems" for :func:`boto3.DynamoDB.Client.transact_get_items`
:raises bloop.exceptions.TransactionCanceled: if the transaction was canceled.
:return: Dict with "Responses" list
|
entailment
|
def transaction_write(self, items, client_request_token):
"""
Wraps :func:`boto3.DynamoDB.Client.transact_write_items`.
:param items: Unpacked into "TransactItems" for :func:`boto3.DynamoDB.Client.transact_write_items`
:param client_request_token: Idempotency token valid for 10 minutes from first use.
Unpacked into "ClientRequestToken"
:raises bloop.exceptions.TransactionCanceled: if the transaction was canceled.
"""
try:
self.dynamodb_client.transact_write_items(
TransactItems=items,
ClientRequestToken=client_request_token
)
except botocore.exceptions.ClientError as error:
if error.response["Error"]["Code"] == "TransactionCanceledException":
raise TransactionCanceled from error
raise BloopException("Unexpected error during transaction write.") from error
|
Wraps :func:`boto3.DynamoDB.Client.transact_write_items`.
:param items: Unpacked into "TransactItems" for :func:`boto3.DynamoDB.Client.transact_write_items`
:param client_request_token: Idempotency token valid for 10 minutes from first use.
Unpacked into "ClientRequestToken"
:raises bloop.exceptions.TransactionCanceled: if the transaction was canceled.
|
entailment
|
def check_hash_key(query_on, key):
"""Only allows == against query_on.hash_key"""
return (
isinstance(key, BaseCondition) and
(key.operation == "==") and
(key.column is query_on.hash_key)
)
|
Only allows == against query_on.hash_key
|
entailment
|
def check_range_key(query_on, key):
"""BeginsWith, Between, or any Comparison except '!=' against query_on.range_key"""
return (
isinstance(key, BaseCondition) and
key.operation in ("begins_with", "between", "<", ">", "<=", ">=", "==") and
key.column is query_on.range_key
)
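A brief illustration of what these two validators accept, assuming a hypothetical ``User`` model and that the model's ``Meta`` (which exposes ``hash_key``/``range_key`` just like an index) is what gets passed as ``query_on``.
from datetime import datetime, timezone
from bloop import BaseModel, Column, DateTime, String

# Hypothetical model purely for illustration
class User(BaseModel):
    id = Column(String, hash_key=True)
    created = Column(DateTime, range_key=True)

query_on = User.Meta
assert check_hash_key(query_on, User.id == "some-id")
assert not check_hash_key(query_on, User.id.begins_with("some"))                  # only == is allowed
assert check_range_key(query_on, User.created >= datetime.now(timezone.utc))
assert not check_range_key(query_on, User.created != datetime.now(timezone.utc))  # != is rejected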
|
BeginsWith, Between, or any Comparison except '!=' against query_on.range_key
|
entailment
|
def prepare(self):
"""Constructs a :class:`~bloop.search.PreparedSearch`."""
p = PreparedSearch()
p.prepare(
engine=self.engine,
mode=self.mode,
model=self.model,
index=self.index,
key=self.key,
filter=self.filter,
projection=self.projection,
consistent=self.consistent,
forward=self.forward,
parallel=self.parallel
)
return p
|
Constructs a :class:`~bloop.search.PreparedSearch`.
|
entailment
|
def prepare(
self, engine=None, mode=None, model=None, index=None, key=None,
filter=None, projection=None, consistent=None, forward=None, parallel=None):
"""Validates the search parameters and builds the base request dict for each Query/Scan call."""
self.prepare_iterator_cls(engine, mode)
self.prepare_model(model, index, consistent)
self.prepare_key(key)
self.prepare_projection(projection)
self.prepare_filter(filter)
self.prepare_constraints(forward, parallel)
self.prepare_request()
|
Validates the search parameters and builds the base request dict for each Query/Scan call.
|
entailment
|
def count(self):
"""Number of items that have been loaded from DynamoDB so far, including buffered items."""
if self.request["Select"] == "COUNT":
while not self.exhausted:
next(self, None)
return self._count
|
Number of items that have been loaded from DynamoDB so far, including buffered items.
|
entailment
|
def scanned(self):
"""Number of items that DynamoDB evaluated, before any filter was applied."""
if self.request["Select"] == "COUNT":
while not self.exhausted:
next(self, None)
return self._scanned
|
Number of items that DynamoDB evaluated, before any filter was applied.
|
entailment
|
def first(self):
"""Return the first result. If there are no results, raises :exc:`~bloop.exceptions.ConstraintViolation`.
:return: The first result.
:raises bloop.exceptions.ConstraintViolation: No results.
"""
self.reset()
value = next(self, None)
if value is None:
raise ConstraintViolation("{} did not find any results.".format(self.mode.capitalize()))
return value
|
Return the first result. If there are no results, raises :exc:`~bloop.exceptions.ConstraintViolation`.
:return: The first result.
:raises bloop.exceptions.ConstraintViolation: No results.
|
entailment
|
def one(self):
"""Return the unique result. If there is not exactly one result,
raises :exc:`~bloop.exceptions.ConstraintViolation`.
:return: The unique result.
:raises bloop.exceptions.ConstraintViolation: Not exactly one result.
"""
first = self.first()
second = next(self, None)
if second is not None:
raise ConstraintViolation("{} found more than one result.".format(self.mode.capitalize()))
return first
|
Return the unique result. If there is not exactly one result,
raises :exc:`~bloop.exceptions.ConstraintViolation`.
:return: The unique result.
:raises bloop.exceptions.ConstraintViolation: Not exactly one result.
|
entailment
|
def reset(self):
"""Reset to the initial state, clearing the buffer and zeroing count and scanned."""
self.buffer.clear()
self._count = 0
self._scanned = 0
self._exhausted = False
self.request.pop("ExclusiveStartKey", None)
|
Reset to the initial state, clearing the buffer and zeroing count and scanned.
|
entailment
|
def by_alias(cls, name: str) -> "TxType":
"""get a type by the common bloop operation name: get/check/delete/save"""
return {
"get": TxType.Get,
"check": TxType.Check,
"delete": TxType.Delete,
"save": TxType.Update,
}[name]
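A quick illustration, assuming ``TxType`` is the enum this classmethod lives on:
assert TxType.by_alias("save") is TxType.Update
assert TxType.by_alias("get") is TxType.Get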
|
get a type by the common bloop operation name: get/check/delete/save
|
entailment
|
def prepare(self):
"""
Create a new PreparedTransaction that can be committed.
This is called automatically when exiting the transaction as a context:
.. code-block:: python
>>> engine = Engine()
>>> tx = WriteTransaction(engine)
>>> prepared = tx.prepare()
>>> prepared.commit()
# automatically calls commit when exiting
>>> with WriteTransaction(engine) as tx:
... # modify the transaction here
... pass
>>> # tx commits here
:return:
"""
tx = PreparedTransaction()
tx.prepare(
engine=self.engine,
mode=self.mode,
items=self._items,
)
return tx
|
Create a new PreparedTransaction that can be committed.
This is called automatically when exiting the transaction as a context:
.. code-block:: python
>>> engine = Engine()
>>> tx = WriteTransaction(engine)
>>> prepared = tx.prepare()
>>> prepared.commit()
# automatically calls commit when exiting
>>> with WriteTransaction(engine) as tx:
... # modify the transaction here
... pass
>>> # tx commits here
:return:
|
entailment
|
def prepare(self, engine, mode, items) -> None:
"""
Creates a unique transaction id and dumps the items into a cached request object.
"""
self.tx_id = str(uuid.uuid4()).replace("-", "")
self.engine = engine
self.mode = mode
self.items = items
self._prepare_request()
|
Creates a unique transaction id and dumps the items into a cached request object.
|
entailment
|
def commit(self) -> None:
"""
Commit the transaction with a fixed transaction id.
A read transaction can call commit() any number of times, while a write transaction can only use the
same tx_id for 10 minutes from the first call.
"""
now = datetime.now(timezone.utc)
if self.first_commit_at is None:
self.first_commit_at = now
if self.mode == "r":
response = self.engine.session.transaction_read(self._request)
elif self.mode == "w":
if now - self.first_commit_at > MAX_TOKEN_LIFETIME:
raise TransactionTokenExpired
response = self.engine.session.transaction_write(self._request, self.tx_id)
else:
raise ValueError(f"unrecognized mode {self.mode}")
self._handle_response(response)
|
Commit the transaction with a fixed transaction id.
A read transaction can call commit() any number of times, while a write transaction can only use the
same tx_id for 10 minutes from the first call.
|
entailment
|
def load(self, *objs) -> "ReadTransaction":
"""
Add one or more objects to be loaded in this transaction.
At most 10 items can be loaded in the same transaction. All objects will be loaded each time you
call commit().
:param objs: Objects to add to the set that are loaded in this transaction.
:return: this transaction for chaining
:raises bloop.exceptions.MissingObjects: if one or more objects aren't loaded.
"""
self._extend([TxItem.new("get", obj) for obj in objs])
return self
|
Add one or more objects to be loaded in this transaction.
At most 10 items can be loaded in the same transaction. All objects will be loaded each time you
call commit().
:param objs: Objects to add to the set that are loaded in this transaction.
:return: this transaction for chaining
:raises bloop.exceptions.MissingObjects: if one or more objects aren't loaded.
|
entailment
|
def check(self, obj, condition) -> "WriteTransaction":
"""
Add a condition which must be met for the transaction to commit.
While the condition is checked against the provided object, that object will not be modified. It is only
used to provide the hash and range key to apply the condition to.
At most 10 items can be checked, saved, or deleted in the same transaction. The same idempotency token will
be used for a single prepared transaction, which allows you to safely call commit on the PreparedCommit object
multiple times.
:param obj: The object to use for the transaction condition. This object will not be modified.
:param condition: A condition on an object which must hold for the transaction to commit.
:return: this transaction for chaining
"""
self._extend([TxItem.new("check", obj, condition)])
return self
|
Add a condition which must be met for the transaction to commit.
While the condition is checked against the provided object, that object will not be modified. It is only
used to provide the hash and range key to apply the condition to.
At most 10 items can be checked, saved, or deleted in the same transaction. The same idempotency token will
be used for a single prepared transaction, which allows you to safely call commit on the PreparedCommit object
multiple times.
:param obj: The object to use for the transaction condition. This object will not be modified.
:param condition: A condition on an object which must hold for the transaction to commit.
:return: this transaction for chaining
|
entailment
|
def save(self, *objs, condition=None, atomic=False) -> "WriteTransaction":
"""
Add one or more objects to be saved in this transaction.
At most 10 items can be checked, saved, or deleted in the same transaction. The same idempotency token will
be used for a single prepared transaction, which allows you to safely call commit on the PreparedCommit object
multiple times.
:param objs: Objects to add to the set that are updated in this transaction.
:param condition: A condition for these objects which must hold for the transaction to commit.
:param bool atomic: only commit the transaction if the local and DynamoDB versions of the object match.
:return: this transaction for chaining
"""
self._extend([TxItem.new("save", obj, condition, atomic) for obj in objs])
return self
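A hedged end-to-end sketch tying check/save together with the context-manager commit described earlier; ``engine``, ``user``, ``other`` and the ``User`` model (with an ``email`` column) are hypothetical stand-ins.
# Hypothetical objects: "engine" is a bloop Engine, "user" and "other" are User instances.
with WriteTransaction(engine) as tx:
    tx.check(other, condition=User.email == "admin@example.com")
    tx.save(user, atomic=True)
# prepare() and commit() run automatically when the block exits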
|
Add one or more objects to be saved in this transaction.
At most 10 items can be checked, saved, or deleted in the same transaction. The same idempotency token will
be used for a single prepared transaction, which allows you to safely call commit on the PreparedCommit object
multiple times.
:param objs: Objects to add to the set that are updated in this transaction.
:param condition: A condition for these objects which must hold for the transaction to commit.
:param bool atomic: only commit the transaction if the local and DynamoDB versions of the object match.
:return: this transaction for chaining
|
entailment
|
def encode(self, cube_dimensions):
"""
Produces a numpy array of integers which encode
the supplied cube dimensions.
"""
return np.asarray([getattr(cube_dimensions[d], s)
for d in self._dimensions
for s in self._schema],
dtype=np.int32)
|
Produces a numpy array of integers which encode
the supplied cube dimensions.
|
entailment
|
def decode(self, descriptor):
""" Produce a list of dictionaries for each dimension in this transcoder """
i = iter(descriptor)
n = len(self._schema)
# Add the name key to our schema
schema = self._schema + ('name',)
# For each dimension, the generator takes n items off the iterator
# wrapping the descriptor, making a tuple with the dimension
# name appended
tuple_gen = (tuple(itertools.islice(i, n)) + (d, )
for d in self._dimensions)
# Generate dictionary by mapping schema keys to generated tuples
return [{ k: v for k, v in zip(schema, t) } for t in tuple_gen]
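A minimal, runnable stand-in that copies the two methods above to show the descriptor layout; the dimension names, schema and extents are hypothetical, and the real transcoder supplies its own ``_dimensions`` and ``_schema``.
import itertools
from types import SimpleNamespace
import numpy as np

class DemoTranscoder:
    # Hypothetical dimensions/schema; the real class defines these itself
    _dimensions = ("ntime", "nchan")
    _schema = ("lower_extent", "upper_extent")

    def encode(self, cube_dimensions):
        return np.asarray([getattr(cube_dimensions[d], s)
                           for d in self._dimensions
                           for s in self._schema], dtype=np.int32)

    def decode(self, descriptor):
        i = iter(descriptor)
        n = len(self._schema)
        schema = self._schema + ("name",)
        tuple_gen = (tuple(itertools.islice(i, n)) + (d,)
                     for d in self._dimensions)
        return [{k: v for k, v in zip(schema, t)} for t in tuple_gen]

dims = {"ntime": SimpleNamespace(lower_extent=0, upper_extent=10),
        "nchan": SimpleNamespace(lower_extent=0, upper_extent=64)}
t = DemoTranscoder()
desc = t.encode(dims)     # array([ 0, 10,  0, 64], dtype=int32)
print(t.decode(desc))
# [{'lower_extent': 0, 'upper_extent': 10, 'name': 'ntime'},
#  {'lower_extent': 0, 'upper_extent': 64, 'name': 'nchan'}]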
|
Produce a list of dictionaries for each dimension in this transcoder
|
entailment
|
def dl_cub(cub_url, cub_archive_name):
""" Download cub archive from cub_url and store it in cub_archive_name """
with open(cub_archive_name, 'wb') as f:
remote_file = urllib2.urlopen(cub_url)
meta = remote_file.info()
# The server may provide us with the size of the file.
cl_header = meta.getheaders("Content-Length")
remote_file_size = int(cl_header[0]) if len(cl_header) > 0 else None
# Initialise variables
local_file_size = 0
block_size = 128*1024
# Do the download
while True:
data = remote_file.read(block_size)
if not data:
break
f.write(data)
local_file_size += len(data)
if (remote_file_size is not None and
not local_file_size == remote_file_size):
log.warn("Local file size '{}' "
"does not match remote '{}'".format(
local_file_size, remote_file_size))
remote_file.close()
|
Download cub archive from cub_url and store it in cub_archive_name
|
entailment
|
def sha_hash_file(filename):
""" Compute the SHA1 hash of filename """
hash_sha = hashlib.sha1()
with open(filename, 'rb') as f:
for chunk in iter(lambda: f.read(1024*1024), b""):
hash_sha.update(chunk)
return hash_sha.hexdigest()
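A quick self-check against hashlib on a small temporary file:
import hashlib
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b"montblanc")
    path = f.name
assert sha_hash_file(path) == hashlib.sha1(b"montblanc").hexdigest()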
|
Compute the SHA1 hash of filename
|
entailment
|
def install_cub(mb_inc_path):
""" Downloads and installs cub into mb_inc_path """
cub_url = 'https://github.com/NVlabs/cub/archive/1.6.4.zip'
cub_sha_hash = '0d5659200132c2576be0b3959383fa756de6105d'
cub_version_str = 'Current release: v1.6.4 (12/06/2016)'
cub_zip_file = 'cub.zip'
cub_zip_dir = 'cub-1.6.4'
cub_unzipped_path = os.path.join(mb_inc_path, cub_zip_dir)
cub_new_unzipped_path = os.path.join(mb_inc_path, 'cub')
cub_header = os.path.join(cub_new_unzipped_path, 'cub', 'cub.cuh')
cub_readme = os.path.join(cub_new_unzipped_path, 'README.md' )
# Check for a reasonably valid install
cub_installed, _ = is_cub_installed(cub_readme, cub_header, cub_version_str)
if cub_installed:
log.info("NVIDIA cub installation found "
"at '{}'".format(cub_new_unzipped_path))
return
log.info("No NVIDIA cub installation found")
# Do we already have a valid cub zip file
have_valid_cub_file = (os.path.exists(cub_zip_file) and
os.path.isfile(cub_zip_file) and
sha_hash_file(cub_zip_file) == cub_sha_hash)
if have_valid_cub_file:
log.info("Valid NVIDIA cub archive found '{}'".format(cub_zip_file))
# Download if we don't have a valid file
else:
log.info("Downloading cub archive '{}'".format(cub_url))
dl_cub(cub_url, cub_zip_file)
cub_file_sha_hash = sha_hash_file(cub_zip_file)
# Compare against our supplied hash
if cub_sha_hash != cub_file_sha_hash:
msg = ('Hash of file %s downloaded from %s '
'is %s and does not match the expected '
'hash of %s. Please manually download '
'as per the README.md instructions.') % (
cub_zip_file, cub_url,
cub_file_sha_hash, cub_sha_hash)
raise InstallCubException(msg)
# Unzip into montblanc/include/cub
with zipfile.ZipFile(cub_zip_file, 'r') as zip_file:
# Remove any existing installs
shutil.rmtree(cub_unzipped_path, ignore_errors=True)
shutil.rmtree(cub_new_unzipped_path, ignore_errors=True)
# Unzip
zip_file.extractall(mb_inc_path)
# Rename. cub_unzipped_path is mb_inc_path/cub_zip_dir
shutil.move(cub_unzipped_path, cub_new_unzipped_path)
log.info("NVIDIA cub archive unzipped into '{}'".format(
cub_new_unzipped_path))
there, reason = is_cub_installed(cub_readme, cub_header, cub_version_str)
if not there:
raise InstallCubException(reason)
|
Downloads and installs cub into mb_inc_path
|
entailment
|
def cuda_architecture_flags(device_info):
"""
Emit a list of architecture flags for each CUDA device found
['--gpu-architecture=sm_30', '--gpu-architecture=sm_52']
"""
# Figure out the necessary device architectures
if len(device_info['devices']) == 0:
archs = ['--gpu-architecture=sm_30']
log.info("No CUDA devices found, defaulting to architecture '{}'".format(archs[0]))
else:
archs = set()
for device in device_info['devices']:
arch_str = '--gpu-architecture=sm_{}{}'.format(device['major'], device['minor'])
log.info("Using '{}' for '{}'".format(arch_str, device['name']))
archs.add(arch_str)
return list(archs)
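For example, a machine with one Pascal and one Maxwell card would yield two flags (the device names and compute capabilities below are illustrative):
device_info = {"devices": [
    {"name": "GeForce GTX 1080", "major": 6, "minor": 1},
    {"name": "GeForce GTX 980", "major": 5, "minor": 2},
]}
print(sorted(cuda_architecture_flags(device_info)))
# ['--gpu-architecture=sm_52', '--gpu-architecture=sm_61']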
|
Emit a list of architecture flags for each CUDA device found
['--gpu-architecture=sm_30', '--gpu-architecture=sm_52']
|
entailment
|
def create_tensorflow_extension(nvcc_settings, device_info):
""" Create an extension that builds the custom tensorflow ops """
import tensorflow as tf
import glob
use_cuda = (bool(nvcc_settings['cuda_available'])
and tf.test.is_built_with_cuda())
# Source and includes
source_path = os.path.join('montblanc', 'impl', 'rime', 'tensorflow', 'rime_ops')
sources = glob.glob(os.path.join(source_path, '*.cpp'))
# Header dependencies
depends = glob.glob(os.path.join(source_path, '*.h'))
# Include directories
tf_inc = tf.sysconfig.get_include()
include_dirs = [os.path.join('montblanc', 'include'), source_path]
include_dirs += [tf_inc, os.path.join(tf_inc, "external", "nsync", "public")]
# Libraries
library_dirs = [tf.sysconfig.get_lib()]
libraries = ['tensorflow_framework']
extra_link_args = ['-fPIC', '-fopenmp', '-g0']
# Macros
define_macros = [
('_MWAITXINTRIN_H_INCLUDED', None),
('_FORCE_INLINES', None),
('_GLIBCXX_USE_CXX11_ABI', 0)]
# Common flags
flags = ['-std=c++11']
gcc_flags = flags + ['-g0', '-fPIC', '-fopenmp', '-O2']
gcc_flags += ['-march=native', '-mtune=native']
nvcc_flags = flags + []
# Add cuda specific build information, if it is available
if use_cuda:
# CUDA source files
sources += glob.glob(os.path.join(source_path, '*.cu'))
# CUDA include directories
include_dirs += nvcc_settings['include_dirs']
# CUDA header dependencies
depends += glob.glob(os.path.join(source_path, '*.cuh'))
# CUDA libraries
library_dirs += nvcc_settings['library_dirs']
libraries += nvcc_settings['libraries']
# Flags
nvcc_flags += ['-x', 'cu']
nvcc_flags += ['--compiler-options', '"-fPIC"']
# --gpu-architecture=sm_xy flags
nvcc_flags += cuda_architecture_flags(device_info)
# Ideally this would be set in define_macros, but
# this must be set differently for gcc and nvcc
nvcc_flags += ['-DGOOGLE_CUDA=%d' % int(use_cuda)]
return Extension(tensorflow_extension_name,
sources=sources,
include_dirs=include_dirs,
depends=depends,
library_dirs=library_dirs,
libraries=libraries,
define_macros=define_macros,
# this syntax is specific to this build system
# we're only going to use certain compiler args with nvcc and not with gcc
# the implementation of this trick is in customize_compiler_for_nvcc() above
extra_compile_args={ 'gcc': gcc_flags, 'nvcc': nvcc_flags },
extra_link_args=extra_link_args,
)
|
Create an extension that builds the custom tensorflow ops
|
entailment
|
def updated_dimensions(self):
""" Inform montblanc about dimension sizes """
return [("ntime", args.ntime), # Timesteps
("nchan", args.nchan), # Channels
("na", args.na), # Antenna
("npsrc", len(lm_coords))]
|
Inform montblanc about dimension sizes
|
entailment
|
def point_lm(self, context):
""" Supply point source lm coordinates to montblanc """
# Shape (npsrc, 2)
(ls, us), _ = context.array_extents(context.name)
return np.asarray(lm_coords[ls:us], dtype=context.dtype)
|
Supply point source lm coordinates to montblanc
|
entailment
|
def point_stokes(self, context):
""" Supply point source stokes parameters to montblanc """
# Shape (npsrc, ntime, 4)
(ls, us), (lt, ut), (l, u) = context.array_extents(context.name)
data = np.empty(context.shape, context.dtype)
data[ls:us,:,l:u] = np.asarray(lm_stokes)[ls:us,None,:]
return data
|
Supply point source stokes parameters to montblanc
|
entailment
|
def uvw(self, context):
""" Supply UVW antenna coordinates to montblanc """
# Shape (ntime, na, 3)
(lt, ut), (la, ua), (l, u) = context.array_extents(context.name)
# Create empty UVW coordinates
data = np.empty(context.shape, context.dtype)
data[:,:,0] = np.arange(la+1, ua+1) # U = antenna index
data[:,:,1] = 0 # V = 0
data[:,:,2] = 0 # W = 0
return data
|
Supply UVW antenna coordinates to montblanc
|
entailment
|
def reinitialize_command(self, command, reinit_subcommands):
"""
Monkeypatch distutils.Distribution.reinitialize_command() to match behavior
of Distribution.get_command_obj()
This fixes a problem where 'pip install -e' does not reinitialise options
using the setup(options={...}) variable for the build_ext command.
This also affects other option sources such as setup.cfg.
"""
cmd_obj = _DISTUTILS_REINIT(self, command, reinit_subcommands)
options = self.command_options.get(command)
if options:
self._set_command_options(cmd_obj, options)
return cmd_obj
|
Monkeypatch distutils.Distribution.reinitialize_command() to match behavior
of Distribution.get_command_obj()
This fixes a problem where 'pip install -e' does not reinitialise options
using the setup(options={...}) variable for the build_ext command.
This also affects other option sources such as setup.cfg.
|
entailment
|
def nr_of_baselines(na, auto_correlations=False):
"""
Compute the number of baselines for the
given number of antenna. Can specify whether
auto-correlations should be taken into
account
"""
m = (na-1) if auto_correlations is False else (na+1)
return (na*m)//2
|
Compute the number of baselines for the
given number of antenna. Can specify whether
auto-correlations should be taken into
account
|
entailment
|
def nr_of_antenna(nbl, auto_correlations=False):
"""
Compute the number of antenna for the
given number of baselines. Can specify whether
auto-correlations should be taken into
account
"""
t = 1 if auto_correlations is False else -1
return int(t + math.sqrt(1 + 8*nbl)) // 2
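Worked round trip for a 7-antenna array:
assert nr_of_baselines(7) == 21
assert nr_of_baselines(7, auto_correlations=True) == 28
assert nr_of_antenna(21) == 7
assert nr_of_antenna(28, auto_correlations=True) == 7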
|
Compute the number of antenna for the
given number of baselines. Can specify whether
auto-correlations should be taken into
account
|
entailment
|
def array_bytes(shape, dtype):
""" Estimates the memory in bytes required for an array of the supplied shape and dtype """
return np.product(shape)*np.dtype(dtype).itemsize
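For example, a 1024 x 1024 float64 array needs 1024 * 1024 * 8 bytes:
import numpy as np

assert array_bytes((1024, 1024), np.float64) == 8 * 1024 * 1024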
|
Estimates the memory in bytes required for an array of the supplied shape and dtype
|
entailment
|
def random_like(ary=None, shape=None, dtype=None):
"""
Returns a random array of the same shape and type as the
supplied array argument, or the supplied shape and dtype
"""
if ary is not None:
shape, dtype = ary.shape, ary.dtype
elif shape is None or dtype is None:
raise ValueError((
'random_like(ary, shape, dtype) must be supplied '
'with either an array argument, or the shape and dtype '
'of the desired random array.'))
if np.issubdtype(dtype, np.complexfloating):
return (np.random.random(size=shape) + \
np.random.random(size=shape)*1j).astype(dtype)
else:
return np.random.random(size=shape).astype(dtype)
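Either form works; matching an existing array, or an explicit shape and dtype:
import numpy as np

ary = np.zeros((4, 3), dtype=np.complex64)
r1 = random_like(ary)
r2 = random_like(shape=(4, 3), dtype=np.float32)
assert r1.shape == (4, 3) and r1.dtype == np.complex64
assert r2.shape == (4, 3) and r2.dtype == np.float32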
|
Returns a random array of the same shape and type as the
supplied array argument, or the supplied shape and dtype
|
entailment
|
def flatten(nested):
""" Return a flatten version of the nested argument """
flat_return = list()
def __inner_flat(nested,flat):
for i in nested:
__inner_flat(i, flat) if isinstance(i, list) else flat.append(i)
return flat
__inner_flat(nested,flat_return)
return flat_return
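For example:
assert flatten([1, [2, [3, 4]], 5]) == [1, 2, 3, 4, 5]
assert flatten([]) == []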
|
Return a flattened version of the nested argument
|
entailment
|
def dict_array_bytes(ary, template):
"""
Return the number of bytes required by an array
Arguments
---------------
ary : dict
Dictionary representation of an array
template : dict
A dictionary of key-values, used to replace any
string values in the array with concrete integral
values
Returns
-----------
The number of bytes required to represent
the array.
"""
shape = shape_from_str_tuple(ary['shape'], template)
dtype = dtype_from_str(ary['dtype'], template)
return array_bytes(shape, dtype)
|
Return the number of bytes required by an array
Arguments
---------------
ary : dict
Dictionary representation of an array
template : dict
A dictionary of key-values, used to replace any
string values in the array with concrete integral
values
Returns
-----------
The number of bytes required to represent
the array.
|
entailment
|
def dict_array_bytes_required(arrays, template):
"""
Return the number of bytes required by
a dictionary of arrays.
Arguments
---------------
arrays : list
A list of dictionaries defining the arrays
template : dict
A dictionary of key-values, used to replace any
string values in the arrays with concrete integral
values
Returns
-----------
The number of bytes required to represent
all the arrays.
"""
return np.sum([dict_array_bytes(ary, template)
for ary in arrays])
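A hedged example, assuming the dtype strings are resolved through the template (as slvr.template_dict() would provide, e.g. 'ct' for the complex type and 'ft' for the float type):
import numpy as np

arrays = [
    {"shape": ("ntime", "nbl", "nchan"), "dtype": "ct"},
    {"shape": ("ntime", "nchan"), "dtype": "ft"},
]
template = {"ntime": 10, "nbl": 21, "nchan": 64,
            "ct": np.complex128, "ft": np.float64}
# 10*21*64*16 bytes + 10*64*8 bytes = 215040 + 5120
print(dict_array_bytes_required(arrays, template))  # 220160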
|
Return the number of bytes required by
a dictionary of arrays.
Arguments
---------------
arrays : list
A list of dictionaries defining the arrays
template : dict
A dictionary of key-values, used to replace any
string values in the arrays with concrete integral
values
Returns
-----------
The number of bytes required to represent
all the arrays.
|
entailment
|
def viable_dim_config(bytes_available, arrays, template,
dim_ord, nsolvers=1):
"""
Determines whether the registered arrays can fit within the memory budget
defined by bytes_available, reducing dimensions in the order given by dim_ord until they do
Arguments
----------------
bytes_available : int
The memory budget, or available number of bytes
for solving the problem.
arrays : list
List of dictionaries describing the arrays
template : dict
Dictionary containing key-values that will be used
to replace any string representations of dimensions
and types. slvr.template_dict() will return something
suitable.
dim_ord : list
list of dimension string names that the problem should be
subdivided by. e.g. ['ntime', 'nbl', 'nchan'].
Multiple dimensions can be reduced simultaneously using
the following syntax 'nbl&na'. This is mostly useful for
the baseline-antenna equivalence.
nsolvers : int
Number of solvers to budget for. Defaults to one.
Returns
----------
A tuple (boolean, dict). The boolean is True if the problem
can fit within the supplied budget, False otherwise.
The dictionary contains the reduced dimensions as key and
the reduced size as value.
e.g. (True, { 'ntime' : 1, 'nbl' : 1 })
For a dim_ord = ['ntime', 'nbl', 'nchan'], this method will try and fit
a ntime x nbl x nchan problem into the available number of bytes.
If this is not possible, it will first set ntime=1, and then try fit an
1 x nbl x nchan problem into the budget, then a 1 x 1 x nchan
problem.
One can specify reductions for specific dimensions.
For e.g. ['ntime=20', 'nbl=1&na=2', 'nchan=50%']
will reduce ntime to 20, but no lower. nbl=1&na=2 sets
both nbl and na to 1 and 2 in the same operation respectively.
nchan=50\% will continuously halve the nchan dimension
until it reaches a value of 1.
"""
if not isinstance(dim_ord, list):
raise TypeError('dim_ord should be a list')
# Don't accept non-negative memory budgets
if bytes_available < 0:
bytes_available = 0
modified_dims = {}
T = template.copy()
bytes_used = dict_array_bytes_required(arrays, T)*nsolvers
# While more bytes are used than are available, set
# dimensions to one in the order specified by the
# dim_ord argument.
while bytes_used > bytes_available:
try:
dims = dim_ord.pop(0)
montblanc.log.debug('Applying reduction {s}. '
'Bytes available: {a} used: {u}'.format(
s=dims,
a=fmt_bytes(bytes_available),
u=fmt_bytes(bytes_used)))
dims = dims.strip().split('&')
except IndexError:
# No more dimensions available for reducing
# the problem size. Unable to fit the problem
# within the specified memory budget
return False, modified_dims
# Can't fit everything into memory,
# Lower dimensions and re-evaluate
for dim in dims:
match = re.match(__DIM_REDUCTION_RE, dim)
if not match:
raise ValueError(
"{d} is an invalid dimension reduction string "
"Valid strings are for e.g. "
"'ntime', 'ntime=20' or 'ntime=20%'"
.format(d=dim))
dim_name = match.group('name')
dim_value = match.group('value')
dim_percent = match.group('percent')
dim_value = 1 if dim_value is None else int(dim_value)
# Attempt reduction by a percentage
if dim_percent == '%':
dim_value = int(T[dim_name] * int(dim_value) / 100.0)
if dim_value < 1:
# This can't be reduced any further
dim_value = 1
else:
# Allows another attempt at reduction
# by percentage on this dimension
dim_ord.insert(0, dim)
# Apply the dimension reduction
if T[dim_name] > dim_value:
modified_dims[dim_name] = dim_value
T[dim_name] = dim_value
else:
montblanc.log.info(('Ignored reduction of {d} '
'of size {s} to {v}. ').format(
d=dim_name, s=T[dim_name], v=dim_value))
bytes_used = dict_array_bytes_required(arrays, T)*nsolvers
return True, modified_dims
|
Returns whether the problem can fit within the memory budget
defined by bytes_available, and the dimension reductions required
to make it fit, given the registered arrays
Arguments
----------------
bytes_available : int
The memory budget, or available number of bytes
for solving the problem.
arrays : list
List of dictionaries describing the arrays
template : dict
Dictionary containing key-values that will be used
to replace any string representations of dimensions
and types. slvr.template_dict() will return something
suitable.
dim_ord : list
list of dimension string names that the problem should be
subdivided by. e.g. ['ntime', 'nbl', 'nchan'].
    Multiple dimensions can be reduced simultaneously using
the following syntax 'nbl&na'. This is mostly useful for
the baseline-antenna equivalence.
nsolvers : int
Number of solvers to budget for. Defaults to one.
Returns
----------
A tuple (boolean, dict). The boolean is True if the problem
can fit within the supplied budget, False otherwise.
The dictionary contains the reduced dimensions as keys and
the reduced sizes as values,
e.g. (True, { 'ntime' : 1, 'nbl' : 1 })
For a dim_ord = ['ntime', 'nbl', 'nchan'], this method will try to fit
an ntime x nbl x nchan problem into the available number of bytes.
If this is not possible, it will first set ntime=1 and try to fit a
1 x nbl x nchan problem into the budget, then a 1 x 1 x nchan
problem.
Reductions can also be specified for individual dimensions.
For example, ['ntime=20', 'nbl=1&na=2', 'nchan=50%']
will reduce ntime to 20, but no lower. nbl=1&na=2 sets
nbl to 1 and na to 2 in the same operation.
nchan=50% will continuously halve the nchan dimension
until it reaches a value of 1.
|
entailment
|
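A usage sketch for viable_dim_config, reusing the hypothetical arrays and template from the sketch above; the budget is arbitrary and the reduction strings follow the docstring's own example:

# Budget of 512 MiB for a single solver; reduce ntime first (to no
# less than 20), then nbl and na together, then repeatedly halve nchan.
budget = 512 * 1024**2
fits, reductions = viable_dim_config(
    budget, arrays, template,
    ['ntime=20', 'nbl=1&na=2', 'nchan=50%'],
    nsolvers=1)

# fits is True if the (possibly reduced) problem fits in the budget;
# reductions maps each reduced dimension name to its new size,
# e.g. {'ntime': 20}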
def shape_from_str_tuple(sshape, variables, ignore=None):
"""
Substitutes string values in the supplied shape parameter
with integer variables stored in a dictionary
Parameters
----------
sshape : tuple/string composed of integers and strings.
        The strings should relate to integral properties
registered with this Solver object
variables : dictionary
Keys with associated integer values. Used to replace
string values within the tuple
ignore : list
A list of tuple strings to ignore
    >>> shape_from_str_tuple((4, 'na', 'ntime'), {'na': 3, 'ntime': 10}, ignore=['ntime'])
(4, 3)
"""
if ignore is None: ignore = []
if not isinstance(sshape, tuple) and not isinstance(sshape, list):
        raise TypeError('sshape argument must be a tuple or list')
if not isinstance(ignore, list):
        raise TypeError('ignore argument must be a list')
return tuple([int(eval_expr(v,variables)) if isinstance(v,str) else int(v)
for v in sshape if v not in ignore])
|
Substitutes string values in the supplied shape parameter
with integer variables stored in a dictionary
Parameters
----------
sshape : tuple/string composed of integers and strings.
    The strings should relate to integral properties
registered with this Solver object
variables : dictionary
Keys with associated integer values. Used to replace
string values within the tuple
ignore : list
A list of tuple strings to ignore
>>> shape_from_str_tuple((4, 'na', 'ntime'), {'na': 3, 'ntime': 10}, ignore=['ntime'])
(4, 3)
|
entailment
|
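The docstring's example, restated as a small self-contained sketch; the variable values are illustrative:

variables = {'na': 3, 'ntime': 10}

shape_from_str_tuple((4, 'na', 'ntime'), variables)
# -> (4, 3, 10)

shape_from_str_tuple((4, 'na', 'ntime'), variables, ignore=['ntime'])
# -> (4, 3)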
def shape_list(l,shape,dtype):
""" Shape a list of lists into the appropriate shape and data type """
return np.array(l, dtype=dtype).reshape(shape)
|
Shape a list of lists into the appropriate shape and data type
|
entailment
|
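For example, shape_list can coerce a nested Python list into a typed array of a different shape:

import numpy as np

shape_list([[1, 2], [3, 4]], shape=(4,), dtype=np.int32)
# -> array([1, 2, 3, 4], dtype=int32)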
def array_convert_function(sshape_one, sshape_two, variables):
""" Return a function defining the conversion process between two NumPy
arrays of different shapes """
if not isinstance(sshape_one, tuple): sshape_one = (sshape_one,)
if not isinstance(sshape_two, tuple): sshape_two = (sshape_two,)
s_one = flatten([eval_expr_names_and_nrs(d) if isinstance(d,str) else d
for d in sshape_one])
s_two = flatten([eval_expr_names_and_nrs(d) if isinstance(d,str) else d
for d in sshape_two])
if len(s_one) != len(s_two):
        raise ValueError(('Flattened shapes %s and %s '
            'do not have the same length. '
            'Original shapes were %s and %s') %
            (s_one, s_two, sshape_one, sshape_two))
# Reason about the transpose
t_idx = tuple([s_one.index(v) for v in s_two])
# Figure out the actual numeric shape values to use
n_one = shape_from_str_tuple(s_one, variables)
n_two = [eval_expr(d,variables)
if isinstance(d,str) else d for d in sshape_two]
def f(ary): return np.reshape(ary, n_one).transpose(t_idx).reshape(n_two)
return f
|
Return a function defining the conversion process between two NumPy
arrays of different shapes
|
entailment
|
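A sketch of how array_convert_function might be used to reorder axes between two named layouts. The dimension names and sizes are hypothetical, and it assumes eval_expr and eval_expr_names_and_nrs resolve plain dimension names against the variables dict:

import numpy as np

variables = {'ntime': 10, 'nbl': 21, 'nchan': 64}

# Build a converter from an (ntime, nbl, nchan) layout to (nchan, nbl, ntime)
convert = array_convert_function(
    ('ntime', 'nbl', 'nchan'), ('nchan', 'nbl', 'ntime'), variables)

src = np.arange(10 * 21 * 64).reshape(10, 21, 64)
dst = convert(src)   # dst.shape == (64, 21, 10)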