Dataset columns (with observed value ranges):

repository_name: string (7 to 55 chars)
func_path_in_repository: string (4 to 223 chars)
func_name: string (1 to 134 chars)
whole_func_string: string (75 to 104k chars)
language: string (1 class)
func_code_string: string (75 to 104k chars)
func_code_tokens: list (19 to 28.4k items)
func_documentation_string: string (1 to 46.9k chars)
func_documentation_tokens: list (1 to 1.97k items)
split_name: string (1 class)
func_code_url: string (87 to 315 chars)
numberoverzero/bloop
bloop/conditions.py
ReferenceTracker._value_ref
def _value_ref(self, column, value, *, dumped=False, inner=False):
    """inner=True uses column.typedef.inner_typedef instead of column.typedef"""
    ref = ":v{}".format(self.next_index)

    # Need to dump this value
    if not dumped:
        typedef = column.typedef
        for segment in path_of(column):
            typedef = typedef[segment]
        if inner:
            typedef = typedef.inner_typedef
        value = self.engine._dump(typedef, value)

    self.attr_values[ref] = value
    self.counts[ref] += 1
    return ref, value
python
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/conditions.py#L166-L181
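For intuition, here is a minimal, self-contained sketch of the naming/counting pattern that _value_ref relies on. The class below is an illustrative stand-in, not bloop's implementation: the typedef dumping step is skipped, and next_index is modeled as a plain counter.

import collections

class TinyTracker:
    """Illustrative only: mimics how refs like ':v0', ':v1' are issued and counted."""
    def __init__(self):
        self.attr_values = {}
        self.counts = collections.Counter()
        self._next = 0

    @property
    def next_index(self):
        # bloop derives this from its internal state; a plain counter works here
        index = self._next
        self._next += 1
        return index

    def value_ref(self, value):
        ref = ":v{}".format(self.next_index)
        self.attr_values[ref] = value   # dumping through a typedef is skipped in this sketch
        self.counts[ref] += 1
        return ref

tracker = TinyTracker()
print(tracker.value_ref({"S": "user@domain"}))  # :v0
print(tracker.value_ref({"N": "3"}))            # :v1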
numberoverzero/bloop
bloop/conditions.py
ReferenceTracker.any_ref
def any_ref(self, *, column, value=missing, dumped=False, inner=False):
    """Returns a NamedTuple of (name, type, value) for any type of reference.

    .. code-block:: python

        # Name ref
        >>> tracker.any_ref(column=User.email)
        Reference(name='email', type='name', value=None)

        # Value ref
        >>> tracker.any_ref(column=User.email, value='user@domain')
        Reference(name='email', type='value', value={'S': 'user@domain'})

        # Passed as value ref, but value is another column
        >>> tracker.any_ref(column=User.email, value=User.other_column)
        Reference(name='other_column', type='name', value=None)

    :param column: The column to reference.  If ``value`` is not provided, this will render a name ref
        for this column.
    :type column: :class:`~bloop.conditions.ComparisonMixin`
    :param value: *(Optional)* If provided, this is likely a value ref.  If ``value`` is also a column,
        this will render a name ref for that column (not the ``column`` parameter).
    :param bool dumped: *(Optional)* True if the value has already been dumped and should not be dumped
        through the column's typedef again.  Commonly used with atomic conditions (which store the
        object's dumped representation).  Default is False.
    :param bool inner: *(Optional)* True if this is a value ref and it should be dumped through a
        collection's inner type, and not the collection type itself.  Default is False.
    :return: A name or value reference
    :rtype: :class:`bloop.conditions.Reference`
    """
    # Can't use None since it's a legal value for comparisons (attribute_not_exists)
    if value is missing:
        # Simple path ref to the column.
        name = self._path_ref(column=column)
        ref_type = "name"
        value = None
    elif isinstance(value, ComparisonMixin):
        # value is also a column!  Also a path ref.
        name = self._path_ref(column=value)
        ref_type = "name"
        value = None
    else:
        # Simple value ref.
        name, value = self._value_ref(column=column, value=value, dumped=dumped, inner=inner)
        ref_type = "value"
    return Reference(name=name, type=ref_type, value=value)
python
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/conditions.py#L183-L227
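The ``value=missing`` default works because ``missing`` is a sentinel object distinct from every legal value, including None, as the code comment explains. A sketch of the sentinel pattern (the sentinel name matches the code above; everything else is illustrative):

missing = object()  # unique sentinel; never equal to any caller-supplied value

def describe(value=missing):
    if value is missing:
        return "no value passed"
    if value is None:
        return "None was passed explicitly"   # legal, e.g. attribute_not_exists
    return "got {!r}".format(value)

print(describe())        # no value passed
print(describe(None))    # None was passed explicitly
print(describe("x"))     # got 'x'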
numberoverzero/bloop
bloop/conditions.py
ReferenceTracker.pop_refs
def pop_refs(self, *refs):
    """Decrement the usage of each ref by 1.

    If this was the last use of a ref, remove it from attr_names or attr_values.
    """
    for ref in refs:
        name = ref.name
        count = self.counts[name]
        # Not tracking this ref
        if count < 1:
            continue
        # Someone else is using this ref
        elif count > 1:
            self.counts[name] -= 1
        # Last reference
        else:
            logger.debug("popping last usage of {}".format(ref))
            self.counts[name] -= 1
            if ref.type == "value":
                del self.attr_values[name]
            else:
                # Clean up both name indexes
                path_segment = self.attr_names[name]
                del self.attr_names[name]
                del self.name_attr_index[path_segment]
python
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/conditions.py#L229-L253
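The decrement-then-clean-up logic is plain reference counting. A reduced, self-contained sketch of the same branches (names and data are illustrative):

import collections

counts = collections.Counter({":v0": 2})
attr_values = {":v0": {"S": "user@domain"}}
attr_names = {"#n0": "email"}

def pop(name, is_value):
    count = counts[name]
    if count < 1:          # not tracking this ref
        return
    counts[name] -= 1
    if count == 1:         # last usage: drop the backing entry
        if is_value:
            del attr_values[name]
        else:
            del attr_names[name]

pop(":v0", True)
pop(":v0", True)
print(counts[":v0"], attr_values)   # 0 {}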
numberoverzero/bloop
bloop/conditions.py
ConditionRenderer.render
def render(self, obj=None, condition=None, atomic=False, update=False, filter=None, projection=None, key=None):
    """Main entry point for rendering multiple expressions.  All parameters are optional, except obj when
    atomic or update are True.

    :param obj: *(Optional)* An object to render an atomic condition or update expression for.  Required if
        update or atomic are True.  Default is None.
    :param condition: *(Optional)* Rendered as a "ConditionExpression" for a conditional operation.
        If atomic is True, the two are rendered in an AND condition.  Default is None.
    :type condition: :class:`~bloop.conditions.BaseCondition`
    :param bool atomic: *(Optional)* True if an atomic condition should be created for ``obj`` and rendered
        as a "ConditionExpression".  Default is False.
    :param bool update: *(Optional)* True if an "UpdateExpression" should be rendered for ``obj``.
        Default is False.
    :param filter: *(Optional)* A filter condition for a query or scan, rendered as a "FilterExpression".
        Default is None.
    :type filter: :class:`~bloop.conditions.BaseCondition`
    :param projection: *(Optional)* A set of Columns to include in a query or scan, rendered as a
        "ProjectionExpression".  Default is None.
    :type projection: set :class:`~bloop.models.Column`
    :param key: *(Optional)* A key condition for queries, rendered as a "KeyConditionExpression".
        Default is None.
    :type key: :class:`~bloop.conditions.BaseCondition`
    """
    if (atomic or update) and not obj:
        raise InvalidCondition("An object is required to render atomic conditions or updates without an object.")

    if filter:
        self.render_filter_expression(filter)
    if projection:
        self.render_projection_expression(projection)
    if key:
        self.render_key_expression(key)
    # Condition requires a bit of work, because either one can be empty/false
    condition = (condition or Condition()) & (get_snapshot(obj) if atomic else Condition())
    if condition:
        self.render_condition_expression(condition)
    if update:
        self.render_update_expression(obj)
python
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/conditions.py#L299-L339
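The line ``(condition or Condition()) & (...)`` leans on two properties of the empty Condition: it is falsey, and it behaves as an identity element for ``&``. The following is a reduced sketch of that design under those assumptions; these are not bloop's real classes:

class EmptyCond:
    def __and__(self, other):
        return other          # identity for &: empty AND x == x
    __rand__ = __and__
    def __bool__(self):
        return False          # an empty condition renders nothing

class Cond:
    def __init__(self, name):
        self.name = name
    def __and__(self, other):
        if isinstance(other, EmptyCond):
            return self
        return Cond("({} AND {})".format(self.name, other.name))

condition = (None or EmptyCond()) & Cond("size(email) > 3")
print(bool(condition), condition.name)   # True size(email) > 3
print(bool(EmptyCond() & EmptyCond()))   # False: nothing to render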
numberoverzero/bloop
bloop/conditions.py
ConditionRenderer.rendered
def rendered(self):
    """The rendered wire format for all conditions that have been rendered.

    Rendered conditions are never cleared.  A new :class:`~bloop.conditions.ConditionRenderer`
    should be used for each operation.
    """
    expressions = {k: v for (k, v) in self.expressions.items() if v is not None}
    if self.refs.attr_names:
        expressions["ExpressionAttributeNames"] = self.refs.attr_names
    if self.refs.attr_values:
        expressions["ExpressionAttributeValues"] = self.refs.attr_values
    return expressions
python
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/conditions.py#L388-L396
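"ExpressionAttributeNames" and "ExpressionAttributeValues" are the standard DynamoDB request fields, and None-valued entries are dropped, so the result can be splatted straight into a request (as Engine.delete does below with ``**render(...)``). A rendered result might look like this; the concrete values are illustrative:

rendered = {
    "ConditionExpression": "(#n0 = :v1)",
    "ExpressionAttributeNames": {"#n0": "email"},
    "ExpressionAttributeValues": {":v1": {"S": "user@domain"}},
}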
numberoverzero/bloop
bloop/stream/stream.py
Stream._unpack
def _unpack(self, record, key, expected):
    """Replaces the attr dict at the given key with an instance of a Model"""
    attrs = record.get(key)
    if attrs is None:
        return
    obj = unpack_from_dynamodb(
        attrs=attrs,
        expected=expected,
        model=self.model,
        engine=self.engine
    )
    object_loaded.send(self.engine, engine=self.engine, obj=obj)
    record[key] = obj
python
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/stream/stream.py#L79-L91
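bloop's signals (such as ``object_loaded``) are blinker signals, so a receiver can observe each unpacked object with blinker's connect/send model. A minimal sketch; the receiver and the sentinel sender are hypothetical:

from blinker import signal

object_loaded = signal("object_loaded")

@object_loaded.connect
def on_loaded(sender, engine=None, obj=None, **kwargs):
    # called once per object a stream record is unpacked into
    print("loaded:", obj)

object_loaded.send("engine-sentinel", engine="engine-sentinel", obj={"id": 3})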
numberoverzero/bloop
bloop/stream/shard.py
reformat_record
def reformat_record(record):
    """Repack a record into a cleaner structure for consumption."""
    return {
        "key": record["dynamodb"].get("Keys", None),
        "new": record["dynamodb"].get("NewImage", None),
        "old": record["dynamodb"].get("OldImage", None),
        "meta": {
            "created_at": record["dynamodb"]["ApproximateCreationDateTime"],
            "event": {
                "id": record["eventID"],
                "type": record["eventName"].lower(),
                "version": record["eventVersion"]
            },
            "sequence_number": record["dynamodb"]["SequenceNumber"],
        }
    }
python
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/stream/shard.py#L287-L303
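The input shape is the standard DynamoDB Streams record. Applied to a small illustrative record (all values invented; in bloop, created_at has already been converted to a datetime by the session layer, which this sketch glosses over), the repacking looks like:

raw = {
    "eventID": "57f42756f1a4a1a2371f6cd1a5b7f0d5",
    "eventName": "INSERT",
    "eventVersion": "1.1",
    "dynamodb": {
        "ApproximateCreationDateTime": 1489436400,
        "Keys": {"id": {"N": "3"}},
        "NewImage": {"id": {"N": "3"}, "email": {"S": "user@domain"}},
        "SequenceNumber": "700000000013595520780",
    },
}

print(reformat_record(raw)["meta"]["event"]["type"])   # insert
print(reformat_record(raw)["old"])                     # None (no OldImage on INSERT)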
numberoverzero/bloop
bloop/stream/shard.py
unpack_shards
def unpack_shards(shards, stream_arn, session):
    """List[Dict] -> Dict[shard_id, Shard].

    Each Shard's parent/children are hooked up with the other Shards in the list.
    """
    if not shards:
        return {}

    # When unpacking tokens, shard id key is "shard_id"
    # When unpacking DescribeStream responses, shard id key is "ShardId"
    if "ShardId" in shards[0]:
        shards = _translate_shards(shards)

    by_id = {
        shard_token["shard_id"]: Shard(
            stream_arn=stream_arn,
            shard_id=shard_token["shard_id"],
            iterator_type=shard_token.get("iterator_type"),
            sequence_number=shard_token.get("sequence_number"),
            parent=shard_token.get("parent"),
            session=session)
        for shard_token in shards
    }

    for shard in by_id.values():
        if shard.parent:
            shard.parent = by_id[shard.parent]
            shard.parent.children.append(shard)
    return by_id
python
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/stream/shard.py#L306-L329
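The two-pass linking (first build all objects keyed by id, then swap each parent id for the parent object) is the core of the function. The same pattern on a toy Node class, with invented shard ids:

class Node:
    def __init__(self, node_id, parent=None):
        self.node_id, self.parent, self.children = node_id, parent, []

tokens = [
    {"shard_id": "shard-001"},
    {"shard_id": "shard-002", "parent": "shard-001"},
]
by_id = {t["shard_id"]: Node(t["shard_id"], t.get("parent")) for t in tokens}
for node in by_id.values():
    if node.parent:                      # replace the parent id with the parent object
        node.parent = by_id[node.parent]
        node.parent.children.append(node)

print([c.node_id for c in by_id["shard-001"].children])   # ['shard-002']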
numberoverzero/bloop
bloop/stream/shard.py
Shard.token
def token(self):
    """JSON-serializable representation of the current Shard state.

    The token is enough to rebuild the Shard as part of rebuilding a Stream.

    :returns: Shard state as a json-friendly dict
    :rtype: dict
    """
    if self.iterator_type in RELATIVE_ITERATORS:
        logger.warning("creating shard token at non-exact location \"{}\"".format(self.iterator_type))
    token = {
        "stream_arn": self.stream_arn,
        "shard_id": self.shard_id,
        "iterator_type": self.iterator_type,
        "sequence_number": self.sequence_number,
    }
    if self.parent:
        token["parent"] = self.parent.shard_id
    if not self.iterator_type:
        del token["iterator_type"]
    if not self.sequence_number:
        del token["sequence_number"]
    return token
python
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/stream/shard.py#L127-L149
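A token for a shard pinned to an exact position might serialize like the dict below; the field names come from the code above, while every value is illustrative. Relative iterator types such as "trim_horizon" or "latest" don't pin an exact position, which is what the warning branch is about.

token = {
    "stream_arn": "arn:aws:dynamodb:us-west-2:123456789012:table/User/stream/2016-01-01T00:00:00.000",
    "shard_id": "shardId-00000001414576573621-f55eea83",
    "iterator_type": "after_sequence",
    "sequence_number": "49590338271490256608559692538361571095921575989136588898",
}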
numberoverzero/bloop
bloop/stream/shard.py
Shard.walk_tree
def walk_tree(self):
    """Generator that yields each :class:`~bloop.stream.shard.Shard` by walking the shard's children in order."""
    shards = collections.deque([self])
    while shards:
        shard = shards.popleft()
        yield shard
        shards.extend(shard.children)
python
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/stream/shard.py#L151-L157
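The deque-based loop is a plain breadth-first traversal. The same shape on a toy tree, with an illustrative Node class standing in for Shard:

import collections

class Node:
    def __init__(self, name, children=()):
        self.name, self.children = name, list(children)

root = Node("a", [Node("b", [Node("d")]), Node("c")])

def walk_tree(node):
    queue = collections.deque([node])
    while queue:
        current = queue.popleft()
        yield current
        queue.extend(current.children)

print([n.name for n in walk_tree(root)])   # ['a', 'b', 'c', 'd']  (level by level)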
numberoverzero/bloop
bloop/stream/shard.py
Shard.jump_to
def jump_to(self, *, iterator_type, sequence_number=None):
    """Move to a new position in the shard using the standard parameters to GetShardIterator.

    :param str iterator_type: "trim_horizon", "at_sequence", "after_sequence", "latest"
    :param str sequence_number: *(Optional)* Sequence number to use with at/after sequence.  Default is None.
    """
    # Just a simple wrapper; let the caller handle RecordsExpired
    self.iterator_id = self.session.get_shard_iterator(
        stream_arn=self.stream_arn,
        shard_id=self.shard_id,
        iterator_type=iterator_type,
        sequence_number=sequence_number)
    self.iterator_type = iterator_type
    self.sequence_number = sequence_number
    self.empty_responses = 0
python
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/stream/shard.py#L159-L173
numberoverzero/bloop
bloop/stream/shard.py
Shard.seek_to
def seek_to(self, position):
    """Move the Shard's iterator to the earliest record after the :class:`~datetime.datetime` time.

    Returns the first records at or past ``position``.  If the list is empty, the seek failed to find
    records, either because the Shard is exhausted or it reached the HEAD of an open Shard.

    :param position: The position in time to move to.
    :type position: :class:`~datetime.datetime`
    :returns: A list of the first records found after ``position``.  May be empty.
    """
    # 0) We have no way to associate the date with a position,
    #    so we have to scan the shard from the beginning.
    self.jump_to(iterator_type="trim_horizon")

    position = int(position.timestamp())

    while (not self.exhausted) and (self.empty_responses < CALLS_TO_REACH_HEAD):
        records = self.get_records()
        # We can skip the whole record set if the newest (last) record isn't new enough.
        if records and records[-1]["meta"]["created_at"].timestamp() >= position:
            # Looking for the first number *below* the position.
            for offset, record in enumerate(reversed(records)):
                if record["meta"]["created_at"].timestamp() < position:
                    index = len(records) - offset
                    return records[index:]
            return records
    # Either exhausted the Shard or caught up to HEAD.
    return []
python
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/stream/shard.py#L175-L204
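The reversed scan finds the first record strictly older than the target and returns everything after it; if no record is older, the whole batch qualifies. The same index arithmetic on plain ascending timestamps (illustrative data, not stream records):

def records_at_or_after(timestamps, position):
    # timestamps are ascending, like created_at within a batch of records
    if timestamps and timestamps[-1] >= position:
        for offset, ts in enumerate(reversed(timestamps)):
            if ts < position:
                index = len(timestamps) - offset
                return timestamps[index:]
        return timestamps      # every record is at/after position
    return []                  # newest record is still too old

print(records_at_or_after([10, 20, 30, 40], 25))   # [30, 40]
print(records_at_or_after([10, 20], 25))           # []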
numberoverzero/bloop
bloop/stream/shard.py
Shard.load_children
def load_children(self):
    """If the Shard doesn't have any children, tries to find some from DescribeStream.

    If the Shard is open this won't find any children, so an empty response doesn't
    mean the Shard will **never** have children.
    """
    # Child count is fixed the first time any of the following happen:
    # 0 :: stream closed or throughput decreased
    # 1 :: shard was open for ~4 hours
    # 2 :: throughput increased
    if self.children:
        return self.children

    # ParentShardId -> [Shard, ...]
    by_parent = collections.defaultdict(list)
    # ShardId -> Shard
    by_id = {}

    for shard in self.session.describe_stream(
            stream_arn=self.stream_arn,
            first_shard=self.shard_id)["Shards"]:
        parent_list = by_parent[shard.get("ParentShardId")]
        shard = Shard(
            stream_arn=self.stream_arn,
            shard_id=shard["ShardId"],
            parent=shard.get("ParentShardId"),
            session=self.session)
        parent_list.append(shard)
        by_id[shard.shard_id] = shard

    # Find this shard when looking up shards by ParentShardId
    by_id[self.shard_id] = self

    # Insert this shard's children, then handle its child's descendants etc.
    to_insert = collections.deque(by_parent[self.shard_id])
    while to_insert:
        shard = to_insert.popleft()
        # ParentShardId -> Shard
        shard.parent = by_id[shard.parent]
        shard.parent.children.append(shard)
        # Continue for any shards that have this shard as their parent
        to_insert.extend(by_parent[shard.shard_id])
    return self.children
python
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/stream/shard.py#L206-L250
numberoverzero/bloop
bloop/stream/shard.py
Shard.get_records
def get_records(self):
    """Get the next set of records in this shard.  An empty list doesn't guarantee the shard is exhausted.

    :returns: A list of reformatted records.  May be empty.
    """
    # Won't be able to find new records.
    if self.exhausted:
        return []

    # Already caught up, just the one call please.
    if self.empty_responses >= CALLS_TO_REACH_HEAD:
        return self._apply_get_records_response(self.session.get_stream_records(self.iterator_id))

    # Up to 5 calls to try and find a result
    while self.empty_responses < CALLS_TO_REACH_HEAD and not self.exhausted:
        records = self._apply_get_records_response(self.session.get_stream_records(self.iterator_id))
        if records:
            return records
    return []
python
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/stream/shard.py#L252-L271
numberoverzero/bloop
bloop/engine.py
Engine.bind
def bind(self, model, *, skip_table_setup=False):
    """Create backing tables for a model and its non-abstract subclasses.

    :param model: Base model to bind.  Can be abstract.
    :param skip_table_setup: Don't create or verify the table in DynamoDB.  Default is False.
    :raises bloop.exceptions.InvalidModel: if ``model`` is not a subclass of :class:`~bloop.models.BaseModel`.
    """
    # Make sure we're looking at models
    validate_is_model(model)

    concrete = set(filter(lambda m: not m.Meta.abstract, walk_subclasses(model)))
    if not model.Meta.abstract:
        concrete.add(model)
    logger.debug("binding non-abstract models {}".format(
        sorted(c.__name__ for c in concrete)
    ))

    # create_table doesn't block until ACTIVE or validate.
    # It also doesn't throw when the table already exists, making it safe
    # to call multiple times for the same unbound model.
    if skip_table_setup:
        logger.info("skip_table_setup is True; not trying to create tables or validate models during bind")
    else:
        self.session.clear_cache()

    is_creating = {}

    for model in concrete:
        table_name = self._compute_table_name(model)
        before_create_table.send(self, engine=self, model=model)
        if not skip_table_setup:
            if table_name in is_creating:
                continue
            creating = self.session.create_table(table_name, model)
            is_creating[table_name] = creating

    for model in concrete:
        if not skip_table_setup:
            table_name = self._compute_table_name(model)
            if is_creating[table_name]:
                # polls until table is active
                self.session.describe_table(table_name)
                if model.Meta.ttl:
                    self.session.enable_ttl(table_name, model)
                if model.Meta.backups and model.Meta.backups["enabled"]:
                    self.session.enable_backups(table_name, model)
            self.session.validate_table(table_name, model)
            model_validated.send(self, engine=self, model=model)
        model_bound.send(self, engine=self, model=model)

    logger.info("successfully bound {} models to the engine".format(len(concrete)))
python
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/engine.py#L115-L165
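For context, typical bind usage looks like the sketch below; the model and column names are invented, and constructing an Engine needs AWS credentials or a local DynamoDB endpoint at bind time.

from bloop import BaseModel, Column, Engine, Integer, String

class User(BaseModel):
    id = Column(Integer, hash_key=True)
    email = Column(String)

engine = Engine()
engine.bind(User)                           # creates/validates the backing table
# engine.bind(User, skip_table_setup=True)  # trust that the table already exists and matches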
numberoverzero/bloop
bloop/engine.py
Engine.delete
def delete(self, *objs, condition=None, atomic=False):
    """Delete one or more objects.

    :param objs: objects to delete.
    :param condition: only perform each delete if this condition holds.
    :param bool atomic: only perform each delete if the local and DynamoDB versions of the object match.
    :raises bloop.exceptions.ConstraintViolation: if the condition (or atomic) is not met.
    """
    objs = set(objs)
    validate_not_abstract(*objs)
    for obj in objs:
        self.session.delete_item({
            "TableName": self._compute_table_name(obj.__class__),
            "Key": dump_key(self, obj),
            **render(self, obj=obj, atomic=atomic, condition=condition)
        })
        object_deleted.send(self, engine=self, obj=obj)
    logger.info("successfully deleted {} objects".format(len(objs)))
python
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/engine.py#L167-L184
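A short usage sketch, reusing the hypothetical User model and engine from the bind example above:

user = User(id=3)

engine.delete(user)                                             # unconditional delete
engine.delete(user, condition=User.email == "user@domain")      # only if the condition holds
engine.delete(user, atomic=True)                                # only if DynamoDB matches local state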
numberoverzero/bloop
bloop/engine.py
Engine.load
def load(self, *objs, consistent=False):
    """Populate objects from DynamoDB.

    :param objs: objects to load.
    :param bool consistent: Use `strongly consistent reads`__ if True.  Default is False.
    :raises bloop.exceptions.MissingKey: if any object doesn't provide a value for a key column.
    :raises bloop.exceptions.MissingObjects: if one or more objects aren't loaded.

    __ http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html
    """
    get_table_name = self._compute_table_name
    objs = set(objs)
    validate_not_abstract(*objs)

    table_index, object_index, request = {}, {}, {}

    for obj in objs:
        table_name = get_table_name(obj.__class__)
        key = dump_key(self, obj)
        index = index_for(key)

        if table_name not in object_index:
            table_index[table_name] = list(sorted(key.keys()))
            object_index[table_name] = {}
            request[table_name] = {"Keys": [], "ConsistentRead": consistent}

        if index not in object_index[table_name]:
            request[table_name]["Keys"].append(key)
            object_index[table_name][index] = set()
        object_index[table_name][index].add(obj)

    response = self.session.load_items(request)

    for table_name, list_of_attrs in response.items():
        for attrs in list_of_attrs:
            key_shape = table_index[table_name]
            key = extract_key(key_shape, attrs)
            index = index_for(key)

            for obj in object_index[table_name].pop(index):
                unpack_from_dynamodb(
                    attrs=attrs, expected=obj.Meta.columns, engine=self, obj=obj)
                object_loaded.send(self, engine=self, obj=obj)
            if not object_index[table_name]:
                object_index.pop(table_name)

    if object_index:
        not_loaded = set()
        for index in object_index.values():
            for index_set in index.values():
                not_loaded.update(index_set)
        logger.info("loaded {} of {} objects".format(len(objs) - len(not_loaded), len(objs)))
        raise MissingObjects("Failed to load some objects.", objects=not_loaded)
    logger.info("successfully loaded {} objects".format(len(objs)))
python
def load(self, *objs, consistent=False): """Populate objects from DynamoDB. :param objs: objects to load. :param bool consistent: Use `strongly consistent reads`__ if True. Default is False. :raises bloop.exceptions.MissingKey: if any object doesn't provide a value for a key column. :raises bloop.exceptions.MissingObjects: if one or more objects aren't loaded. __ http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html """ get_table_name = self._compute_table_name objs = set(objs) validate_not_abstract(*objs) table_index, object_index, request = {}, {}, {} for obj in objs: table_name = get_table_name(obj.__class__) key = dump_key(self, obj) index = index_for(key) if table_name not in object_index: table_index[table_name] = list(sorted(key.keys())) object_index[table_name] = {} request[table_name] = {"Keys": [], "ConsistentRead": consistent} if index not in object_index[table_name]: request[table_name]["Keys"].append(key) object_index[table_name][index] = set() object_index[table_name][index].add(obj) response = self.session.load_items(request) for table_name, list_of_attrs in response.items(): for attrs in list_of_attrs: key_shape = table_index[table_name] key = extract_key(key_shape, attrs) index = index_for(key) for obj in object_index[table_name].pop(index): unpack_from_dynamodb( attrs=attrs, expected=obj.Meta.columns, engine=self, obj=obj) object_loaded.send(self, engine=self, obj=obj) if not object_index[table_name]: object_index.pop(table_name) if object_index: not_loaded = set() for index in object_index.values(): for index_set in index.values(): not_loaded.update(index_set) logger.info("loaded {} of {} objects".format(len(objs) - len(not_loaded), len(objs))) raise MissingObjects("Failed to load some objects.", objects=not_loaded) logger.info("successfully loaded {} objects".format(len(objs)))
[ "def", "load", "(", "self", ",", "*", "objs", ",", "consistent", "=", "False", ")", ":", "get_table_name", "=", "self", ".", "_compute_table_name", "objs", "=", "set", "(", "objs", ")", "validate_not_abstract", "(", "*", "objs", ")", "table_index", ",", "object_index", ",", "request", "=", "{", "}", ",", "{", "}", ",", "{", "}", "for", "obj", "in", "objs", ":", "table_name", "=", "get_table_name", "(", "obj", ".", "__class__", ")", "key", "=", "dump_key", "(", "self", ",", "obj", ")", "index", "=", "index_for", "(", "key", ")", "if", "table_name", "not", "in", "object_index", ":", "table_index", "[", "table_name", "]", "=", "list", "(", "sorted", "(", "key", ".", "keys", "(", ")", ")", ")", "object_index", "[", "table_name", "]", "=", "{", "}", "request", "[", "table_name", "]", "=", "{", "\"Keys\"", ":", "[", "]", ",", "\"ConsistentRead\"", ":", "consistent", "}", "if", "index", "not", "in", "object_index", "[", "table_name", "]", ":", "request", "[", "table_name", "]", "[", "\"Keys\"", "]", ".", "append", "(", "key", ")", "object_index", "[", "table_name", "]", "[", "index", "]", "=", "set", "(", ")", "object_index", "[", "table_name", "]", "[", "index", "]", ".", "add", "(", "obj", ")", "response", "=", "self", ".", "session", ".", "load_items", "(", "request", ")", "for", "table_name", ",", "list_of_attrs", "in", "response", ".", "items", "(", ")", ":", "for", "attrs", "in", "list_of_attrs", ":", "key_shape", "=", "table_index", "[", "table_name", "]", "key", "=", "extract_key", "(", "key_shape", ",", "attrs", ")", "index", "=", "index_for", "(", "key", ")", "for", "obj", "in", "object_index", "[", "table_name", "]", ".", "pop", "(", "index", ")", ":", "unpack_from_dynamodb", "(", "attrs", "=", "attrs", ",", "expected", "=", "obj", ".", "Meta", ".", "columns", ",", "engine", "=", "self", ",", "obj", "=", "obj", ")", "object_loaded", ".", "send", "(", "self", ",", "engine", "=", "self", ",", "obj", "=", "obj", ")", "if", "not", "object_index", "[", "table_name", "]", ":", "object_index", ".", "pop", "(", "table_name", ")", "if", "object_index", ":", "not_loaded", "=", "set", "(", ")", "for", "index", "in", "object_index", ".", "values", "(", ")", ":", "for", "index_set", "in", "index", ".", "values", "(", ")", ":", "not_loaded", ".", "update", "(", "index_set", ")", "logger", ".", "info", "(", "\"loaded {} of {} objects\"", ".", "format", "(", "len", "(", "objs", ")", "-", "len", "(", "not_loaded", ")", ",", "len", "(", "objs", ")", ")", ")", "raise", "MissingObjects", "(", "\"Failed to load some objects.\"", ",", "objects", "=", "not_loaded", ")", "logger", ".", "info", "(", "\"successfully loaded {} objects\"", ".", "format", "(", "len", "(", "objs", ")", ")", ")" ]
Populate objects from DynamoDB. :param objs: objects to load. :param bool consistent: Use `strongly consistent reads`__ if True. Default is False. :raises bloop.exceptions.MissingKey: if any object doesn't provide a value for a key column. :raises bloop.exceptions.MissingObjects: if one or more objects aren't loaded. __ http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html
[ "Populate", "objects", "from", "DynamoDB", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/engine.py#L186-L239
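Continuing the illustrative model from the earlier sketch, a load with consistent reads and partial-miss handling; exc.objects mirrors the `objects=not_loaded` kwarg in the raise above:

from bloop.exceptions import MissingObjects

alice, bob = User(id=1), User(id=2)
try:
    engine.load(alice, bob, consistent=True)
except MissingObjects as exc:
    # the instances that were not found in DynamoDB
    print("missing:", exc.objects)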
numberoverzero/bloop
bloop/engine.py
Engine.query
def query(self, model_or_index, key, filter=None, projection="all", consistent=False, forward=True): """Create a reusable :class:`~bloop.search.QueryIterator`. :param model_or_index: A model or index to query. For example, ``User`` or ``User.by_email``. :param key: Key condition. This must include an equality against the hash key, and optionally one of a restricted set of conditions on the range key. :param filter: Filter condition. Only matching objects will be included in the results. :param projection: "all", "count", a list of column names, or a list of :class:`~bloop.models.Column`. When projection is "count", you must advance the iterator to retrieve the count. :param bool consistent: Use `strongly consistent reads`__ if True. Default is False. :param bool forward: Query in ascending or descending order. Default is True (ascending). :return: A reusable query iterator with helper methods. :rtype: :class:`~bloop.search.QueryIterator` __ http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html """ if isinstance(model_or_index, Index): model, index = model_or_index.model, model_or_index else: model, index = model_or_index, None validate_not_abstract(model) q = Search( mode="query", engine=self, model=model, index=index, key=key, filter=filter, projection=projection, consistent=consistent, forward=forward) return iter(q.prepare())
python
def query(self, model_or_index, key, filter=None, projection="all", consistent=False, forward=True): """Create a reusable :class:`~bloop.search.QueryIterator`. :param model_or_index: A model or index to query. For example, ``User`` or ``User.by_email``. :param key: Key condition. This must include an equality against the hash key, and optionally one of a restricted set of conditions on the range key. :param filter: Filter condition. Only matching objects will be included in the results. :param projection: "all", "count", a list of column names, or a list of :class:`~bloop.models.Column`. When projection is "count", you must advance the iterator to retrieve the count. :param bool consistent: Use `strongly consistent reads`__ if True. Default is False. :param bool forward: Query in ascending or descending order. Default is True (ascending). :return: A reusable query iterator with helper methods. :rtype: :class:`~bloop.search.QueryIterator` __ http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html """ if isinstance(model_or_index, Index): model, index = model_or_index.model, model_or_index else: model, index = model_or_index, None validate_not_abstract(model) q = Search( mode="query", engine=self, model=model, index=index, key=key, filter=filter, projection=projection, consistent=consistent, forward=forward) return iter(q.prepare())
[ "def", "query", "(", "self", ",", "model_or_index", ",", "key", ",", "filter", "=", "None", ",", "projection", "=", "\"all\"", ",", "consistent", "=", "False", ",", "forward", "=", "True", ")", ":", "if", "isinstance", "(", "model_or_index", ",", "Index", ")", ":", "model", ",", "index", "=", "model_or_index", ".", "model", ",", "model_or_index", "else", ":", "model", ",", "index", "=", "model_or_index", ",", "None", "validate_not_abstract", "(", "model", ")", "q", "=", "Search", "(", "mode", "=", "\"query\"", ",", "engine", "=", "self", ",", "model", "=", "model", ",", "index", "=", "index", ",", "key", "=", "key", ",", "filter", "=", "filter", ",", "projection", "=", "projection", ",", "consistent", "=", "consistent", ",", "forward", "=", "forward", ")", "return", "iter", "(", "q", ".", "prepare", "(", ")", ")" ]
Create a reusable :class:`~bloop.search.QueryIterator`. :param model_or_index: A model or index to query. For example, ``User`` or ``User.by_email``. :param key: Key condition. This must include an equality against the hash key, and optionally one of a restricted set of conditions on the range key. :param filter: Filter condition. Only matching objects will be included in the results. :param projection: "all", "count", a list of column names, or a list of :class:`~bloop.models.Column`. When projection is "count", you must advance the iterator to retrieve the count. :param bool consistent: Use `strongly consistent reads`__ if True. Default is False. :param bool forward: Query in ascending or descending order. Default is True (ascending). :return: A reusable query iterator with helper methods. :rtype: :class:`~bloop.search.QueryIterator` __ http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html
[ "Create", "a", "reusable", ":", "class", ":", "~bloop", ".", "search", ".", "QueryIterator", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/engine.py#L241-L268
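A query sketch against the illustrative User model; the key condition is an equality on the hash key, per the docstring:

# hash-key equality is required; with a range key, a second condition could be ANDed in
q = engine.query(User, key=User.id == 3, filter=User.email.begins_with("a"))
for user in q:
    print(user)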
numberoverzero/bloop
bloop/engine.py
Engine.save
def save(self, *objs, condition=None, atomic=False): """Save one or more objects. :param objs: objects to save. :param condition: only perform each save if this condition holds. :param bool atomic: only perform each save if the local and DynamoDB versions of the object match. :raises bloop.exceptions.ConstraintViolation: if the condition (or atomic) is not met. """ objs = set(objs) validate_not_abstract(*objs) for obj in objs: self.session.save_item({ "TableName": self._compute_table_name(obj.__class__), "Key": dump_key(self, obj), **render(self, obj=obj, atomic=atomic, condition=condition, update=True) }) object_saved.send(self, engine=self, obj=obj) logger.info("successfully saved {} objects".format(len(objs)))
python
def save(self, *objs, condition=None, atomic=False): """Save one or more objects. :param objs: objects to save. :param condition: only perform each save if this condition holds. :param bool atomic: only perform each save if the local and DynamoDB versions of the object match. :raises bloop.exceptions.ConstraintViolation: if the condition (or atomic) is not met. """ objs = set(objs) validate_not_abstract(*objs) for obj in objs: self.session.save_item({ "TableName": self._compute_table_name(obj.__class__), "Key": dump_key(self, obj), **render(self, obj=obj, atomic=atomic, condition=condition, update=True) }) object_saved.send(self, engine=self, obj=obj) logger.info("successfully saved {} objects".format(len(objs)))
[ "def", "save", "(", "self", ",", "*", "objs", ",", "condition", "=", "None", ",", "atomic", "=", "False", ")", ":", "objs", "=", "set", "(", "objs", ")", "validate_not_abstract", "(", "*", "objs", ")", "for", "obj", "in", "objs", ":", "self", ".", "session", ".", "save_item", "(", "{", "\"TableName\"", ":", "self", ".", "_compute_table_name", "(", "obj", ".", "__class__", ")", ",", "\"Key\"", ":", "dump_key", "(", "self", ",", "obj", ")", ",", "*", "*", "render", "(", "self", ",", "obj", "=", "obj", ",", "atomic", "=", "atomic", ",", "condition", "=", "condition", ",", "update", "=", "True", ")", "}", ")", "object_saved", ".", "send", "(", "self", ",", "engine", "=", "self", ",", "obj", "=", "obj", ")", "logger", ".", "info", "(", "\"successfully saved {} objects\"", ".", "format", "(", "len", "(", "objs", ")", ")", ")" ]
Save one or more objects. :param objs: objects to save. :param condition: only perform each save if this condition holds. :param bool atomic: only perform each save if the local and DynamoDB versions of the object match. :raises bloop.exceptions.ConstraintViolation: if the condition (or atomic) is not met.
[ "Save", "one", "or", "more", "objects", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/engine.py#L270-L287
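A conditional-save sketch with the illustrative model; `User.id.is_(None)` asserts the key attribute doesn't exist yet, making this an insert-only save:

new_user = User(id=4, email="new@domain.com")
# fails with ConstraintViolation if a row with this key already exists
engine.save(new_user, condition=User.id.is_(None))
# atomic=True instead conditions on the last-loaded DynamoDB state of the object
engine.save(new_user, atomic=True)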
numberoverzero/bloop
bloop/engine.py
Engine.scan
def scan(self, model_or_index, filter=None, projection="all", consistent=False, parallel=None): """Create a reusable :class:`~bloop.search.ScanIterator`. :param model_or_index: A model or index to scan. For example, ``User`` or ``User.by_email``. :param filter: Filter condition. Only matching objects will be included in the results. :param projection: "all", "count", a list of column names, or a list of :class:`~bloop.models.Column`. When projection is "count", you must exhaust the iterator to retrieve the count. :param bool consistent: Use `strongly consistent reads`__ if True. Default is False. :param tuple parallel: Perform a `parallel scan`__. A tuple of (Segment, TotalSegments) for this portion of the scan. Default is None. :return: A reusable scan iterator with helper methods. :rtype: :class:`~bloop.search.ScanIterator` __ http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html __ http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#QueryAndScanParallelScan """ if isinstance(model_or_index, Index): model, index = model_or_index.model, model_or_index else: model, index = model_or_index, None validate_not_abstract(model) s = Search( mode="scan", engine=self, model=model, index=index, filter=filter, projection=projection, consistent=consistent, parallel=parallel) return iter(s.prepare())
python
def scan(self, model_or_index, filter=None, projection="all", consistent=False, parallel=None): """Create a reusable :class:`~bloop.search.ScanIterator`. :param model_or_index: A model or index to scan. For example, ``User`` or ``User.by_email``. :param filter: Filter condition. Only matching objects will be included in the results. :param projection: "all", "count", a list of column names, or a list of :class:`~bloop.models.Column`. When projection is "count", you must exhaust the iterator to retrieve the count. :param bool consistent: Use `strongly consistent reads`__ if True. Default is False. :param tuple parallel: Perform a `parallel scan`__. A tuple of (Segment, TotalSegments) for this portion of the scan. Default is None. :return: A reusable scan iterator with helper methods. :rtype: :class:`~bloop.search.ScanIterator` __ http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html __ http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#QueryAndScanParallelScan """ if isinstance(model_or_index, Index): model, index = model_or_index.model, model_or_index else: model, index = model_or_index, None validate_not_abstract(model) s = Search( mode="scan", engine=self, model=model, index=index, filter=filter, projection=projection, consistent=consistent, parallel=parallel) return iter(s.prepare())
[ "def", "scan", "(", "self", ",", "model_or_index", ",", "filter", "=", "None", ",", "projection", "=", "\"all\"", ",", "consistent", "=", "False", ",", "parallel", "=", "None", ")", ":", "if", "isinstance", "(", "model_or_index", ",", "Index", ")", ":", "model", ",", "index", "=", "model_or_index", ".", "model", ",", "model_or_index", "else", ":", "model", ",", "index", "=", "model_or_index", ",", "None", "validate_not_abstract", "(", "model", ")", "s", "=", "Search", "(", "mode", "=", "\"scan\"", ",", "engine", "=", "self", ",", "model", "=", "model", ",", "index", "=", "index", ",", "filter", "=", "filter", ",", "projection", "=", "projection", ",", "consistent", "=", "consistent", ",", "parallel", "=", "parallel", ")", "return", "iter", "(", "s", ".", "prepare", "(", ")", ")" ]
Create a reusable :class:`~bloop.search.ScanIterator`. :param model_or_index: A model or index to scan. For example, ``User`` or ``User.by_email``. :param filter: Filter condition. Only matching objects will be included in the results. :param projection: "all", "count", a list of column names, or a list of :class:`~bloop.models.Column`. When projection is "count", you must exhaust the iterator to retrieve the count. :param bool consistent: Use `strongly consistent reads`__ if True. Default is False. :param tuple parallel: Perform a `parallel scan`__. A tuple of (Segment, TotalSegments) for this portion of the scan. Default is None. :return: A reusable scan iterator with helper methods. :rtype: :class:`~bloop.search.ScanIterator` __ http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html __ http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#QueryAndScanParallelScan
[ "Create", "a", "reusable", ":", "class", ":", "~bloop", ".", "search", ".", "ScanIterator", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/engine.py#L289-L314
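A scan sketch reusing the illustrative model; each worker takes one segment of a parallel scan:

# one worker's share of a 4-segment parallel scan
for user in engine.scan(User, parallel=(0, 4)):
    print(user)

# filtered scan: only rows whose email begins with "admin"
for user in engine.scan(User, filter=User.email.begins_with("admin")):
    print(user)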
numberoverzero/bloop
bloop/engine.py
Engine.stream
def stream(self, model, position): """Create a :class:`~bloop.stream.Stream` that provides approximate chronological ordering. .. code-block:: pycon # Create a user so we have a record >>> engine = Engine() >>> user = User(id=3, email="user@domain.com") >>> engine.save(user) >>> user.email = "admin@domain.com" >>> engine.save(user) # First record lacks an "old" value since it's an insert >>> stream = engine.stream(User, "trim_horizon") >>> next(stream) {'key': None, 'old': None, 'new': User(email='user@domain.com', id=3, verified=None), 'meta': { 'created_at': datetime.datetime(2016, 10, 23, ...), 'event': { 'id': '3fe6d339b7cb19a1474b3d853972c12a', 'type': 'insert', 'version': '1.1'}, 'sequence_number': '700000000007366876916'} } :param model: The model to stream records from. :param position: "trim_horizon", "latest", a stream token, or a :class:`datetime.datetime`. :return: An iterator for records in all shards. :rtype: :class:`~bloop.stream.Stream` :raises bloop.exceptions.InvalidStream: if the model does not have a stream. """ validate_not_abstract(model) if not model.Meta.stream or not model.Meta.stream.get("arn"): raise InvalidStream("{!r} does not have a stream arn".format(model)) stream = Stream(model=model, engine=self) stream.move_to(position=position) return stream
python
def stream(self, model, position): """Create a :class:`~bloop.stream.Stream` that provides approximate chronological ordering. .. code-block:: pycon # Create a user so we have a record >>> engine = Engine() >>> user = User(id=3, email="user@domain.com") >>> engine.save(user) >>> user.email = "admin@domain.com" >>> engine.save(user) # First record lacks an "old" value since it's an insert >>> stream = engine.stream(User, "trim_horizon") >>> next(stream) {'key': None, 'old': None, 'new': User(email='user@domain.com', id=3, verified=None), 'meta': { 'created_at': datetime.datetime(2016, 10, 23, ...), 'event': { 'id': '3fe6d339b7cb19a1474b3d853972c12a', 'type': 'insert', 'version': '1.1'}, 'sequence_number': '700000000007366876916'} } :param model: The model to stream records from. :param position: "trim_horizon", "latest", a stream token, or a :class:`datetime.datetime`. :return: An iterator for records in all shards. :rtype: :class:`~bloop.stream.Stream` :raises bloop.exceptions.InvalidStream: if the model does not have a stream. """ validate_not_abstract(model) if not model.Meta.stream or not model.Meta.stream.get("arn"): raise InvalidStream("{!r} does not have a stream arn".format(model)) stream = Stream(model=model, engine=self) stream.move_to(position=position) return stream
[ "def", "stream", "(", "self", ",", "model", ",", "position", ")", ":", "validate_not_abstract", "(", "model", ")", "if", "not", "model", ".", "Meta", ".", "stream", "or", "not", "model", ".", "Meta", ".", "stream", ".", "get", "(", "\"arn\"", ")", ":", "raise", "InvalidStream", "(", "\"{!r} does not have a stream arn\"", ".", "format", "(", "model", ")", ")", "stream", "=", "Stream", "(", "model", "=", "model", ",", "engine", "=", "self", ")", "stream", ".", "move_to", "(", "position", "=", "position", ")", "return", "stream" ]
Create a :class:`~bloop.stream.Stream` that provides approximate chronological ordering. .. code-block:: pycon # Create a user so we have a record >>> engine = Engine() >>> user = User(id=3, email="user@domain.com") >>> engine.save(user) >>> user.email = "admin@domain.com" >>> engine.save(user) # First record lacks an "old" value since it's an insert >>> stream = engine.stream(User, "trim_horizon") >>> next(stream) {'key': None, 'old': None, 'new': User(email='user@domain.com', id=3, verified=None), 'meta': { 'created_at': datetime.datetime(2016, 10, 23, ...), 'event': { 'id': '3fe6d339b7cb19a1474b3d853972c12a', 'type': 'insert', 'version': '1.1'}, 'sequence_number': '700000000007366876916'} } :param model: The model to stream records from. :param position: "trim_horizon", "latest", a stream token, or a :class:`datetime.datetime`. :return: An iterator for records in all shards. :rtype: :class:`~bloop.stream.Stream` :raises bloop.exceptions.InvalidStream: if the model does not have a stream.
[ "Create", "a", ":", "class", ":", "~bloop", ".", "stream", ".", "Stream", "that", "provides", "approximate", "chronological", "ordering", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/engine.py#L316-L355
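A sketch of persisting a stream position and resuming later; assumes the model's Meta declares a stream (e.g. stream = {"include": ["new", "old"]}) and that the Stream exposes its state as a JSON-friendly token:

import json

stream = engine.stream(User, "trim_horizon")
next(stream)
# the token is JSON-serializable; store it to resume from the same position later
saved = json.dumps(stream.token)
resumed = engine.stream(User, json.loads(saved))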
numberoverzero/bloop
bloop/engine.py
Engine.transaction
def transaction(self, mode="w"): """ Create a new :class:`~bloop.transactions.ReadTransaction` or :class:`~bloop.transactions.WriteTransaction`. As a context manager, the transaction commits when the block exits: .. code-block:: pycon >>> engine = Engine() >>> user = User(id=3, email="user@domain.com") >>> tweet = Tweet(id=42, data="hello, world") >>> with engine.transaction("w") as tx: ... tx.delete(user) ... tx.save(tweet, condition=Tweet.id.is_(None)) Or manually calling prepare and commit: .. code-block:: pycon >>> engine = Engine() >>> user = User(id=3, email="user@domain.com") >>> tweet = Tweet(id=42, data="hello, world") >>> tx = engine.transaction("w") >>> tx.delete(user) >>> tx.save(tweet, condition=Tweet.id.is_(None)) >>> tx.prepare().commit() :param str mode: Either "r" or "w" to create a ReadTransaction or WriteTransaction. Default is "w". :return: A new transaction that can be committed. :rtype: :class:`~bloop.transactions.ReadTransaction` or :class:`~bloop.transactions.WriteTransaction` """ if mode == "r": cls = ReadTransaction elif mode == "w": cls = WriteTransaction else: raise ValueError(f"unknown mode {mode}") return cls(self)
python
def transaction(self, mode="w"): """ Create a new :class:`~bloop.transactions.ReadTransaction` or :class:`~bloop.transactions.WriteTransaction`. As a context manager, the transaction commits when the block exits: .. code-block:: pycon >>> engine = Engine() >>> user = User(id=3, email="user@domain.com") >>> tweet = Tweet(id=42, data="hello, world") >>> with engine.transaction("w") as tx: ... tx.delete(user) ... tx.save(tweet, condition=Tweet.id.is_(None)) Or manually calling prepare and commit: .. code-block:: pycon >>> engine = Engine() >>> user = User(id=3, email="user@domain.com") >>> tweet = Tweet(id=42, data="hello, world") >>> tx = engine.transaction("w") >>> tx.delete(user) >>> tx.save(tweet, condition=Tweet.id.is_(None)) >>> tx.prepare().commit() :param str mode: Either "r" or "w" to create a ReadTransaction or WriteTransaction. Default is "w". :return: A new transaction that can be committed. :rtype: :class:`~bloop.transactions.ReadTransaction` or :class:`~bloop.transactions.WriteTransaction` """ if mode == "r": cls = ReadTransaction elif mode == "w": cls = WriteTransaction else: raise ValueError(f"unknown mode {mode}") return cls(self)
[ "def", "transaction", "(", "self", ",", "mode", "=", "\"w\"", ")", ":", "if", "mode", "==", "\"r\"", ":", "cls", "=", "ReadTransaction", "elif", "mode", "==", "\"w\"", ":", "cls", "=", "WriteTransaction", "else", ":", "raise", "ValueError", "(", "f\"unknown mode {mode}\"", ")", "return", "cls", "(", "self", ")" ]
Create a new :class:`~bloop.transactions.ReadTransaction` or :class:`~bloop.transactions.WriteTransaction`. As a context manager, the transaction commits when the block exits: .. code-block:: pycon >>> engine = Engine() >>> user = User(id=3, email="user@domain.com") >>> tweet = Tweet(id=42, data="hello, world") >>> with engine.transaction("w") as tx: ... tx.delete(user) ... tx.save(tweet, condition=Tweet.id.is_(None)) Or manually calling prepare and commit: .. code-block:: pycon >>> engine = Engine() >>> user = User(id=3, email="user@domain.com") >>> tweet = Tweet(id=42, data="hello, world") >>> tx = engine.transaction("w") >>> tx.delete(user) >>> tx.save(tweet, condition=Tweet.id.is_(None)) >>> tx.prepare().commit() :param str mode: Either "r" or "w" to create a ReadTransaction or WriteTransaction. Default is "w". :return: A new transaction that can be committed. :rtype: :class:`~bloop.transactions.ReadTransaction` or :class:`~bloop.transactions.WriteTransaction`
[ "Create", "a", "new", ":", "class", ":", "~bloop", ".", "transactions", ".", "ReadTransaction", "or", ":", "class", ":", "~bloop", ".", "transactions", ".", "WriteTransaction", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/engine.py#L357-L394
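The docstring shows write transactions; a read-transaction sketch under the same illustrative objects (ReadTransaction.load is assumed to mirror Engine.load):

with engine.transaction("r") as rx:
    rx.load(user)  # committed as a transactional read when the block exits

# any other mode string raises ValueError("unknown mode ...")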
numberoverzero/bloop
bloop/types.py
Type._dump
def _dump(self, value, **kwargs): """Entry point for serializing values. Most custom types should use :func:`~bloop.types.Type.dynamo_dump`. This wraps the return value of :func:`~bloop.types.Type.dynamo_dump` in DynamoDB's wire format. For example, serializing a string enum to an int: .. code-block:: python value = "green" # dynamo_dump("green") = 2 _dump(value) == {"N": 2} If a complex type calls this function with ``None``, it will forward ``None`` to :func:`~bloop.types.Type.dynamo_dump`. This can happen when dumping e.g. a sparse :class:`~bloop.types.Map`, or a missing (not set) value. """ value = self.dynamo_dump(value, **kwargs) if value is None: return value return {self.backing_type: value}
python
def _dump(self, value, **kwargs): """Entry point for serializing values. Most custom types should use :func:`~bloop.types.Type.dynamo_dump`. This wraps the return value of :func:`~bloop.types.Type.dynamo_dump` in DynamoDB's wire format. For example, serializing a string enum to an int: .. code-block:: python value = "green" # dynamo_dump("green") = 2 _dump(value) == {"N": 2} If a complex type calls this function with ``None``, it will forward ``None`` to :func:`~bloop.types.Type.dynamo_dump`. This can happen when dumping e.g. a sparse :class:`~bloop.types.Map`, or a missing (not set) value. """ value = self.dynamo_dump(value, **kwargs) if value is None: return value return {self.backing_type: value}
[ "def", "_dump", "(", "self", ",", "value", ",", "*", "*", "kwargs", ")", ":", "value", "=", "self", ".", "dynamo_dump", "(", "value", ",", "*", "*", "kwargs", ")", "if", "value", "is", "None", ":", "return", "value", "return", "{", "self", ".", "backing_type", ":", "value", "}" ]
Entry point for serializing values. Most custom types should use :func:`~bloop.types.Type.dynamo_dump`. This wraps the return value of :func:`~bloop.types.Type.dynamo_dump` in DynamoDB's wire format. For example, serializing a string enum to an int: .. code-block:: python value = "green" # dynamo_dump("green") = 2 _dump(value) == {"N": 2} If a complex type calls this function with ``None``, it will forward ``None`` to :func:`~bloop.types.Type.dynamo_dump`. This can happen when dumping e.g. a sparse :class:`~bloop.types.Map`, or a missing (not set) value.
[ "Entry", "point", "for", "serializing", "values", ".", "Most", "custom", "types", "should", "use", ":", "func", ":", "~bloop", ".", "types", ".", "Type", ".", "dynamo_dump", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/types.py#L101-L120
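A sketch of the hook this wraps: a custom enum-like type. The Color type is illustrative, and its dump returns a string because real DynamoDB "N" values are strings on the wire:

from bloop.types import Type

class Color(Type):
    python_type = str
    backing_type = "N"
    colors = ["red", "blue", "green"]

    def dynamo_dump(self, value, *, context, **kwargs):
        if value is None:
            return None
        return str(self.colors.index(value))  # "green" -> "2"

    def dynamo_load(self, value, *, context, **kwargs):
        if value is None:
            return None
        return self.colors[int(value)]  # "2" -> "green"

# _dump then wraps the result in wire format:
# Color()._dump("green", context={}) == {"N": "2"}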
numberoverzero/bloop
bloop/types.py
Type._load
def _load(self, value, **kwargs): """Entry point for deserializing values. Most custom types should use :func:`~bloop.types.Type.dynamo_load`. This unpacks DynamoDB's wire format and calls :func:`~bloop.types.Type.dynamo_load` on the inner value. For example, deserializing an int to a string enum: .. code-block:: python value = {"N": 2} # dynamo_load(2) = "green" _load(value) == "green" If a complex type calls this function with ``None``, it will forward ``None`` to :func:`~bloop.types.Type.dynamo_load`. This can happen when loading e.g. a sparse :class:`~bloop.types.Map`. """ if value is not None: value = next(iter(value.values())) return self.dynamo_load(value, **kwargs)
python
def _load(self, value, **kwargs): """Entry point for deserializing values. Most custom types should use :func:`~bloop.types.Type.dynamo_load`. This unpacks DynamoDB's wire format and calls :func:`~bloop.types.Type.dynamo_load` on the inner value. For example, deserializing an int to a string enum: .. code-block:: python value = {"N": 2} # dynamo_load(2) = "green" _load(value) == "green" If a complex type calls this function with ``None``, it will forward ``None`` to :func:`~bloop.types.Type.dynamo_load`. This can happen when loading e.g. a sparse :class:`~bloop.types.Map`. """ if value is not None: value = next(iter(value.values())) return self.dynamo_load(value, **kwargs)
[ "def", "_load", "(", "self", ",", "value", ",", "*", "*", "kwargs", ")", ":", "if", "value", "is", "not", "None", ":", "value", "=", "next", "(", "iter", "(", "value", ".", "values", "(", ")", ")", ")", "return", "self", ".", "dynamo_load", "(", "value", ",", "*", "*", "kwargs", ")" ]
Entry point for deserializing values. Most custom types should use :func:`~bloop.types.Type.dynamo_load`. This unpacks DynamoDB's wire format and calls :func:`~bloop.types.Type.dynamo_load` on the inner value. For example, deserializing an int to a string enum: .. code-block:: python value = {"N": 2} # dynamo_load(2) = "green" _load(value) == "green" If a complex type calls this function with ``None``, it will forward ``None`` to :func:`~bloop.types.Type.dynamo_load`. This can happen when loading e.g. a sparse :class:`~bloop.types.Map`.
[ "Entry", "point", "for", "deserializing", "values", ".", "Most", "custom", "types", "should", "use", ":", "func", ":", "~bloop", ".", "types", ".", "Type", ".", "dynamo_load", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/types.py#L122-L139
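The unwrapping step in isolation; wire values are single-key dicts, so next(iter(...)) extracts the inner value regardless of backing type:

wire = {"N": "2"}
inner = next(iter(wire.values()))
assert inner == "2"
# with the illustrative Color type above: Color()._load(wire, context={}) == "green"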
numberoverzero/bloop
bloop/types.py
DynamicType.backing_type_for
def backing_type_for(value): """Returns the DynamoDB backing type for a given python value's type :: 4 -> 'N' ['x', 3] -> 'L' {2, 4} -> 'SS' """ if isinstance(value, str): vtype = "S" elif isinstance(value, bytes): vtype = "B" # NOTE: numbers.Number check must come **AFTER** bool check since isinstance(True, numbers.Number) elif isinstance(value, bool): vtype = "BOOL" elif isinstance(value, numbers.Number): vtype = "N" elif isinstance(value, dict): vtype = "M" elif isinstance(value, list): vtype = "L" elif isinstance(value, set): if not value: vtype = "SS" # doesn't matter, Set(x) should dump an empty set the same for all x else: inner = next(iter(value)) if isinstance(inner, str): vtype = "SS" elif isinstance(inner, bytes): vtype = "BS" elif isinstance(inner, numbers.Number): vtype = "NS" else: raise ValueError(f"Unknown set type for inner value {inner!r}") else: raise ValueError(f"Can't dump unexpected type {type(value)!r} for value {value!r}") return vtype
python
def backing_type_for(value): """Returns the DynamoDB backing type for a given python value's type :: 4 -> 'N' ['x', 3] -> 'L' {2, 4} -> 'SS' """ if isinstance(value, str): vtype = "S" elif isinstance(value, bytes): vtype = "B" # NOTE: numbers.Number check must come **AFTER** bool check since isinstance(True, numbers.Number) elif isinstance(value, bool): vtype = "BOOL" elif isinstance(value, numbers.Number): vtype = "N" elif isinstance(value, dict): vtype = "M" elif isinstance(value, list): vtype = "L" elif isinstance(value, set): if not value: vtype = "SS" # doesn't matter, Set(x) should dump an empty set the same for all x else: inner = next(iter(value)) if isinstance(inner, str): vtype = "SS" elif isinstance(inner, bytes): vtype = "BS" elif isinstance(inner, numbers.Number): vtype = "NS" else: raise ValueError(f"Unknown set type for inner value {inner!r}") else: raise ValueError(f"Can't dump unexpected type {type(value)!r} for value {value!r}") return vtype
[ "def", "backing_type_for", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "str", ")", ":", "vtype", "=", "\"S\"", "elif", "isinstance", "(", "value", ",", "bytes", ")", ":", "vtype", "=", "\"B\"", "# NOTE: numbers.Number check must come **AFTER** bool check since isinstance(True, numbers.Number)", "elif", "isinstance", "(", "value", ",", "bool", ")", ":", "vtype", "=", "\"BOOL\"", "elif", "isinstance", "(", "value", ",", "numbers", ".", "Number", ")", ":", "vtype", "=", "\"N\"", "elif", "isinstance", "(", "value", ",", "dict", ")", ":", "vtype", "=", "\"M\"", "elif", "isinstance", "(", "value", ",", "list", ")", ":", "vtype", "=", "\"L\"", "elif", "isinstance", "(", "value", ",", "set", ")", ":", "if", "not", "value", ":", "vtype", "=", "\"SS\"", "# doesn't matter, Set(x) should dump an empty set the same for all x", "else", ":", "inner", "=", "next", "(", "iter", "(", "value", ")", ")", "if", "isinstance", "(", "inner", ",", "str", ")", ":", "vtype", "=", "\"SS\"", "elif", "isinstance", "(", "inner", ",", "bytes", ")", ":", "vtype", "=", "\"BS\"", "elif", "isinstance", "(", "inner", ",", "numbers", ".", "Number", ")", ":", "vtype", "=", "\"NS\"", "else", ":", "raise", "ValueError", "(", "f\"Unknown set type for inner value {inner!r}\"", ")", "else", ":", "raise", "ValueError", "(", "f\"Can't dump unexpected type {type(value)!r} for value {value!r}\"", ")", "return", "vtype" ]
Returns the DynamoDB backing type for a given python value's type :: 4 -> 'N' ['x', 3] -> 'L' {2, 4} -> 'SS'
[ "Returns", "the", "DynamoDB", "backing", "type", "for", "a", "given", "python", "value", "s", "type" ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/types.py#L637-L674
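Why the bool branch must precede the numbers.Number branch; runnable standalone:

import numbers

print(isinstance(True, numbers.Number))  # True -- bool subclasses int
# checking numbers.Number first would classify True as "N" instead of "BOOL"

# expected classifications per the function above:
#   "x"      -> "S"
#   b"x"     -> "B"
#   True     -> "BOOL"
#   4        -> "N"
#   {"a": 1} -> "M"
#   ["x", 3] -> "L"
#   {2, 4}   -> "NS"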
numberoverzero/bloop
examples/replication.py
stream_replicate
def stream_replicate(): """Monitor changes in approximately real-time and replicate them""" stream = primary.stream(SomeDataBlob, "trim_horizon") next_heartbeat = pendulum.now() while True: now = pendulum.now() if now >= next_heartbeat: stream.heartbeat() next_heartbeat = now.add(minutes=10) record = next(stream) if record is None: continue if record["new"] is not None: replica.save(record["new"]) else: replica.delete(record["old"])
python
def stream_replicate(): """Monitor changes in approximately real-time and replicate them""" stream = primary.stream(SomeDataBlob, "trim_horizon") next_heartbeat = pendulum.now() while True: now = pendulum.now() if now >= next_heartbeat: stream.heartbeat() next_heartbeat = now.add(minutes=10) record = next(stream) if record is None: continue if record["new"] is not None: replica.save(record["new"]) else: replica.delete(record["old"])
[ "def", "stream_replicate", "(", ")", ":", "stream", "=", "primary", ".", "stream", "(", "SomeDataBlob", ",", "\"trim_horizon\"", ")", "next_heartbeat", "=", "pendulum", ".", "now", "(", ")", "while", "True", ":", "now", "=", "pendulum", ".", "now", "(", ")", "if", "now", ">=", "next_heartbeat", ":", "stream", ".", "heartbeat", "(", ")", "next_heartbeat", "=", "now", ".", "add", "(", "minutes", "=", "10", ")", "record", "=", "next", "(", "stream", ")", "if", "record", "is", "None", ":", "continue", "if", "record", "[", "\"new\"", "]", "is", "not", "None", ":", "replica", ".", "save", "(", "record", "[", "\"new\"", "]", ")", "else", ":", "replica", ".", "delete", "(", "record", "[", "\"old\"", "]", ")" ]
Monitor changes in approximately real-time and replicate them
[ "Monitor", "changes", "in", "approximately", "real", "-", "time", "and", "replicate", "them" ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/examples/replication.py#L42-L58
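The same heartbeat cadence as a standard-library sketch, without the pendulum dependency (stream stands for the object returned by primary.stream above):

import datetime

def utcnow():
    return datetime.datetime.now(datetime.timezone.utc)

deadline = utcnow()
while True:
    now = utcnow()
    if now >= deadline:
        stream.heartbeat()  # shard iterators expire after 15 minutes; 10 leaves headroom
        deadline = now + datetime.timedelta(minutes=10)
    record = next(stream)
    if record is not None:
        ...  # replicate record["new"] / record["old"] as above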
numberoverzero/bloop
bloop/stream/coordinator.py
_move_stream_endpoint
def _move_stream_endpoint(coordinator, position): """Move to the "trim_horizon" or "latest" of the entire stream.""" # 0) Everything will be rebuilt from DescribeStream. stream_arn = coordinator.stream_arn coordinator.roots.clear() coordinator.active.clear() coordinator.buffer.clear() # 1) Build a Dict[str, Shard] of the current Stream from a DescribeStream call current_shards = coordinator.session.describe_stream(stream_arn=stream_arn)["Shards"] current_shards = unpack_shards(current_shards, stream_arn, coordinator.session) # 2) Roots are any shards without parents. coordinator.roots.extend(shard for shard in current_shards.values() if not shard.parent) # 3.0) Stream trim_horizon is the combined trim_horizon of all roots. if position == "trim_horizon": for shard in coordinator.roots: shard.jump_to(iterator_type="trim_horizon") coordinator.active.extend(coordinator.roots) # 3.1) Stream latest is the combined latest of all shards without children. else: for root in coordinator.roots: for shard in root.walk_tree(): if not shard.children: shard.jump_to(iterator_type="latest") coordinator.active.append(shard)
python
def _move_stream_endpoint(coordinator, position): """Move to the "trim_horizon" or "latest" of the entire stream.""" # 0) Everything will be rebuilt from DescribeStream. stream_arn = coordinator.stream_arn coordinator.roots.clear() coordinator.active.clear() coordinator.buffer.clear() # 1) Build a Dict[str, Shard] of the current Stream from a DescribeStream call current_shards = coordinator.session.describe_stream(stream_arn=stream_arn)["Shards"] current_shards = unpack_shards(current_shards, stream_arn, coordinator.session) # 2) Roots are any shards without parents. coordinator.roots.extend(shard for shard in current_shards.values() if not shard.parent) # 3.0) Stream trim_horizon is the combined trim_horizon of all roots. if position == "trim_horizon": for shard in coordinator.roots: shard.jump_to(iterator_type="trim_horizon") coordinator.active.extend(coordinator.roots) # 3.1) Stream latest is the combined latest of all shards without children. else: for root in coordinator.roots: for shard in root.walk_tree(): if not shard.children: shard.jump_to(iterator_type="latest") coordinator.active.append(shard)
[ "def", "_move_stream_endpoint", "(", "coordinator", ",", "position", ")", ":", "# 0) Everything will be rebuilt from DescribeStream.", "stream_arn", "=", "coordinator", ".", "stream_arn", "coordinator", ".", "roots", ".", "clear", "(", ")", "coordinator", ".", "active", ".", "clear", "(", ")", "coordinator", ".", "buffer", ".", "clear", "(", ")", "# 1) Build a Dict[str, Shard] of the current Stream from a DescribeStream call", "current_shards", "=", "coordinator", ".", "session", ".", "describe_stream", "(", "stream_arn", "=", "stream_arn", ")", "[", "\"Shards\"", "]", "current_shards", "=", "unpack_shards", "(", "current_shards", ",", "stream_arn", ",", "coordinator", ".", "session", ")", "# 2) Roots are any shards without parents.", "coordinator", ".", "roots", ".", "extend", "(", "shard", "for", "shard", "in", "current_shards", ".", "values", "(", ")", "if", "not", "shard", ".", "parent", ")", "# 3.0) Stream trim_horizon is the combined trim_horizon of all roots.", "if", "position", "==", "\"trim_horizon\"", ":", "for", "shard", "in", "coordinator", ".", "roots", ":", "shard", ".", "jump_to", "(", "iterator_type", "=", "\"trim_horizon\"", ")", "coordinator", ".", "active", ".", "extend", "(", "coordinator", ".", "roots", ")", "# 3.1) Stream latest is the combined latest of all shards without children.", "else", ":", "for", "root", "in", "coordinator", ".", "roots", ":", "for", "shard", "in", "root", ".", "walk_tree", "(", ")", ":", "if", "not", "shard", ".", "children", ":", "shard", ".", "jump_to", "(", "iterator_type", "=", "\"latest\"", ")", "coordinator", ".", "active", ".", "append", "(", "shard", ")" ]
Move to the "trim_horizon" or "latest" of the entire stream.
[ "Move", "to", "the", "trim_horizon", "or", "latest", "of", "the", "entire", "stream", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/stream/coordinator.py#L214-L240
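The root/leaf selection in isolation; a toy parent map stands in for the unpacked Shard objects:

parents = {"s1": None, "s2": "s1", "s3": "s1", "s4": "s3"}  # shard_id -> parent_id
children = {}
for sid, parent in parents.items():
    children.setdefault(parent, []).append(sid)

roots = [sid for sid, parent in parents.items() if parent is None]  # trim_horizon targets
leaves = [sid for sid in parents if sid not in children]            # latest targets
print(roots, leaves)  # ['s1'] ['s2', 's4']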
numberoverzero/bloop
bloop/stream/coordinator.py
_move_stream_time
def _move_stream_time(coordinator, time): """Scan through the *entire* Stream for the first record after ``time``. This is an extremely expensive, naive algorithm that starts at trim_horizon and simply dumps records into the void until the first hit. General improvements in performance are tough; we can use the fact that Shards have a max life of 24hr to pick a pretty-good starting point for any Shard trees with 6 generations. Even then we can't know how close the oldest one is to rolling off so we either hit trim_horizon, or iterate an extra Shard more than we need to. The corner cases are worse; short trees, recent splits, trees with different branch heights. """ if time > datetime.datetime.now(datetime.timezone.utc): _move_stream_endpoint(coordinator, "latest") return _move_stream_endpoint(coordinator, "trim_horizon") shard_trees = collections.deque(coordinator.roots) while shard_trees: shard = shard_trees.popleft() records = shard.seek_to(time) # Success! This section of some Shard tree is at the desired time. if records: coordinator.buffer.push_all((record, shard) for record in records) # Closed shard, keep searching its children. elif shard.exhausted: coordinator.remove_shard(shard, drop_buffered_records=True) shard_trees.extend(shard.children)
python
def _move_stream_time(coordinator, time): """Scan through the *entire* Stream for the first record after ``time``. This is an extremely expensive, naive algorithm that starts at trim_horizon and simply dumps records into the void until the first hit. General improvements in performance are tough; we can use the fact that Shards have a max life of 24hr to pick a pretty-good starting point for any Shard trees with 6 generations. Even then we can't know how close the oldest one is to rolling off so we either hit trim_horizon, or iterate an extra Shard more than we need to. The corner cases are worse; short trees, recent splits, trees with different branch heights. """ if time > datetime.datetime.now(datetime.timezone.utc): _move_stream_endpoint(coordinator, "latest") return _move_stream_endpoint(coordinator, "trim_horizon") shard_trees = collections.deque(coordinator.roots) while shard_trees: shard = shard_trees.popleft() records = shard.seek_to(time) # Success! This section of some Shard tree is at the desired time. if records: coordinator.buffer.push_all((record, shard) for record in records) # Closed shard, keep searching its children. elif shard.exhausted: coordinator.remove_shard(shard, drop_buffered_records=True) shard_trees.extend(shard.children)
[ "def", "_move_stream_time", "(", "coordinator", ",", "time", ")", ":", "if", "time", ">", "datetime", ".", "datetime", ".", "now", "(", "datetime", ".", "timezone", ".", "utc", ")", ":", "_move_stream_endpoint", "(", "coordinator", ",", "\"latest\"", ")", "return", "_move_stream_endpoint", "(", "coordinator", ",", "\"trim_horizon\"", ")", "shard_trees", "=", "collections", ".", "deque", "(", "coordinator", ".", "roots", ")", "while", "shard_trees", ":", "shard", "=", "shard_trees", ".", "popleft", "(", ")", "records", "=", "shard", ".", "seek_to", "(", "time", ")", "# Success! This section of some Shard tree is at the desired time.", "if", "records", ":", "coordinator", ".", "buffer", ".", "push_all", "(", "(", "record", ",", "shard", ")", "for", "record", "in", "records", ")", "# Closed shard, keep searching its children.", "elif", "shard", ".", "exhausted", ":", "coordinator", ".", "remove_shard", "(", "shard", ",", "drop_buffered_records", "=", "True", ")", "shard_trees", ".", "extend", "(", "shard", ".", "children", ")" ]
Scan through the *entire* Stream for the first record after ``time``. This is an extremely expensive, naive algorithm that starts at trim_horizon and simply dumps records into the void until the first hit. General improvements in performance are tough; we can use the fact that Shards have a max life of 24hr to pick a pretty-good starting point for any Shard trees with 6 generations. Even then we can't know how close the oldest one is to rolling off so we either hit trim_horizon, or iterate an extra Shard more than we need to. The corner cases are worse; short trees, recent splits, trees with different branch heights.
[ "Scan", "through", "the", "*", "entire", "*", "Stream", "for", "the", "first", "record", "after", "time", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/stream/coordinator.py#L243-L271
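The breadth-first search skeleton in isolation, with seek and children stubbed as callables; note the real code above descends only when a shard is exhausted:

import collections

def first_records_after(roots, time, seek, children):
    """seek(shard, time) -> records or None; children(shard) -> child shards."""
    hits = []
    queue = collections.deque(roots)
    while queue:
        shard = queue.popleft()
        records = seek(shard, time)
        if records:
            hits.extend(records)           # this branch reached the target time
        else:
            queue.extend(children(shard))  # keep searching deeper in the tree
    return hits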
numberoverzero/bloop
bloop/stream/coordinator.py
_move_stream_token
def _move_stream_token(coordinator, token): """Move to the Stream position described by the token. The following rules are applied when interpolation is required: - If a shard does not exist (past the trim_horizon) it is ignored. If that shard had children, its children are also checked against the existing shards. - If none of the shards in the token exist, then InvalidStream is raised. - If a Shard expects its iterator to point to a SequenceNumber that is now past that Shard's trim_horizon, the Shard instead points to trim_horizon. """ stream_arn = coordinator.stream_arn = token["stream_arn"] # 0) Everything will be rebuilt from the DescribeStream masked by the token. coordinator.roots.clear() coordinator.active.clear() coordinator.closed.clear() coordinator.buffer.clear() # Injecting the token gives us access to the standard shard management functions token_shards = unpack_shards(token["shards"], stream_arn, coordinator.session) coordinator.roots = [shard for shard in token_shards.values() if not shard.parent] coordinator.active.extend(token_shards[shard_id] for shard_id in token["active"]) # 1) Build a Dict[str, Shard] of the current Stream from a DescribeStream call current_shards = coordinator.session.describe_stream(stream_arn=stream_arn)["Shards"] current_shards = unpack_shards(current_shards, stream_arn, coordinator.session) # 2) Trying to find an intersection with the actual Stream by walking each root shard's tree. # Prune any Shard with no children that's not part of the actual Stream. # Raise InvalidStream if the entire token is pruned. unverified = collections.deque(coordinator.roots) while unverified: shard = unverified.popleft() if shard.shard_id not in current_shards: logger.info("Unknown or expired shard \"{}\" - pruning from stream token".format(shard.shard_id)) coordinator.remove_shard(shard, drop_buffered_records=True) unverified.extend(shard.children) # 3) Everything was pruned, so the token describes an unknown stream. if not coordinator.roots: raise InvalidStream("This token has no relation to the actual Stream.") # 4) Now that everything's verified, grab new iterators for the coordinator's active Shards. for shard in coordinator.active: try: if shard.iterator_type is None: # Descendant of an unknown shard shard.iterator_type = "trim_horizon" # Move back to the token's specified position shard.jump_to(iterator_type=shard.iterator_type, sequence_number=shard.sequence_number) except RecordsExpired: # This token shard's sequence_number is beyond the trim_horizon. # The next closest record is at trim_horizon. msg = "SequenceNumber \"{}\" in shard \"{}\" beyond trim horizon: jumping to trim_horizon" logger.info(msg.format(shard.sequence_number, shard.shard_id)) shard.jump_to(iterator_type="trim_horizon")
python
def _move_stream_token(coordinator, token): """Move to the Stream position described by the token. The following rules are applied when interpolation is required: - If a shard does not exist (past the trim_horizon) it is ignored. If that shard had children, its children are also checked against the existing shards. - If none of the shards in the token exist, then InvalidStream is raised. - If a Shard expects its iterator to point to a SequenceNumber that is now past that Shard's trim_horizon, the Shard instead points to trim_horizon. """ stream_arn = coordinator.stream_arn = token["stream_arn"] # 0) Everything will be rebuilt from the DescribeStream masked by the token. coordinator.roots.clear() coordinator.active.clear() coordinator.closed.clear() coordinator.buffer.clear() # Injecting the token gives us access to the standard shard management functions token_shards = unpack_shards(token["shards"], stream_arn, coordinator.session) coordinator.roots = [shard for shard in token_shards.values() if not shard.parent] coordinator.active.extend(token_shards[shard_id] for shard_id in token["active"]) # 1) Build a Dict[str, Shard] of the current Stream from a DescribeStream call current_shards = coordinator.session.describe_stream(stream_arn=stream_arn)["Shards"] current_shards = unpack_shards(current_shards, stream_arn, coordinator.session) # 2) Trying to find an intersection with the actual Stream by walking each root shard's tree. # Prune any Shard with no children that's not part of the actual Stream. # Raise InvalidStream if the entire token is pruned. unverified = collections.deque(coordinator.roots) while unverified: shard = unverified.popleft() if shard.shard_id not in current_shards: logger.info("Unknown or expired shard \"{}\" - pruning from stream token".format(shard.shard_id)) coordinator.remove_shard(shard, drop_buffered_records=True) unverified.extend(shard.children) # 3) Everything was pruned, so the token describes an unknown stream. if not coordinator.roots: raise InvalidStream("This token has no relation to the actual Stream.") # 4) Now that everything's verified, grab new iterators for the coordinator's active Shards. for shard in coordinator.active: try: if shard.iterator_type is None: # Descendant of an unknown shard shard.iterator_type = "trim_horizon" # Move back to the token's specified position shard.jump_to(iterator_type=shard.iterator_type, sequence_number=shard.sequence_number) except RecordsExpired: # This token shard's sequence_number is beyond the trim_horizon. # The next closest record is at trim_horizon. msg = "SequenceNumber \"{}\" in shard \"{}\" beyond trim horizon: jumping to trim_horizon" logger.info(msg.format(shard.sequence_number, shard.shard_id)) shard.jump_to(iterator_type="trim_horizon")
[ "def", "_move_stream_token", "(", "coordinator", ",", "token", ")", ":", "stream_arn", "=", "coordinator", ".", "stream_arn", "=", "token", "[", "\"stream_arn\"", "]", "# 0) Everything will be rebuilt from the DescribeStream masked by the token.", "coordinator", ".", "roots", ".", "clear", "(", ")", "coordinator", ".", "active", ".", "clear", "(", ")", "coordinator", ".", "closed", ".", "clear", "(", ")", "coordinator", ".", "buffer", ".", "clear", "(", ")", "# Injecting the token gives us access to the standard shard management functions", "token_shards", "=", "unpack_shards", "(", "token", "[", "\"shards\"", "]", ",", "stream_arn", ",", "coordinator", ".", "session", ")", "coordinator", ".", "roots", "=", "[", "shard", "for", "shard", "in", "token_shards", ".", "values", "(", ")", "if", "not", "shard", ".", "parent", "]", "coordinator", ".", "active", ".", "extend", "(", "token_shards", "[", "shard_id", "]", "for", "shard_id", "in", "token", "[", "\"active\"", "]", ")", "# 1) Build a Dict[str, Shard] of the current Stream from a DescribeStream call", "current_shards", "=", "coordinator", ".", "session", ".", "describe_stream", "(", "stream_arn", "=", "stream_arn", ")", "[", "\"Shards\"", "]", "current_shards", "=", "unpack_shards", "(", "current_shards", ",", "stream_arn", ",", "coordinator", ".", "session", ")", "# 2) Trying to find an intersection with the actual Stream by walking each root shard's tree.", "# Prune any Shard with no children that's not part of the actual Stream.", "# Raise InvalidStream if the entire token is pruned.", "unverified", "=", "collections", ".", "deque", "(", "coordinator", ".", "roots", ")", "while", "unverified", ":", "shard", "=", "unverified", ".", "popleft", "(", ")", "if", "shard", ".", "shard_id", "not", "in", "current_shards", ":", "logger", ".", "info", "(", "\"Unknown or expired shard \\\"{}\\\" - pruning from stream token\"", ".", "format", "(", "shard", ".", "shard_id", ")", ")", "coordinator", ".", "remove_shard", "(", "shard", ",", "drop_buffered_records", "=", "True", ")", "unverified", ".", "extend", "(", "shard", ".", "children", ")", "# 3) Everything was pruned, so the token describes an unknown stream.", "if", "not", "coordinator", ".", "roots", ":", "raise", "InvalidStream", "(", "\"This token has no relation to the actual Stream.\"", ")", "# 4) Now that everything's verified, grab new iterators for the coordinator's active Shards.", "for", "shard", "in", "coordinator", ".", "active", ":", "try", ":", "if", "shard", ".", "iterator_type", "is", "None", ":", "# Descendant of an unknown shard", "shard", ".", "iterator_type", "=", "\"trim_horizon\"", "# Move back to the token's specified position", "shard", ".", "jump_to", "(", "iterator_type", "=", "shard", ".", "iterator_type", ",", "sequence_number", "=", "shard", ".", "sequence_number", ")", "except", "RecordsExpired", ":", "# This token shard's sequence_number is beyond the trim_horizon.", "# The next closest record is at trim_horizon.", "msg", "=", "\"SequenceNumber \\\"{}\\\" in shard \\\"{}\\\" beyond trim horizon: jumping to trim_horizon\"", "logger", ".", "info", "(", "msg", ".", "format", "(", "shard", ".", "sequence_number", ",", "shard", ".", "shard_id", ")", ")", "shard", ".", "jump_to", "(", "iterator_type", "=", "\"trim_horizon\"", ")" ]
Move to the Stream position described by the token. The following rules are applied when interpolation is required: - If a shard does not exist (past the trim_horizon) it is ignored. If that shard had children, its children are also checked against the existing shards. - If none of the shards in the token exist, then InvalidStream is raised. - If a Shard expects its iterator to point to a SequenceNumber that is now past that Shard's trim_horizon, the Shard instead points to trim_horizon.
[ "Move", "to", "the", "Stream", "position", "described", "by", "the", "token", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/stream/coordinator.py#L274-L328
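The pruning rule in isolation, following the docstring's rules: token shards missing from the live stream are dropped and their children re-checked; an empty result corresponds to InvalidStream:

import collections

def prune(token_roots, live_ids, children):
    verified = []
    unverified = collections.deque(token_roots)
    while unverified:
        shard = unverified.popleft()
        if shard not in live_ids:
            unverified.extend(children.get(shard, []))  # promote children for checking
        else:
            verified.append(shard)
    return verified  # [] -> the token has no relation to the actual stream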
numberoverzero/bloop
bloop/stream/coordinator.py
Coordinator.advance_shards
def advance_shards(self): """Poll active shards for records and insert them into the buffer. Rotate exhausted shards. Returns immediately if the buffer isn't empty. """ # Don't poll shards when there are pending records. if self.buffer: return # 0) Collect new records from all active shards. record_shard_pairs = [] for shard in self.active: records = next(shard) if records: record_shard_pairs.extend((record, shard) for record in records) self.buffer.push_all(record_shard_pairs) self.migrate_closed_shards()
python
def advance_shards(self): """Poll active shards for records and insert them into the buffer. Rotate exhausted shards. Returns immediately if the buffer isn't empty. """ # Don't poll shards when there are pending records. if self.buffer: return # 0) Collect new records from all active shards. record_shard_pairs = [] for shard in self.active: records = next(shard) if records: record_shard_pairs.extend((record, shard) for record in records) self.buffer.push_all(record_shard_pairs) self.migrate_closed_shards()
[ "def", "advance_shards", "(", "self", ")", ":", "# Don't poll shards when there are pending records.", "if", "self", ".", "buffer", ":", "return", "# 0) Collect new records from all active shards.", "record_shard_pairs", "=", "[", "]", "for", "shard", "in", "self", ".", "active", ":", "records", "=", "next", "(", "shard", ")", "if", "records", ":", "record_shard_pairs", ".", "extend", "(", "(", "record", ",", "shard", ")", "for", "record", "in", "records", ")", "self", ".", "buffer", ".", "push_all", "(", "record_shard_pairs", ")", "self", ".", "migrate_closed_shards", "(", ")" ]
Poll active shards for records and insert them into the buffer. Rotate exhausted shards. Returns immediately if the buffer isn't empty.
[ "Poll", "active", "shards", "for", "records", "and", "insert", "them", "into", "the", "buffer", ".", "Rotate", "exhausted", "shards", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/stream/coordinator.py#L77-L94
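In practice `advance_shards` is driven indirectly by iterating a stream. A sketch, assuming `stream` came from `engine.stream(...)` and `process` is a placeholder; the None-on-empty behavior is how bloop's Stream reports "no records right now" rather than blocking:

.. code-block:: python

    while True:
        record = next(stream)    # drains the buffer first; polls the active
                                 # shards (advance_shards) only when it's empty
        if record is None:       # nothing available right now
            break
        process(record)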
numberoverzero/bloop
bloop/stream/coordinator.py
Coordinator.heartbeat
def heartbeat(self): """Keep active shards with "trim_horizon", "latest" iterators alive by advancing their iterators.""" for shard in self.active: if shard.sequence_number is None: records = next(shard) # Success! This shard now has an ``at_sequence`` iterator if records: self.buffer.push_all((record, shard) for record in records) self.migrate_closed_shards()
python
def heartbeat(self): """Keep active shards with "trim_horizon", "latest" iterators alive by advancing their iterators.""" for shard in self.active: if shard.sequence_number is None: records = next(shard) # Success! This shard now has an ``at_sequence`` iterator if records: self.buffer.push_all((record, shard) for record in records) self.migrate_closed_shards()
[ "def", "heartbeat", "(", "self", ")", ":", "for", "shard", "in", "self", ".", "active", ":", "if", "shard", ".", "sequence_number", "is", "None", ":", "records", "=", "next", "(", "shard", ")", "# Success! This shard now has an ``at_sequence`` iterator", "if", "records", ":", "self", ".", "buffer", ".", "push_all", "(", "(", "record", ",", "shard", ")", "for", "record", "in", "records", ")", "self", ".", "migrate_closed_shards", "(", ")" ]
Keep active shards with "trim_horizon", "latest" iterators alive by advancing their iterators.
[ "Keep", "active", "shards", "with", "trim_horizon", "latest", "iterators", "alive", "by", "advancing", "their", "iterators", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/stream/coordinator.py#L96-L104
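Because DynamoDB shard iterators expire after roughly 15 minutes, a long-lived consumer should call `heartbeat` periodically. A sketch with placeholder names (`stream`, `handle`); the 12-minute interval is a conservative margin, not a requirement:

.. code-block:: python

    import time

    next_heartbeat = time.monotonic() + 12 * 60
    while True:
        record = next(stream)
        if record is not None:
            handle(record)
        if time.monotonic() >= next_heartbeat:
            stream.heartbeat()   # only advances shards without a sequence_number
            next_heartbeat = time.monotonic() + 12 * 60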
numberoverzero/bloop
bloop/stream/coordinator.py
Coordinator.token
def token(self): """JSON-serializable representation of the current Stream state. Use :func:`Engine.stream(YourModel, token) <bloop.engine.Engine.stream>` to create an identical stream, or :func:`stream.move_to(token) <bloop.stream.Stream.move_to>` to move an existing stream to this position. :returns: Stream state as a json-friendly dict :rtype: dict """ # 0) Trace roots and active shards active_ids = [] shard_tokens = [] for root in self.roots: for shard in root.walk_tree(): shard_tokens.append(shard.token) # dedupe, stream_arn will be in the root token shard_tokens[-1].pop("stream_arn") active_ids.extend((shard.shard_id for shard in self.active)) # 1) Inject closed shards for shard in self.closed.keys(): active_ids.append(shard.shard_id) shard_tokens.append(shard.token) shard_tokens[-1].pop("stream_arn") return { "stream_arn": self.stream_arn, "active": active_ids, "shards": shard_tokens }
python
def token(self): """JSON-serializable representation of the current Stream state. Use :func:`Engine.stream(YourModel, token) <bloop.engine.Engine.stream>` to create an identical stream, or :func:`stream.move_to(token) <bloop.stream.Stream.move_to>` to move an existing stream to this position. :returns: Stream state as a json-friendly dict :rtype: dict """ # 0) Trace roots and active shards active_ids = [] shard_tokens = [] for root in self.roots: for shard in root.walk_tree(): shard_tokens.append(shard.token) # dedupe, stream_arn will be in the root token shard_tokens[-1].pop("stream_arn") active_ids.extend((shard.shard_id for shard in self.active)) # 1) Inject closed shards for shard in self.closed.keys(): active_ids.append(shard.shard_id) shard_tokens.append(shard.token) shard_tokens[-1].pop("stream_arn") return { "stream_arn": self.stream_arn, "active": active_ids, "shards": shard_tokens }
[ "def", "token", "(", "self", ")", ":", "# 0) Trace roots and active shards", "active_ids", "=", "[", "]", "shard_tokens", "=", "[", "]", "for", "root", "in", "self", ".", "roots", ":", "for", "shard", "in", "root", ".", "walk_tree", "(", ")", ":", "shard_tokens", ".", "append", "(", "shard", ".", "token", ")", "# dedupe, stream_arn will be in the root token", "shard_tokens", "[", "-", "1", "]", ".", "pop", "(", "\"stream_arn\"", ")", "active_ids", ".", "extend", "(", "(", "shard", ".", "shard_id", "for", "shard", "in", "self", ".", "active", ")", ")", "# 1) Inject closed shards", "for", "shard", "in", "self", ".", "closed", ".", "keys", "(", ")", ":", "active_ids", ".", "append", "(", "shard", ".", "shard_id", ")", "shard_tokens", ".", "append", "(", "shard", ".", "token", ")", "shard_tokens", "[", "-", "1", "]", ".", "pop", "(", "\"stream_arn\"", ")", "return", "{", "\"stream_arn\"", ":", "self", ".", "stream_arn", ",", "\"active\"", ":", "active_ids", ",", "\"shards\"", ":", "shard_tokens", "}" ]
JSON-serializable representation of the current Stream state. Use :func:`Engine.stream(YourModel, token) <bloop.engine.Engine.stream>` to create an identical stream, or :func:`stream.move_to(token) <bloop.stream.Stream.move_to>` to move an existing stream to this position. :returns: Stream state as a json-friendly dict :rtype: dict
[ "JSON", "-", "serializable", "representation", "of", "the", "current", "Stream", "state", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/stream/coordinator.py#L131-L160
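The dict returned above has a stable top-level shape (`stream_arn`, `active`, `shards`, straight from the return statement). The values below are illustrative only; real shard ids, sequence numbers, the arn, and the exact per-shard fields will differ:

.. code-block:: python

    token = {
        "stream_arn": "arn:aws:dynamodb:...",        # truncated
        "active": ["shardId-000000000001"],
        "shards": [
            {
                "shard_id": "shardId-000000000001",
                "iterator_type": "after_sequence",
                "sequence_number": "300000000000000499660",
            },
        ],
    }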
numberoverzero/bloop
bloop/stream/coordinator.py
Coordinator.remove_shard
def remove_shard(self, shard, drop_buffered_records=False): """Remove a Shard from the Coordinator. Drops all buffered records from the Shard. If the Shard is active or a root, it is removed and any children promoted to those roles. :param shard: The shard to remove :type shard: :class:`~bloop.stream.shard.Shard` :param bool drop_buffered_records: Whether records from this shard should be removed. Default is False. """ try: self.roots.remove(shard) except ValueError: # Wasn't a root Shard pass else: self.roots.extend(shard.children) try: self.active.remove(shard) except ValueError: # Wasn't an active Shard pass else: self.active.extend(shard.children) if drop_buffered_records: # TODO can this be improved? Gets expensive for high-volume streams with large buffers heap = self.buffer.heap # Clear buffered records from the shard. Each record is (ordering, record, shard) to_remove = [x for x in heap if x[2] is shard] for x in to_remove: heap.remove(x)
python
def remove_shard(self, shard, drop_buffered_records=False): """Remove a Shard from the Coordinator. Drops all buffered records from the Shard. If the Shard is active or a root, it is removed and any children promoted to those roles. :param shard: The shard to remove :type shard: :class:`~bloop.stream.shard.Shard` :param bool drop_buffered_records: Whether records from this shard should be removed. Default is False. """ try: self.roots.remove(shard) except ValueError: # Wasn't a root Shard pass else: self.roots.extend(shard.children) try: self.active.remove(shard) except ValueError: # Wasn't an active Shard pass else: self.active.extend(shard.children) if drop_buffered_records: # TODO can this be improved? Gets expensive for high-volume streams with large buffers heap = self.buffer.heap # Clear buffered records from the shard. Each record is (ordering, record, shard) to_remove = [x for x in heap if x[2] is shard] for x in to_remove: heap.remove(x)
[ "def", "remove_shard", "(", "self", ",", "shard", ",", "drop_buffered_records", "=", "False", ")", ":", "try", ":", "self", ".", "roots", ".", "remove", "(", "shard", ")", "except", "ValueError", ":", "# Wasn't a root Shard", "pass", "else", ":", "self", ".", "roots", ".", "extend", "(", "shard", ".", "children", ")", "try", ":", "self", ".", "active", ".", "remove", "(", "shard", ")", "except", "ValueError", ":", "# Wasn't an active Shard", "pass", "else", ":", "self", ".", "active", ".", "extend", "(", "shard", ".", "children", ")", "if", "drop_buffered_records", ":", "# TODO can this be improved? Gets expensive for high-volume streams with large buffers", "heap", "=", "self", ".", "buffer", ".", "heap", "# Clear buffered records from the shard. Each record is (ordering, record, shard)", "to_remove", "=", "[", "x", "for", "x", "in", "heap", "if", "x", "[", "2", "]", "is", "shard", "]", "for", "x", "in", "to_remove", ":", "heap", ".", "remove", "(", "x", ")" ]
Remove a Shard from the Coordinator. Drops all buffered records from the Shard. If the Shard is active or a root, it is removed and any children promoted to those roles. :param shard: The shard to remove :type shard: :class:`~bloop.stream.shard.Shard` :param bool drop_buffered_records: Whether records from this shard should be removed. Default is False.
[ "Remove", "a", "Shard", "from", "the", "Coordinator", ".", "Drops", "all", "buffered", "records", "from", "the", "Shard", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/stream/coordinator.py#L162-L195
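The try/except/else dance above is a promote-the-children pattern. A self-contained toy version, independent of bloop's Shard class (the `Node` class is invented for illustration):

.. code-block:: python

    class Node:
        def __init__(self, children=()):
            self.children = list(children)

    child_a, child_b = Node(), Node()
    parent = Node([child_a, child_b])
    roots = [parent]

    try:
        roots.remove(parent)
    except ValueError:
        pass                             # wasn't a root; nothing to promote
    else:
        roots.extend(parent.children)    # children take the parent's place

    assert roots == [child_a, child_b]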
numberoverzero/bloop
bloop/stream/coordinator.py
Coordinator.move_to
def move_to(self, position): """Set the Coordinator to a specific endpoint or time, or load state from a token. :param position: "trim_horizon", "latest", :class:`~datetime.datetime`, or a :attr:`Coordinator.token <bloop.stream.coordinator.Coordinator.token>` """ if isinstance(position, collections.abc.Mapping): move = _move_stream_token elif hasattr(position, "timestamp") and callable(position.timestamp): move = _move_stream_time elif isinstance(position, str) and position.lower() in ["latest", "trim_horizon"]: move = _move_stream_endpoint else: raise InvalidPosition("Don't know how to move to position {!r}".format(position)) move(self, position)
python
def move_to(self, position): """Set the Coordinator to a specific endpoint or time, or load state from a token. :param position: "trim_horizon", "latest", :class:`~datetime.datetime`, or a :attr:`Coordinator.token <bloop.stream.coordinator.Coordinator.token>` """ if isinstance(position, collections.abc.Mapping): move = _move_stream_token elif hasattr(position, "timestamp") and callable(position.timestamp): move = _move_stream_time elif isinstance(position, str) and position.lower() in ["latest", "trim_horizon"]: move = _move_stream_endpoint else: raise InvalidPosition("Don't know how to move to position {!r}".format(position)) move(self, position)
[ "def", "move_to", "(", "self", ",", "position", ")", ":", "if", "isinstance", "(", "position", ",", "collections", ".", "abc", ".", "Mapping", ")", ":", "move", "=", "_move_stream_token", "elif", "hasattr", "(", "position", ",", "\"timestamp\"", ")", "and", "callable", "(", "position", ".", "timestamp", ")", ":", "move", "=", "_move_stream_time", "elif", "isinstance", "(", "position", ",", "str", ")", "and", "position", ".", "lower", "(", ")", "in", "[", "\"latest\"", ",", "\"trim_horizon\"", "]", ":", "move", "=", "_move_stream_endpoint", "else", ":", "raise", "InvalidPosition", "(", "\"Don't know how to move to position {!r}\"", ".", "format", "(", "position", ")", ")", "move", "(", "self", ",", "position", ")" ]
Set the Coordinator to a specific endpoint or time, or load state from a token. :param position: "trim_horizon", "latest", :class:`~datetime.datetime`, or a :attr:`Coordinator.token <bloop.stream.coordinator.Coordinator.token>`
[ "Set", "the", "Coordinator", "to", "a", "specific", "endpoint", "or", "time", "or", "load", "state", "from", "a", "token", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/stream/coordinator.py#L197-L211
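All three accepted position forms, shown against a hypothetical `stream` object; the datetime-based move is the most expensive of the three, since the coordinator has to walk shards to approximate the target time:

.. code-block:: python

    import datetime

    stream.move_to("trim_horizon")                    # endpoint string
    stream.move_to(stream.token)                      # saved token (a Mapping)
    stream.move_to(
        datetime.datetime.now(datetime.timezone.utc)
        - datetime.timedelta(hours=2))                # approximate time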
numberoverzero/bloop
bloop/stream/buffer.py
heap_item
def heap_item(clock, record, shard): """Create a tuple of (ordering, (record, shard)) for use in a RecordBuffer.""" # Primary ordering is by event creation time. # However, creation time is *approximate* and has whole-second resolution. # This means two events in the same shard within one second can't be ordered. ordering = record["meta"]["created_at"] # From testing, SequenceNumber isn't a guaranteed ordering either. However, # it is guaranteed to be unique within a shard. This will be tie-breaker # for multiple records within the same shard, within the same second. second_ordering = int(record["meta"]["sequence_number"]) # It's possible though unlikely, that sequence numbers will collide across # multiple shards, within the same second. The final tie-breaker is # a monotonically increasing integer from the buffer. total_ordering = (ordering, second_ordering, clock()) return total_ordering, record, shard
python
def heap_item(clock, record, shard): """Create a tuple of (ordering, (record, shard)) for use in a RecordBuffer.""" # Primary ordering is by event creation time. # However, creation time is *approximate* and has whole-second resolution. # This means two events in the same shard within one second can't be ordered. ordering = record["meta"]["created_at"] # From testing, SequenceNumber isn't a guaranteed ordering either. However, # it is guaranteed to be unique within a shard. This will be tie-breaker # for multiple records within the same shard, within the same second. second_ordering = int(record["meta"]["sequence_number"]) # It's possible though unlikely, that sequence numbers will collide across # multiple shards, within the same second. The final tie-breaker is # a monotonically increasing integer from the buffer. total_ordering = (ordering, second_ordering, clock()) return total_ordering, record, shard
[ "def", "heap_item", "(", "clock", ",", "record", ",", "shard", ")", ":", "# Primary ordering is by event creation time.", "# However, creation time is *approximate* and has whole-second resolution.", "# This means two events in the same shard within one second can't be ordered.", "ordering", "=", "record", "[", "\"meta\"", "]", "[", "\"created_at\"", "]", "# From testing, SequenceNumber isn't a guaranteed ordering either. However,", "# it is guaranteed to be unique within a shard. This will be tie-breaker", "# for multiple records within the same shard, within the same second.", "second_ordering", "=", "int", "(", "record", "[", "\"meta\"", "]", "[", "\"sequence_number\"", "]", ")", "# It's possible though unlikely, that sequence numbers will collide across", "# multiple shards, within the same second. The final tie-breaker is", "# a monotonically increasing integer from the buffer.", "total_ordering", "=", "(", "ordering", ",", "second_ordering", ",", "clock", "(", ")", ")", "return", "total_ordering", ",", "record", ",", "shard" ]
Create a tuple of (ordering, (record, shard)) for use in a RecordBuffer.
[ "Create", "a", "tuple", "of", "(", "ordering", "(", "record", "shard", "))", "for", "use", "in", "a", "RecordBuffer", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/stream/buffer.py#L4-L18
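A runnable illustration of the three-part ordering key. `created_at` is simplified to an int here (it is a datetime in real records), and the clock is a monotonic counter as in `RecordBuffer`:

.. code-block:: python

    import heapq
    import itertools

    clock = itertools.count().__next__

    def key(record):
        return (record["meta"]["created_at"],
                int(record["meta"]["sequence_number"]),
                clock())

    a = {"meta": {"created_at": 100, "sequence_number": "7"}}
    b = {"meta": {"created_at": 100, "sequence_number": "7"}}
    heap = [(key(a), "a"), (key(b), "b")]
    heapq.heapify(heap)
    assert heap[0][1] == "a"    # the clock() tie-breaker preserves insert order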
numberoverzero/bloop
bloop/stream/buffer.py
RecordBuffer.push
def push(self, record, shard): """Push a new record into the buffer :param dict record: new record :param shard: Shard the record came from :type shard: :class:`~bloop.stream.shard.Shard` """ heapq.heappush(self.heap, heap_item(self.clock, record, shard))
python
def push(self, record, shard): """Push a new record into the buffer :param dict record: new record :param shard: Shard the record came from :type shard: :class:`~bloop.stream.shard.Shard` """ heapq.heappush(self.heap, heap_item(self.clock, record, shard))
[ "def", "push", "(", "self", ",", "record", ",", "shard", ")", ":", "heapq", ".", "heappush", "(", "self", ".", "heap", ",", "heap_item", "(", "self", ".", "clock", ",", "record", ",", "shard", ")", ")" ]
Push a new record into the buffer :param dict record: new record :param shard: Shard the record came from :type shard: :class:`~bloop.stream.shard.Shard`
[ "Push", "a", "new", "record", "into", "the", "buffer" ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/stream/buffer.py#L39-L46
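A minimal run against `RecordBuffer` itself, assuming it can be constructed with no arguments; the record dict mimics only the `meta` fields that `heap_item` reads, and any object can stand in for the shard:

.. code-block:: python

    from bloop.stream.buffer import RecordBuffer

    buffer = RecordBuffer()
    record = {"meta": {"created_at": 100, "sequence_number": "42"}}
    buffer.push(record, object())
    ordering, rec, shard = buffer.heap[0]    # smallest ordering sits at the root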
numberoverzero/bloop
bloop/stream/buffer.py
RecordBuffer.push_all
def push_all(self, record_shard_pairs): """Push multiple (record, shard) pairs at once, with only one :meth:`heapq.heapify` call to maintain order. :param record_shard_pairs: list of ``(record, shard)`` tuples (see :func:`~bloop.stream.buffer.RecordBuffer.push`). """ # Faster than inserting one at a time; the heap is sorted once after all inserts. for record, shard in record_shard_pairs: item = heap_item(self.clock, record, shard) self.heap.append(item) heapq.heapify(self.heap)
python
def push_all(self, record_shard_pairs): """Push multiple (record, shard) pairs at once, with only one :meth:`heapq.heapify` call to maintain order. :param record_shard_pairs: list of ``(record, shard)`` tuples (see :func:`~bloop.stream.buffer.RecordBuffer.push`). """ # Faster than inserting one at a time; the heap is sorted once after all inserts. for record, shard in record_shard_pairs: item = heap_item(self.clock, record, shard) self.heap.append(item) heapq.heapify(self.heap)
[ "def", "push_all", "(", "self", ",", "record_shard_pairs", ")", ":", "# Faster than inserting one at a time; the heap is sorted once after all inserts.", "for", "record", ",", "shard", "in", "record_shard_pairs", ":", "item", "=", "heap_item", "(", "self", ".", "clock", ",", "record", ",", "shard", ")", "self", ".", "heap", ".", "append", "(", "item", ")", "heapq", ".", "heapify", "(", "self", ".", "heap", ")" ]
Push multiple (record, shard) pairs at once, with only one :meth:`heapq.heapify` call to maintain order. :param record_shard_pairs: list of ``(record, shard)`` tuples (see :func:`~bloop.stream.buffer.RecordBuffer.push`).
[ "Push", "multiple", "(", "record", "shard", ")", "pairs", "at", "once", "with", "only", "one", ":", "meth", ":", "heapq", ".", "heapify", "call", "to", "maintain", "order", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/stream/buffer.py#L48-L58
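Continuing the sketch above: batch insertion appends everything and heapifies once (O(n)) rather than paying O(log n) per `heappush`:

.. code-block:: python

    pairs = [
        ({"meta": {"created_at": t, "sequence_number": str(t)}}, object())
        for t in (3, 1, 2)
    ]
    buffer.push_all(pairs)
    # buffer.heap[0] now holds the record with created_at == 1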
numberoverzero/bloop
bloop/models.py
loaded_columns
def loaded_columns(obj: BaseModel): """Yields each (name, value) tuple for all columns in an object that aren't missing""" for column in sorted(obj.Meta.columns, key=lambda c: c.name): value = getattr(obj, column.name, missing) if value is not missing: yield column.name, value
python
def loaded_columns(obj: BaseModel): """Yields each (name, value) tuple for all columns in an object that aren't missing""" for column in sorted(obj.Meta.columns, key=lambda c: c.name): value = getattr(obj, column.name, missing) if value is not missing: yield column.name, value
[ "def", "loaded_columns", "(", "obj", ":", "BaseModel", ")", ":", "for", "column", "in", "sorted", "(", "obj", ".", "Meta", ".", "columns", ",", "key", "=", "lambda", "c", ":", "c", ".", "name", ")", ":", "value", "=", "getattr", "(", "obj", ",", "column", ".", "name", ",", "missing", ")", "if", "value", "is", "not", "missing", ":", "yield", "column", ".", "name", ",", "value" ]
Yields each (name, value) tuple for all columns in an object that aren't missing
[ "Yields", "each", "(", "name", "value", ")", "tuple", "for", "all", "columns", "in", "an", "object", "that", "aren", "t", "missing" ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/models.py#L583-L588
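A sketch of the generator in use, borrowing the `User` model from the docstring examples elsewhere in this file; `loaded_columns` lives in `bloop.models`:

.. code-block:: python

    from bloop.models import loaded_columns

    user = User(id="numberoverzero")       # email intentionally left unset
    assert dict(loaded_columns(user)) == {"id": "numberoverzero"}
    # missing values are skipped, and names come back sorted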
numberoverzero/bloop
bloop/models.py
unpack_from_dynamodb
def unpack_from_dynamodb(*, attrs, expected, model=None, obj=None, engine=None, context=None, **kwargs): """Push values by dynamo_name into an object""" context = context or {"engine": engine} engine = engine or context.get("engine", None) if not engine: raise ValueError("You must provide engine or a context with an engine.") if model is None and obj is None: raise ValueError("You must provide a model or obj to unpack.") if model is not None and obj is not None: raise ValueError("Only specify model or obj.") if model: obj = model.Meta.init() for column in expected: value = attrs.get(column.dynamo_name, None) value = engine._load(column.typedef, value, context=context, **kwargs) setattr(obj, column.name, value) return obj
python
def unpack_from_dynamodb(*, attrs, expected, model=None, obj=None, engine=None, context=None, **kwargs): """Push values by dynamo_name into an object""" context = context or {"engine": engine} engine = engine or context.get("engine", None) if not engine: raise ValueError("You must provide engine or a context with an engine.") if model is None and obj is None: raise ValueError("You must provide a model or obj to unpack.") if model is not None and obj is not None: raise ValueError("Only specify model or obj.") if model: obj = model.Meta.init() for column in expected: value = attrs.get(column.dynamo_name, None) value = engine._load(column.typedef, value, context=context, **kwargs) setattr(obj, column.name, value) return obj
[ "def", "unpack_from_dynamodb", "(", "*", ",", "attrs", ",", "expected", ",", "model", "=", "None", ",", "obj", "=", "None", ",", "engine", "=", "None", ",", "context", "=", "None", ",", "*", "*", "kwargs", ")", ":", "context", "=", "context", "or", "{", "\"engine\"", ":", "engine", "}", "engine", "=", "engine", "or", "context", ".", "get", "(", "\"engine\"", ",", "None", ")", "if", "not", "engine", ":", "raise", "ValueError", "(", "\"You must provide engine or a context with an engine.\"", ")", "if", "model", "is", "None", "and", "obj", "is", "None", ":", "raise", "ValueError", "(", "\"You must provide a model or obj to unpack.\"", ")", "if", "model", "is", "not", "None", "and", "obj", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Only specify model or obj.\"", ")", "if", "model", ":", "obj", "=", "model", ".", "Meta", ".", "init", "(", ")", "for", "column", "in", "expected", ":", "value", "=", "attrs", ".", "get", "(", "column", ".", "dynamo_name", ",", "None", ")", "value", "=", "engine", ".", "_load", "(", "column", ".", "typedef", ",", "value", ",", "context", "=", "context", ",", "*", "*", "kwargs", ")", "setattr", "(", "obj", ",", "column", ".", "name", ",", "value", ")", "return", "obj" ]
Push values by dynamo_name into an object
[ "Push", "values", "by", "dynamo_name", "into", "an", "object" ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/models.py#L591-L608
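A sketch of loading a wire-format dict, again assuming the `User` model (whose `email` column has `dynamo_name="e"`) and a configured `engine`:

.. code-block:: python

    from bloop.models import unpack_from_dynamodb

    attrs = {"id": {"S": "numberoverzero"}, "e": {"S": "user@domain"}}
    user = unpack_from_dynamodb(
        attrs=attrs,
        expected=User.Meta.columns,
        model=User,          # model XOR obj, never both
        engine=engine,
    )
    assert user.email == "user@domain"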
numberoverzero/bloop
bloop/models.py
setdefault
def setdefault(obj, field, default): """Set an object's field to default if it doesn't have a value""" setattr(obj, field, getattr(obj, field, default))
python
def setdefault(obj, field, default): """Set an object's field to default if it doesn't have a value""" setattr(obj, field, getattr(obj, field, default))
[ "def", "setdefault", "(", "obj", ",", "field", ",", "default", ")", ":", "setattr", "(", "obj", ",", "field", ",", "getattr", "(", "obj", ",", "field", ",", "default", ")", ")" ]
Set an object's field to default if it doesn't have a value
[ "Set", "an", "object", "s", "field", "to", "default", "if", "it", "doesn", "t", "have", "a", "value" ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/models.py#L751-L753
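A runnable demonstration of the write-once semantics:

.. code-block:: python

    from bloop.models import setdefault

    class Options:
        pass

    opts = Options()
    setdefault(opts, "retries", 3)    # attribute missing -> default applied
    setdefault(opts, "retries", 9)    # attribute present -> left alone
    assert opts.retries == 3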
numberoverzero/bloop
bloop/models.py
bind_column
def bind_column(model, name, column, force=False, recursive=False, copy=False) -> Column: """Bind a column to the model with the given name. This method is primarily used during BaseModel.__init_subclass__, although it can be used to easily attach a new column to an existing model: .. code-block:: python import bloop.models class User(BaseModel): id = Column(String, hash_key=True) email = Column(String, dynamo_name="e") bound = bloop.models.bind_column(User, "email", email) assert bound is email # rebind with force, and use a copy bound = bloop.models.bind_column(User, "email", email, force=True, copy=True) assert bound is not email If an existing index refers to this column, it will be updated to point to the new column using :meth:`~bloop.models.refresh_index`, including recalculating the index projection. Meta attributes including ``Meta.columns``, ``Meta.hash_key``, etc. will be updated if necessary. If ``name`` or the column's ``dynamo_name`` conflicts with an existing column or index on the model, raises :exc:`~bloop.exceptions.InvalidModel` unless ``force`` is True. If ``recursive`` is ``True`` and there are existing subclasses of ``model``, a copy of the column will attempt to bind to each subclass. The recursive calls will not force the bind, and will always use a new copy. If ``copy`` is ``True`` then a copy of the provided column is used. This uses a shallow copy via :meth:`~bloop.models.Column.__copy__`. :param model: The model to bind the column to. :param name: The name to bind the column as. In effect, used for ``setattr(model, name, column)`` :param column: The column to bind to the model. :param force: Unbind existing columns or indexes with the same name or dynamo_name. Default is False. :param recursive: Bind to each subclass of this model. Default is False. :param copy: Use a copy of the column instead of the column directly. Default is False. :return: The bound column. This is a new column when ``copy`` is True, otherwise the input column. """ if not subclassof(model, BaseModel): raise InvalidModel(f"{model} is not a subclass of BaseModel") meta = model.Meta if copy: column = copyfn(column) # TODO elif column.model is not None: logger.warning(f"Trying to rebind column bound to {column.model}") column._name = name safe_repr = unbound_repr(column) # Guard against name, dynamo_name collisions; if force=True, unbind any matches same_dynamo_name = ( util.index(meta.columns, "dynamo_name").get(column.dynamo_name) or util.index(meta.indexes, "dynamo_name").get(column.dynamo_name) ) same_name = ( meta.columns_by_name.get(column.name) or util.index(meta.indexes, "name").get(column.name) ) if column.hash_key and column.range_key: raise InvalidModel(f"Tried to bind {safe_repr} as both a hash and range key.") if force: if same_name: unbind(meta, name=column.name) if same_dynamo_name: unbind(meta, dynamo_name=column.dynamo_name) else: if same_name: raise InvalidModel( f"The column {safe_repr} has the same name as an existing column " f"or index {same_name}. Did you mean to bind with force=True?") if same_dynamo_name: raise InvalidModel( f"The column {safe_repr} has the same dynamo_name as an existing " f"column or index {same_name}. Did you mean to bind with force=True?") if column.hash_key and meta.hash_key: raise InvalidModel( f"Tried to bind {safe_repr} but {meta.model} " f"already has a different hash_key: {meta.hash_key}") if column.range_key and meta.range_key: raise InvalidModel( f"Tried to bind {safe_repr} but {meta.model} " f"already has a different range_key: {meta.range_key}") # success! # -------------------------------- column.model = meta.model meta.columns.add(column) meta.columns_by_name[name] = column setattr(meta.model, name, column) if column.hash_key: meta.hash_key = column meta.keys.add(column) if column.range_key: meta.range_key = column meta.keys.add(column) try: for index in meta.indexes: refresh_index(meta, index) except KeyError as e: raise InvalidModel( f"Binding column {column} removed a required column for index {unbound_repr(index)}") from e if recursive: for subclass in util.walk_subclasses(meta.model): try: bind_column(subclass, name, column, force=False, recursive=False, copy=True) except InvalidModel: pass return column
python
def bind_column(model, name, column, force=False, recursive=False, copy=False) -> Column: """Bind a column to the model with the given name. This method is primarily used during BaseModel.__init_subclass__, although it can be used to easily attach a new column to an existing model: .. code-block:: python import bloop.models class User(BaseModel): id = Column(String, hash_key=True) email = Column(String, dynamo_name="e") bound = bloop.models.bind_column(User, "email", email) assert bound is email # rebind with force, and use a copy bound = bloop.models.bind_column(User, "email", email, force=True, copy=True) assert bound is not email If an existing index refers to this column, it will be updated to point to the new column using :meth:`~bloop.models.refresh_index`, including recalculating the index projection. Meta attributes including ``Meta.columns``, ``Meta.hash_key``, etc. will be updated if necessary. If ``name`` or the column's ``dynamo_name`` conflicts with an existing column or index on the model, raises :exc:`~bloop.exceptions.InvalidModel` unless ``force`` is True. If ``recursive`` is ``True`` and there are existing subclasses of ``model``, a copy of the column will attempt to bind to each subclass. The recursive calls will not force the bind, and will always use a new copy. If ``copy`` is ``True`` then a copy of the provided column is used. This uses a shallow copy via :meth:`~bloop.models.Column.__copy__`. :param model: The model to bind the column to. :param name: The name to bind the column as. In effect, used for ``setattr(model, name, column)`` :param column: The column to bind to the model. :param force: Unbind existing columns or indexes with the same name or dynamo_name. Default is False. :param recursive: Bind to each subclass of this model. Default is False. :param copy: Use a copy of the column instead of the column directly. Default is False. :return: The bound column. This is a new column when ``copy`` is True, otherwise the input column. """ if not subclassof(model, BaseModel): raise InvalidModel(f"{model} is not a subclass of BaseModel") meta = model.Meta if copy: column = copyfn(column) # TODO elif column.model is not None: logger.warning(f"Trying to rebind column bound to {column.model}") column._name = name safe_repr = unbound_repr(column) # Guard against name, dynamo_name collisions; if force=True, unbind any matches same_dynamo_name = ( util.index(meta.columns, "dynamo_name").get(column.dynamo_name) or util.index(meta.indexes, "dynamo_name").get(column.dynamo_name) ) same_name = ( meta.columns_by_name.get(column.name) or util.index(meta.indexes, "name").get(column.name) ) if column.hash_key and column.range_key: raise InvalidModel(f"Tried to bind {safe_repr} as both a hash and range key.") if force: if same_name: unbind(meta, name=column.name) if same_dynamo_name: unbind(meta, dynamo_name=column.dynamo_name) else: if same_name: raise InvalidModel( f"The column {safe_repr} has the same name as an existing column " f"or index {same_name}. Did you mean to bind with force=True?") if same_dynamo_name: raise InvalidModel( f"The column {safe_repr} has the same dynamo_name as an existing " f"column or index {same_name}. Did you mean to bind with force=True?") if column.hash_key and meta.hash_key: raise InvalidModel( f"Tried to bind {safe_repr} but {meta.model} " f"already has a different hash_key: {meta.hash_key}") if column.range_key and meta.range_key: raise InvalidModel( f"Tried to bind {safe_repr} but {meta.model} " f"already has a different range_key: {meta.range_key}") # success! # -------------------------------- column.model = meta.model meta.columns.add(column) meta.columns_by_name[name] = column setattr(meta.model, name, column) if column.hash_key: meta.hash_key = column meta.keys.add(column) if column.range_key: meta.range_key = column meta.keys.add(column) try: for index in meta.indexes: refresh_index(meta, index) except KeyError as e: raise InvalidModel( f"Binding column {column} removed a required column for index {unbound_repr(index)}") from e if recursive: for subclass in util.walk_subclasses(meta.model): try: bind_column(subclass, name, column, force=False, recursive=False, copy=True) except InvalidModel: pass return column
[ "def", "bind_column", "(", "model", ",", "name", ",", "column", ",", "force", "=", "False", ",", "recursive", "=", "False", ",", "copy", "=", "False", ")", "->", "Column", ":", "if", "not", "subclassof", "(", "model", ",", "BaseModel", ")", ":", "raise", "InvalidModel", "(", "f\"{model} is not a subclass of BaseModel\"", ")", "meta", "=", "model", ".", "Meta", "if", "copy", ":", "column", "=", "copyfn", "(", "column", ")", "# TODO elif column.model is not None: logger.warning(f\"Trying to rebind column bound to {column.model}\")", "column", ".", "_name", "=", "name", "safe_repr", "=", "unbound_repr", "(", "column", ")", "# Guard against name, dynamo_name collisions; if force=True, unbind any matches", "same_dynamo_name", "=", "(", "util", ".", "index", "(", "meta", ".", "columns", ",", "\"dynamo_name\"", ")", ".", "get", "(", "column", ".", "dynamo_name", ")", "or", "util", ".", "index", "(", "meta", ".", "indexes", ",", "\"dynamo_name\"", ")", ".", "get", "(", "column", ".", "dynamo_name", ")", ")", "same_name", "=", "(", "meta", ".", "columns_by_name", ".", "get", "(", "column", ".", "name", ")", "or", "util", ".", "index", "(", "meta", ".", "indexes", ",", "\"name\"", ")", ".", "get", "(", "column", ".", "name", ")", ")", "if", "column", ".", "hash_key", "and", "column", ".", "range_key", ":", "raise", "InvalidModel", "(", "f\"Tried to bind {safe_repr} as both a hash and range key.\"", ")", "if", "force", ":", "if", "same_name", ":", "unbind", "(", "meta", ",", "name", "=", "column", ".", "name", ")", "if", "same_dynamo_name", ":", "unbind", "(", "meta", ",", "dynamo_name", "=", "column", ".", "dynamo_name", ")", "else", ":", "if", "same_name", ":", "raise", "InvalidModel", "(", "f\"The column {safe_repr} has the same name as an existing column \"", "f\"or index {same_name}. Did you mean to bind with force=True?\"", ")", "if", "same_dynamo_name", ":", "raise", "InvalidModel", "(", "f\"The column {safe_repr} has the same dynamo_name as an existing \"", "f\"column or index {same_name}. 
Did you mean to bind with force=True?\"", ")", "if", "column", ".", "hash_key", "and", "meta", ".", "hash_key", ":", "raise", "InvalidModel", "(", "f\"Tried to bind {safe_repr} but {meta.model} \"", "f\"already has a different hash_key: {meta.hash_key}\"", ")", "if", "column", ".", "range_key", "and", "meta", ".", "range_key", ":", "raise", "InvalidModel", "(", "f\"Tried to bind {safe_repr} but {meta.model} \"", "f\"already has a different range_key: {meta.range_key}\"", ")", "# success!", "# --------------------------------", "column", ".", "model", "=", "meta", ".", "model", "meta", ".", "columns", ".", "add", "(", "column", ")", "meta", ".", "columns_by_name", "[", "name", "]", "=", "column", "setattr", "(", "meta", ".", "model", ",", "name", ",", "column", ")", "if", "column", ".", "hash_key", ":", "meta", ".", "hash_key", "=", "column", "meta", ".", "keys", ".", "add", "(", "column", ")", "if", "column", ".", "range_key", ":", "meta", ".", "range_key", "=", "column", "meta", ".", "keys", ".", "add", "(", "column", ")", "try", ":", "for", "index", "in", "meta", ".", "indexes", ":", "refresh_index", "(", "meta", ",", "index", ")", "except", "KeyError", "as", "e", ":", "raise", "InvalidModel", "(", "f\"Binding column {column} removed a required column for index {unbound_repr(index)}\"", ")", "from", "e", "if", "recursive", ":", "for", "subclass", "in", "util", ".", "walk_subclasses", "(", "meta", ".", "model", ")", ":", "try", ":", "bind_column", "(", "subclass", ",", "name", ",", "column", ",", "force", "=", "False", ",", "recursive", "=", "False", ",", "copy", "=", "True", ")", "except", "InvalidModel", ":", "pass", "return", "column" ]
Bind a column to the model with the given name. This method is primarily used during BaseModel.__init_subclass__, although it can be used to easily attach a new column to an existing model: .. code-block:: python import bloop.models class User(BaseModel): id = Column(String, hash_key=True) email = Column(String, dynamo_name="e") bound = bloop.models.bind_column(User, "email", email) assert bound is email # rebind with force, and use a copy bound = bloop.models.bind_column(User, "email", email, force=True, copy=True) assert bound is not email If an existing index refers to this column, it will be updated to point to the new column using :meth:`~bloop.models.refresh_index`, including recalculating the index projection. Meta attributes including ``Meta.columns``, ``Meta.hash_key``, etc. will be updated if necessary. If ``name`` or the column's ``dynamo_name`` conflicts with an existing column or index on the model, raises :exc:`~bloop.exceptions.InvalidModel` unless ``force`` is True. If ``recursive`` is ``True`` and there are existing subclasses of ``model``, a copy of the column will attempt to bind to each subclass. The recursive calls will not force the bind, and will always use a new copy. If ``copy`` is ``True`` then a copy of the provided column is used. This uses a shallow copy via :meth:`~bloop.models.Column.__copy__`. :param model: The model to bind the column to. :param name: The name to bind the column as. In effect, used for ``setattr(model, name, column)`` :param column: The column to bind to the model. :param force: Unbind existing columns or indexes with the same name or dynamo_name. Default is False. :param recursive: Bind to each subclass of this model. Default is False. :param copy: Use a copy of the column instead of the column directly. Default is False. :return: The bound column. This is a new column when ``copy`` is True, otherwise the input column.
[ "Bind", "a", "column", "to", "the", "model", "with", "the", "given", "name", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/models.py#L820-L940
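Beyond the docstring's own example, `recursive=True` pushes a copy of the column down to every existing subclass. A sketch using abstract models so no table is implied (all names invented):

.. code-block:: python

    import bloop.models
    from bloop import BaseModel, Column, String

    class Base(BaseModel):
        class Meta:
            abstract = True
        id = Column(String, hash_key=True)

    class Child(Base):
        class Meta:
            abstract = True

    created = Column(String, dynamo_name="c")
    bloop.models.bind_column(Base, "created", created, recursive=True)
    assert Base.created is created
    assert Child.created is not created    # the subclass received a copy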
numberoverzero/bloop
bloop/models.py
bind_index
def bind_index(model, name, index, force=False, recursive=True, copy=False) -> Index: """Bind an index to the model with the given name. This method is primarily used during BaseModel.__init_subclass__, although it can be used to easily attach a new index to an existing model: .. code-block:: python import bloop.models class User(BaseModel): id = Column(String, hash_key=True) email = Column(String, dynamo_name="e") by_email = GlobalSecondaryIndex(projection="keys", hash_key="email") bound = bloop.models.bind_index(User, "by_email", by_email) assert bound is by_email # rebind with force, and use a copy bound = bloop.models.bind_index(User, "by_email", by_email, force=True, copy=True) assert bound is not by_email If ``name`` or the index's ``dynamo_name`` conflicts with an existing column or index on the model, raises :exc:`~bloop.exceptions.InvalidModel` unless ``force`` is True. If ``recursive`` is ``True`` and there are existing subclasses of ``model``, a copy of the index will attempt to bind to each subclass. The recursive calls will not force the bind, and will always use a new copy. If ``copy`` is ``True`` then a copy of the provided index is used. This uses a shallow copy via :meth:`~bloop.models.Index.__copy__`. :param model: The model to bind the index to. :param name: The name to bind the index as. In effect, used for ``setattr(model, name, index)`` :param index: The index to bind to the model. :param force: Unbind existing columns or indexes with the same name or dynamo_name. Default is False. :param recursive: Bind to each subclass of this model. Default is False. :param copy: Use a copy of the index instead of the index directly. Default is False. :return: The bound index. This is a new column when ``copy`` is True, otherwise the input index. """ if not subclassof(model, BaseModel): raise InvalidModel(f"{model} is not a subclass of BaseModel") meta = model.Meta if copy: index = copyfn(index) # TODO elif index.model is not None: logger.warning(f"Trying to rebind index bound to {index.model}") index._name = name safe_repr = unbound_repr(index) # Guard against name, dynamo_name collisions; if force=True, unbind any matches same_dynamo_name = ( util.index(meta.columns, "dynamo_name").get(index.dynamo_name) or util.index(meta.indexes, "dynamo_name").get(index.dynamo_name) ) same_name = ( meta.columns_by_name.get(index.name) or util.index(meta.indexes, "name").get(index.name) ) if isinstance(index, LocalSecondaryIndex) and not meta.range_key: raise InvalidModel("An LSI requires the Model to have a range key.") if force: if same_name: unbind(meta, name=index.name) if same_dynamo_name: unbind(meta, dynamo_name=index.dynamo_name) else: if same_name: raise InvalidModel( f"The index {safe_repr} has the same name as an existing index " f"or column {same_name}. Did you mean to bind with force=True?") if same_dynamo_name: raise InvalidModel( f"The index {safe_repr} has the same dynamo_name as an existing " f"index or column {same_name}. Did you mean to bind with force=True?") # success! # -------------------------------- index.model = meta.model meta.indexes.add(index) setattr(meta.model, name, index) if isinstance(index, LocalSecondaryIndex): meta.lsis.add(index) if isinstance(index, GlobalSecondaryIndex): meta.gsis.add(index) try: refresh_index(meta, index) except KeyError as e: raise InvalidModel("Index expected a hash or range key that does not exist") from e if recursive: for subclass in util.walk_subclasses(meta.model): try: bind_index(subclass, name, index, force=False, recursive=False, copy=True) except InvalidModel: pass return index
python
def bind_index(model, name, index, force=False, recursive=True, copy=False) -> Index: """Bind an index to the model with the given name. This method is primarily used during BaseModel.__init_subclass__, although it can be used to easily attach a new index to an existing model: .. code-block:: python import bloop.models class User(BaseModel): id = Column(String, hash_key=True) email = Column(String, dynamo_name="e") by_email = GlobalSecondaryIndex(projection="keys", hash_key="email") bound = bloop.models.bind_index(User, "by_email", by_email) assert bound is by_email # rebind with force, and use a copy bound = bloop.models.bind_index(User, "by_email", by_email, force=True, copy=True) assert bound is not by_email If ``name`` or the index's ``dynamo_name`` conflicts with an existing column or index on the model, raises :exc:`~bloop.exceptions.InvalidModel` unless ``force`` is True. If ``recursive`` is ``True`` and there are existing subclasses of ``model``, a copy of the index will attempt to bind to each subclass. The recursive calls will not force the bind, and will always use a new copy. If ``copy`` is ``True`` then a copy of the provided index is used. This uses a shallow copy via :meth:`~bloop.models.Index.__copy__`. :param model: The model to bind the index to. :param name: The name to bind the index as. In effect, used for ``setattr(model, name, index)`` :param index: The index to bind to the model. :param force: Unbind existing columns or indexes with the same name or dynamo_name. Default is False. :param recursive: Bind to each subclass of this model. Default is False. :param copy: Use a copy of the index instead of the index directly. Default is False. :return: The bound index. This is a new column when ``copy`` is True, otherwise the input index. """ if not subclassof(model, BaseModel): raise InvalidModel(f"{model} is not a subclass of BaseModel") meta = model.Meta if copy: index = copyfn(index) # TODO elif index.model is not None: logger.warning(f"Trying to rebind index bound to {index.model}") index._name = name safe_repr = unbound_repr(index) # Guard against name, dynamo_name collisions; if force=True, unbind any matches same_dynamo_name = ( util.index(meta.columns, "dynamo_name").get(index.dynamo_name) or util.index(meta.indexes, "dynamo_name").get(index.dynamo_name) ) same_name = ( meta.columns_by_name.get(index.name) or util.index(meta.indexes, "name").get(index.name) ) if isinstance(index, LocalSecondaryIndex) and not meta.range_key: raise InvalidModel("An LSI requires the Model to have a range key.") if force: if same_name: unbind(meta, name=index.name) if same_dynamo_name: unbind(meta, dynamo_name=index.dynamo_name) else: if same_name: raise InvalidModel( f"The index {safe_repr} has the same name as an existing index " f"or column {same_name}. Did you mean to bind with force=True?") if same_dynamo_name: raise InvalidModel( f"The index {safe_repr} has the same dynamo_name as an existing " f"index or column {same_name}. Did you mean to bind with force=True?") # success! # -------------------------------- index.model = meta.model meta.indexes.add(index) setattr(meta.model, name, index) if isinstance(index, LocalSecondaryIndex): meta.lsis.add(index) if isinstance(index, GlobalSecondaryIndex): meta.gsis.add(index) try: refresh_index(meta, index) except KeyError as e: raise InvalidModel("Index expected a hash or range key that does not exist") from e if recursive: for subclass in util.walk_subclasses(meta.model): try: bind_index(subclass, name, index, force=False, recursive=False, copy=True) except InvalidModel: pass return index
[ "def", "bind_index", "(", "model", ",", "name", ",", "index", ",", "force", "=", "False", ",", "recursive", "=", "True", ",", "copy", "=", "False", ")", "->", "Index", ":", "if", "not", "subclassof", "(", "model", ",", "BaseModel", ")", ":", "raise", "InvalidModel", "(", "f\"{model} is not a subclass of BaseModel\"", ")", "meta", "=", "model", ".", "Meta", "if", "copy", ":", "index", "=", "copyfn", "(", "index", ")", "# TODO elif index.model is not None: logger.warning(f\"Trying to rebind index bound to {index.model}\")", "index", ".", "_name", "=", "name", "safe_repr", "=", "unbound_repr", "(", "index", ")", "# Guard against name, dynamo_name collisions; if force=True, unbind any matches", "same_dynamo_name", "=", "(", "util", ".", "index", "(", "meta", ".", "columns", ",", "\"dynamo_name\"", ")", ".", "get", "(", "index", ".", "dynamo_name", ")", "or", "util", ".", "index", "(", "meta", ".", "indexes", ",", "\"dynamo_name\"", ")", ".", "get", "(", "index", ".", "dynamo_name", ")", ")", "same_name", "=", "(", "meta", ".", "columns_by_name", ".", "get", "(", "index", ".", "name", ")", "or", "util", ".", "index", "(", "meta", ".", "indexes", ",", "\"name\"", ")", ".", "get", "(", "index", ".", "name", ")", ")", "if", "isinstance", "(", "index", ",", "LocalSecondaryIndex", ")", "and", "not", "meta", ".", "range_key", ":", "raise", "InvalidModel", "(", "\"An LSI requires the Model to have a range key.\"", ")", "if", "force", ":", "if", "same_name", ":", "unbind", "(", "meta", ",", "name", "=", "index", ".", "name", ")", "if", "same_dynamo_name", ":", "unbind", "(", "meta", ",", "dynamo_name", "=", "index", ".", "dynamo_name", ")", "else", ":", "if", "same_name", ":", "raise", "InvalidModel", "(", "f\"The index {safe_repr} has the same name as an existing index \"", "f\"or column {same_name}. Did you mean to bind with force=True?\"", ")", "if", "same_dynamo_name", ":", "raise", "InvalidModel", "(", "f\"The index {safe_repr} has the same dynamo_name as an existing \"", "f\"index or column {same_name}. Did you mean to bind with force=True?\"", ")", "# success!", "# --------------------------------", "index", ".", "model", "=", "meta", ".", "model", "meta", ".", "indexes", ".", "add", "(", "index", ")", "setattr", "(", "meta", ".", "model", ",", "name", ",", "index", ")", "if", "isinstance", "(", "index", ",", "LocalSecondaryIndex", ")", ":", "meta", ".", "lsis", ".", "add", "(", "index", ")", "if", "isinstance", "(", "index", ",", "GlobalSecondaryIndex", ")", ":", "meta", ".", "gsis", ".", "add", "(", "index", ")", "try", ":", "refresh_index", "(", "meta", ",", "index", ")", "except", "KeyError", "as", "e", ":", "raise", "InvalidModel", "(", "\"Index expected a hash or range key that does not exist\"", ")", "from", "e", "if", "recursive", ":", "for", "subclass", "in", "util", ".", "walk_subclasses", "(", "meta", ".", "model", ")", ":", "try", ":", "bind_index", "(", "subclass", ",", "name", ",", "index", ",", "force", "=", "False", ",", "recursive", "=", "False", ",", "copy", "=", "True", ")", "except", "InvalidModel", ":", "pass", "return", "index" ]
Bind an index to the model with the given name. This method is primarily used during BaseModel.__init_subclass__, although it can be used to easily attach a new index to an existing model: .. code-block:: python import bloop.models class User(BaseModel): id = Column(String, hash_key=True) email = Column(String, dynamo_name="e") by_email = GlobalSecondaryIndex(projection="keys", hash_key="email") bound = bloop.models.bind_index(User, "by_email", by_email) assert bound is by_email # rebind with force, and use a copy bound = bloop.models.bind_index(User, "by_email", by_email, force=True, copy=True) assert bound is not by_email If ``name`` or the index's ``dynamo_name`` conflicts with an existing column or index on the model, raises :exc:`~bloop.exceptions.InvalidModel` unless ``force`` is True. If ``recursive`` is ``True`` and there are existing subclasses of ``model``, a copy of the index will attempt to bind to each subclass. The recursive calls will not force the bind, and will always use a new copy. If ``copy`` is ``True`` then a copy of the provided index is used. This uses a shallow copy via :meth:`~bloop.models.Index.__copy__`. :param model: The model to bind the index to. :param name: The name to bind the index as. In effect, used for ``setattr(model, name, index)`` :param index: The index to bind to the model. :param force: Unbind existing columns or indexes with the same name or dynamo_name. Default is False. :param recursive: Bind to each subclass of this model. Default is False. :param copy: Use a copy of the index instead of the index directly. Default is False. :return: The bound index. This is a new column when ``copy`` is True, otherwise the input index.
[ "Bind", "an", "index", "to", "the", "model", "with", "the", "given", "name", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/models.py#L943-L1047
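A sketch of attaching an LSI after class creation; per the guard above, this raises InvalidModel unless the model already declares a range key (model and names invented, abstract so no table is implied):

.. code-block:: python

    import bloop.models
    from bloop import BaseModel, Column, LocalSecondaryIndex, String

    class Message(BaseModel):
        class Meta:
            abstract = True
        thread = Column(String, hash_key=True)
        sent = Column(String, range_key=True)
        body = Column(String)

    by_body = LocalSecondaryIndex(projection="keys", range_key="body")
    bloop.models.bind_index(Message, "by_body", by_body)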
numberoverzero/bloop
bloop/models.py
refresh_index
def refresh_index(meta, index) -> None: """Recalculate the projection, hash_key, and range_key for the given index. :param meta: model.Meta to find columns by name :param index: The index to refresh """ # All projections include model + index keys projection_keys = set.union(meta.keys, index.keys) proj = index.projection mode = proj["mode"] if mode == "keys": proj["included"] = projection_keys elif mode == "all": proj["included"] = meta.columns elif mode == "include": # pragma: no branch if all(isinstance(p, str) for p in proj["included"]): proj["included"] = set(meta.columns_by_name[n] for n in proj["included"]) else: proj["included"] = set(proj["included"]) proj["included"].update(projection_keys) if proj["strict"]: proj["available"] = proj["included"] else: proj["available"] = meta.columns
python
def refresh_index(meta, index) -> None: """Recalculate the projection, hash_key, and range_key for the given index. :param meta: model.Meta to find columns by name :param index: The index to refresh """ # All projections include model + index keys projection_keys = set.union(meta.keys, index.keys) proj = index.projection mode = proj["mode"] if mode == "keys": proj["included"] = projection_keys elif mode == "all": proj["included"] = meta.columns elif mode == "include": # pragma: no branch if all(isinstance(p, str) for p in proj["included"]): proj["included"] = set(meta.columns_by_name[n] for n in proj["included"]) else: proj["included"] = set(proj["included"]) proj["included"].update(projection_keys) if proj["strict"]: proj["available"] = proj["included"] else: proj["available"] = meta.columns
[ "def", "refresh_index", "(", "meta", ",", "index", ")", "->", "None", ":", "# All projections include model + index keys", "projection_keys", "=", "set", ".", "union", "(", "meta", ".", "keys", ",", "index", ".", "keys", ")", "proj", "=", "index", ".", "projection", "mode", "=", "proj", "[", "\"mode\"", "]", "if", "mode", "==", "\"keys\"", ":", "proj", "[", "\"included\"", "]", "=", "projection_keys", "elif", "mode", "==", "\"all\"", ":", "proj", "[", "\"included\"", "]", "=", "meta", ".", "columns", "elif", "mode", "==", "\"include\"", ":", "# pragma: no branch", "if", "all", "(", "isinstance", "(", "p", ",", "str", ")", "for", "p", "in", "proj", "[", "\"included\"", "]", ")", ":", "proj", "[", "\"included\"", "]", "=", "set", "(", "meta", ".", "columns_by_name", "[", "n", "]", "for", "n", "in", "proj", "[", "\"included\"", "]", ")", "else", ":", "proj", "[", "\"included\"", "]", "=", "set", "(", "proj", "[", "\"included\"", "]", ")", "proj", "[", "\"included\"", "]", ".", "update", "(", "projection_keys", ")", "if", "proj", "[", "\"strict\"", "]", ":", "proj", "[", "\"available\"", "]", "=", "proj", "[", "\"included\"", "]", "else", ":", "proj", "[", "\"available\"", "]", "=", "meta", ".", "columns" ]
Recalculate the projection, hash_key, and range_key for the given index. :param meta: model.Meta to find columns by name :param index: The index to refresh
[ "Recalculate", "the", "projection", "hash_key", "and", "range_key", "for", "the", "given", "index", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/models.py#L1050-L1076
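The projection resolution is easiest to see on a GSI with an "include" projection. A sketch (model invented; assumes, as with the other sketches here, that binding happens at class creation and fills in `projection["included"]`):

.. code-block:: python

    from bloop import BaseModel, Column, GlobalSecondaryIndex, String

    class User(BaseModel):
        class Meta:
            abstract = True
        id = Column(String, hash_key=True)
        email = Column(String)
        nickname = Column(String)
        by_email = GlobalSecondaryIndex(
            projection=["nickname"], hash_key="email")

    proj = User.by_email.projection
    assert proj["mode"] == "include"
    # string names were resolved to Columns, with model + index keys merged in
    assert {User.nickname, User.id, User.email} <= proj["included"]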
numberoverzero/bloop
bloop/models.py
unbind
def unbind(meta, name=None, dynamo_name=None) -> None: """Unconditionally remove any columns or indexes bound to the given name or dynamo_name. .. code-block:: python import bloop.models class User(BaseModel): id = Column(String, hash_key=True) email = Column(String, dynamo_name="e") by_email = GlobalSecondaryIndex(projection="keys", hash_key=email) for dynamo_name in ("id", "e", "by_email"): bloop.models.unbind(User.Meta, dynamo_name=dynamo_name) assert not User.Meta.columns assert not User.Meta.indexes assert not User.Meta.keys .. warning:: This method does not pre- or post- validate the model with the requested changes. You are responsible for ensuring the model still has a hash key, that required columns exist for each index, etc. :param meta: model.Meta to remove the columns or indexes from :param name: column or index name to unbind by. Default is None. :param dynamo_name: column or index name to unbind by. Default is None. """ if name is not None: columns = {x for x in meta.columns if x.name == name} indexes = {x for x in meta.indexes if x.name == name} elif dynamo_name is not None: columns = {x for x in meta.columns if x.dynamo_name == dynamo_name} indexes = {x for x in meta.indexes if x.dynamo_name == dynamo_name} else: raise RuntimeError("Must provide name= or dynamo_name= to unbind from meta") # Nothing in bloop should allow name or dynamo_name # collisions to exist, so this is either a bug or # the user manually hacked up meta. assert len(columns) <= 1 assert len(indexes) <= 1 assert not (columns and indexes) if columns: [column] = columns meta.columns.remove(column) # If these don't line up, there's likely a bug in bloop # or the user manually hacked up columns_by_name expect_same = meta.columns_by_name[column.name] assert expect_same is column meta.columns_by_name.pop(column.name) if column in meta.keys: meta.keys.remove(column) if meta.hash_key is column: meta.hash_key = None if meta.range_key is column: meta.range_key = None delattr(meta.model, column.name) if indexes: [index] = indexes meta.indexes.remove(index) if index in meta.gsis: meta.gsis.remove(index) if index in meta.lsis: meta.lsis.remove(index) delattr(meta.model, index.name)
python
def unbind(meta, name=None, dynamo_name=None) -> None: """Unconditionally remove any columns or indexes bound to the given name or dynamo_name. .. code-block:: python import bloop.models class User(BaseModel): id = Column(String, hash_key=True) email = Column(String, dynamo_name="e") by_email = GlobalSecondaryIndex(projection="keys", hash_key=email) for dynamo_name in ("id", "e", "by_email"): bloop.models.unbind(User.Meta, dynamo_name=dynamo_name) assert not User.Meta.columns assert not User.Meta.indexes assert not User.Meta.keys .. warning:: This method does not pre- or post- validate the model with the requested changes. You are responsible for ensuring the model still has a hash key, that required columns exist for each index, etc. :param meta: model.Meta to remove the columns or indexes from :param name: column or index name to unbind by. Default is None. :param dynamo_name: column or index name to unbind by. Default is None. """ if name is not None: columns = {x for x in meta.columns if x.name == name} indexes = {x for x in meta.indexes if x.name == name} elif dynamo_name is not None: columns = {x for x in meta.columns if x.dynamo_name == dynamo_name} indexes = {x for x in meta.indexes if x.dynamo_name == dynamo_name} else: raise RuntimeError("Must provide name= or dynamo_name= to unbind from meta") # Nothing in bloop should allow name or dynamo_name # collisions to exist, so this is either a bug or # the user manually hacked up meta. assert len(columns) <= 1 assert len(indexes) <= 1 assert not (columns and indexes) if columns: [column] = columns meta.columns.remove(column) # If these don't line up, there's likely a bug in bloop # or the user manually hacked up columns_by_name expect_same = meta.columns_by_name[column.name] assert expect_same is column meta.columns_by_name.pop(column.name) if column in meta.keys: meta.keys.remove(column) if meta.hash_key is column: meta.hash_key = None if meta.range_key is column: meta.range_key = None delattr(meta.model, column.name) if indexes: [index] = indexes meta.indexes.remove(index) if index in meta.gsis: meta.gsis.remove(index) if index in meta.lsis: meta.lsis.remove(index) delattr(meta.model, index.name)
[ "def", "unbind", "(", "meta", ",", "name", "=", "None", ",", "dynamo_name", "=", "None", ")", "->", "None", ":", "if", "name", "is", "not", "None", ":", "columns", "=", "{", "x", "for", "x", "in", "meta", ".", "columns", "if", "x", ".", "name", "==", "name", "}", "indexes", "=", "{", "x", "for", "x", "in", "meta", ".", "indexes", "if", "x", ".", "name", "==", "name", "}", "elif", "dynamo_name", "is", "not", "None", ":", "columns", "=", "{", "x", "for", "x", "in", "meta", ".", "columns", "if", "x", ".", "dynamo_name", "==", "dynamo_name", "}", "indexes", "=", "{", "x", "for", "x", "in", "meta", ".", "indexes", "if", "x", ".", "dynamo_name", "==", "dynamo_name", "}", "else", ":", "raise", "RuntimeError", "(", "\"Must provide name= or dynamo_name= to unbind from meta\"", ")", "# Nothing in bloop should allow name or dynamo_name", "# collisions to exist, so this is either a bug or", "# the user manually hacked up meta.", "assert", "len", "(", "columns", ")", "<=", "1", "assert", "len", "(", "indexes", ")", "<=", "1", "assert", "not", "(", "columns", "and", "indexes", ")", "if", "columns", ":", "[", "column", "]", "=", "columns", "meta", ".", "columns", ".", "remove", "(", "column", ")", "# If these don't line up, there's likely a bug in bloop", "# or the user manually hacked up columns_by_name", "expect_same", "=", "meta", ".", "columns_by_name", "[", "column", ".", "name", "]", "assert", "expect_same", "is", "column", "meta", ".", "columns_by_name", ".", "pop", "(", "column", ".", "name", ")", "if", "column", "in", "meta", ".", "keys", ":", "meta", ".", "keys", ".", "remove", "(", "column", ")", "if", "meta", ".", "hash_key", "is", "column", ":", "meta", ".", "hash_key", "=", "None", "if", "meta", ".", "range_key", "is", "column", ":", "meta", ".", "range_key", "=", "None", "delattr", "(", "meta", ".", "model", ",", "column", ".", "name", ")", "if", "indexes", ":", "[", "index", "]", "=", "indexes", "meta", ".", "indexes", ".", "remove", "(", "index", ")", "if", "index", "in", "meta", ".", "gsis", ":", "meta", ".", "gsis", ".", "remove", "(", "index", ")", "if", "index", "in", "meta", ".", "lsis", ":", "meta", ".", "lsis", ".", "remove", "(", "index", ")", "delattr", "(", "meta", ".", "model", ",", "index", ".", "name", ")" ]
Unconditionally remove any columns or indexes bound to the given name or dynamo_name.

.. code-block:: python

    import bloop.models

    class User(BaseModel):
        id = Column(String, hash_key=True)
        email = Column(String, dynamo_name="e")
        by_email = GlobalSecondaryIndex(projection="keys", hash_key=email)

    for dynamo_name in ("id", "e", "by_email"):
        bloop.models.unbind(User.Meta, dynamo_name=dynamo_name)

    assert not User.Meta.columns
    assert not User.Meta.indexes
    assert not User.Meta.keys

.. warning::
    This method does not pre- or post- validate the model with the requested changes.  You are responsible
    for ensuring the model still has a hash key, that required columns exist for each index, etc.

:param meta: model.Meta to remove the columns or indexes from
:param name: column or index name to unbind by.  Default is None.
:param dynamo_name: column or index dynamo_name to unbind by.  Default is None.
[ "Unconditionally", "remove", "any", "columns", "or", "indexes", "bound", "to", "the", "given", "name", "or", "dynamo_name", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/models.py#L1079-L1151
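For symmetry with the docstring's dynamo_name example, a minimal sketch of the name= path, reusing the same hypothetical User model (the model and its attributes are illustrative, not part of the record above):

import bloop.models

# unbinding by model name rather than dynamo_name
bloop.models.unbind(User.Meta, name="by_email")  # removes the GSI
bloop.models.unbind(User.Meta, name="email")     # removes the column and its model attribute
assert not hasattr(User, "email")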
numberoverzero/bloop
bloop/models.py
BaseModel._load
def _load(cls, attrs, *, context, **kwargs):
    """ dict (dynamo name) -> obj """
    return unpack_from_dynamodb(
        model=cls,
        attrs=attrs or {},
        expected=cls.Meta.columns,
        context=context, **kwargs)
python
def _load(cls, attrs, *, context, **kwargs):
    """ dict (dynamo name) -> obj """
    return unpack_from_dynamodb(
        model=cls,
        attrs=attrs or {},
        expected=cls.Meta.columns,
        context=context, **kwargs)
[ "def", "_load", "(", "cls", ",", "attrs", ",", "*", ",", "context", ",", "*", "*", "kwargs", ")", ":", "return", "unpack_from_dynamodb", "(", "model", "=", "cls", ",", "attrs", "=", "attrs", "or", "{", "}", ",", "expected", "=", "cls", ".", "Meta", ".", "columns", ",", "context", "=", "context", ",", "*", "*", "kwargs", ")" ]
dict (dynamo name) -> obj
[ "dict", "(", "dynamo", "name", ")", "-", ">", "obj" ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/models.py#L207-L213
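A minimal usage sketch, assuming a configured bloop engine and the hypothetical User model from earlier; attrs is in DynamoDB's wire format:

# unpack a raw DynamoDB item into a model instance
attrs = {"id": {"S": "user-123"}}
user = User._load(attrs, context={"engine": engine})
assert user.id == "user-123"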
numberoverzero/bloop
bloop/models.py
BaseModel._dump
def _dump(cls, obj, *, context, **kwargs):
    """ obj -> dict """
    if obj is None:
        return None
    dump = context["engine"]._dump
    filtered = filter(
        lambda item: item[1] is not None,
        ((
            column.dynamo_name,
            dump(column.typedef, getattr(obj, column.name, None), context=context, **kwargs)
        ) for column in cls.Meta.columns))
    return dict(filtered) or None
python
def _dump(cls, obj, *, context, **kwargs):
    """ obj -> dict """
    if obj is None:
        return None
    dump = context["engine"]._dump
    filtered = filter(
        lambda item: item[1] is not None,
        ((
            column.dynamo_name,
            dump(column.typedef, getattr(obj, column.name, None), context=context, **kwargs)
        ) for column in cls.Meta.columns))
    return dict(filtered) or None
[ "def", "_dump", "(", "cls", ",", "obj", ",", "*", ",", "context", ",", "*", "*", "kwargs", ")", ":", "if", "obj", "is", "None", ":", "return", "None", "dump", "=", "context", "[", "\"engine\"", "]", ".", "_dump", "filtered", "=", "filter", "(", "lambda", "item", ":", "item", "[", "1", "]", "is", "not", "None", ",", "(", "(", "column", ".", "dynamo_name", ",", "dump", "(", "column", ".", "typedef", ",", "getattr", "(", "obj", ",", "column", ".", "name", ",", "None", ")", ",", "context", "=", "context", ",", "*", "*", "kwargs", ")", ")", "for", "column", "in", "cls", ".", "Meta", ".", "columns", ")", ")", "return", "dict", "(", "filtered", ")", "or", "None" ]
obj -> dict
[ "obj", "-", ">", "dict" ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/models.py#L216-L227
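The inverse direction, under the same assumptions; attributes that dump to None are dropped, and an object with nothing to dump renders as None:

user = User(id="user-123")  # hypothetical model instance
assert User._dump(user, context={"engine": engine}) == {"id": {"S": "user-123"}}
assert User._dump(None, context={"engine": engine}) is None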
numberoverzero/bloop
bloop/session.py
is_valid_superset
def is_valid_superset(actual_projection, index):
    """Returns True if the actual index is a valid superset of the expected index"""
    projection_type = actual_projection["ProjectionType"]
    if projection_type == "ALL":
        return True
    meta = index.model.Meta
    # all index types provide index keys and model keys
    provides = set.union(meta.keys, index.keys)
    if projection_type == "KEYS_ONLY":
        pass
    elif projection_type == "INCLUDE":  # pragma: no branch (unknown projections break loud)
        by_dynamo_name = {column.dynamo_name: column for column in meta.columns}
        provides.update(
            by_dynamo_name[name]
            for name in actual_projection["NonKeyAttributes"]
            if name in by_dynamo_name  # ignore columns the projection provides if the model doesn't care about them
        )
    else:
        logger.info(f"unexpected index ProjectionType '{projection_type}'")
        return False
    expects = index.projection["included"]
    return provides.issuperset(expects)
python
def is_valid_superset(actual_projection, index):
    """Returns True if the actual index is a valid superset of the expected index"""
    projection_type = actual_projection["ProjectionType"]
    if projection_type == "ALL":
        return True
    meta = index.model.Meta
    # all index types provide index keys and model keys
    provides = set.union(meta.keys, index.keys)
    if projection_type == "KEYS_ONLY":
        pass
    elif projection_type == "INCLUDE":  # pragma: no branch (unknown projections break loud)
        by_dynamo_name = {column.dynamo_name: column for column in meta.columns}
        provides.update(
            by_dynamo_name[name]
            for name in actual_projection["NonKeyAttributes"]
            if name in by_dynamo_name  # ignore columns the projection provides if the model doesn't care about them
        )
    else:
        logger.info(f"unexpected index ProjectionType '{projection_type}'")
        return False
    expects = index.projection["included"]
    return provides.issuperset(expects)
[ "def", "is_valid_superset", "(", "actual_projection", ",", "index", ")", ":", "projection_type", "=", "actual_projection", "[", "\"ProjectionType\"", "]", "if", "projection_type", "==", "\"ALL\"", ":", "return", "True", "meta", "=", "index", ".", "model", ".", "Meta", "# all index types provide index keys and model keys", "provides", "=", "set", ".", "union", "(", "meta", ".", "keys", ",", "index", ".", "keys", ")", "if", "projection_type", "==", "\"KEYS_ONLY\"", ":", "pass", "elif", "projection_type", "==", "\"INCLUDE\"", ":", "# pragma: no branch (unknown projections break loud)", "by_dynamo_name", "=", "{", "column", ".", "dynamo_name", ":", "column", "for", "column", "in", "meta", ".", "columns", "}", "provides", ".", "update", "(", "by_dynamo_name", "[", "name", "]", "for", "name", "in", "actual_projection", "[", "\"NonKeyAttributes\"", "]", "if", "name", "in", "by_dynamo_name", "# ignore columns the projection provides if the model doesn't care about them", ")", "else", ":", "logger", ".", "info", "(", "f\"unexpected index ProjectionType '{projection_type}'\"", ")", "return", "False", "expects", "=", "index", ".", "projection", "[", "\"included\"", "]", "return", "provides", ".", "issuperset", "(", "expects", ")" ]
Returns True if the actual index is a valid superset of the expected index
[ "Returns", "True", "if", "the", "actual", "index", "is", "a", "valid", "superset", "of", "the", "expected", "index" ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/session.py#L664-L685
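A sketch of the INCLUDE branch, assuming a hypothetical by_email index whose expected projection is covered by the table keys plus one extra projected column:

actual_projection = {
    "ProjectionType": "INCLUDE",
    "NonKeyAttributes": ["e"],  # dynamo_name of the extra projected column
}
# True when keys + the projected "e" column cover index.projection["included"]
assert is_valid_superset(actual_projection, User.by_email)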
numberoverzero/bloop
bloop/session.py
SessionWrapper.save_item
def save_item(self, item):
    """Save an object to DynamoDB.

    :param item: Unpacked into kwargs for :func:`boto3.DynamoDB.Client.update_item`.
    :raises bloop.exceptions.ConstraintViolation: if the condition (or atomic) is not met.
    """
    try:
        self.dynamodb_client.update_item(**item)
    except botocore.exceptions.ClientError as error:
        handle_constraint_violation(error)
python
def save_item(self, item):
    """Save an object to DynamoDB.

    :param item: Unpacked into kwargs for :func:`boto3.DynamoDB.Client.update_item`.
    :raises bloop.exceptions.ConstraintViolation: if the condition (or atomic) is not met.
    """
    try:
        self.dynamodb_client.update_item(**item)
    except botocore.exceptions.ClientError as error:
        handle_constraint_violation(error)
[ "def", "save_item", "(", "self", ",", "item", ")", ":", "try", ":", "self", ".", "dynamodb_client", ".", "update_item", "(", "*", "*", "item", ")", "except", "botocore", ".", "exceptions", ".", "ClientError", "as", "error", ":", "handle_constraint_violation", "(", "error", ")" ]
Save an object to DynamoDB.

:param item: Unpacked into kwargs for :func:`boto3.DynamoDB.Client.update_item`.
:raises bloop.exceptions.ConstraintViolation: if the condition (or atomic) is not met.
[ "Save", "an", "object", "to", "DynamoDB", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/session.py#L60-L69
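The item dict is forwarded verbatim to UpdateItem, so a hand-built request might look like this sketch (table, key, and expression names are illustrative; session is a SessionWrapper instance):

session.save_item({
    "TableName": "User",
    "Key": {"id": {"S": "user-123"}},
    "UpdateExpression": "SET #n0 = :v0",
    "ExpressionAttributeNames": {"#n0": "email"},
    "ExpressionAttributeValues": {":v0": {"S": "user@domain"}},
    # an unmet condition surfaces as bloop.exceptions.ConstraintViolation
    "ConditionExpression": "attribute_not_exists(#n0)",
})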
numberoverzero/bloop
bloop/session.py
SessionWrapper.delete_item
def delete_item(self, item):
    """Delete an object in DynamoDB.

    :param item: Unpacked into kwargs for :func:`boto3.DynamoDB.Client.delete_item`.
    :raises bloop.exceptions.ConstraintViolation: if the condition (or atomic) is not met.
    """
    try:
        self.dynamodb_client.delete_item(**item)
    except botocore.exceptions.ClientError as error:
        handle_constraint_violation(error)
python
def delete_item(self, item):
    """Delete an object in DynamoDB.

    :param item: Unpacked into kwargs for :func:`boto3.DynamoDB.Client.delete_item`.
    :raises bloop.exceptions.ConstraintViolation: if the condition (or atomic) is not met.
    """
    try:
        self.dynamodb_client.delete_item(**item)
    except botocore.exceptions.ClientError as error:
        handle_constraint_violation(error)
[ "def", "delete_item", "(", "self", ",", "item", ")", ":", "try", ":", "self", ".", "dynamodb_client", ".", "delete_item", "(", "*", "*", "item", ")", "except", "botocore", ".", "exceptions", ".", "ClientError", "as", "error", ":", "handle_constraint_violation", "(", "error", ")" ]
Delete an object in DynamoDB.

:param item: Unpacked into kwargs for :func:`boto3.DynamoDB.Client.delete_item`.
:raises bloop.exceptions.ConstraintViolation: if the condition (or atomic) is not met.
[ "Delete", "an", "object", "in", "DynamoDB", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/session.py#L71-L80
numberoverzero/bloop
bloop/session.py
SessionWrapper.load_items
def load_items(self, items):
    """Loads any number of items in chunks, handling continuation tokens.

    :param items: Unpacked in chunks into "RequestItems" for :func:`boto3.DynamoDB.Client.batch_get_item`.
    """
    loaded_items = {}
    requests = collections.deque(create_batch_get_chunks(items))
    while requests:
        request = requests.pop()
        try:
            response = self.dynamodb_client.batch_get_item(RequestItems=request)
        except botocore.exceptions.ClientError as error:
            raise BloopException("Unexpected error while loading items.") from error

        # Accumulate results
        for table_name, table_items in response.get("Responses", {}).items():
            loaded_items.setdefault(table_name, []).extend(table_items)

        # Push additional request onto the deque.
        # "UnprocessedKeys" is {} if this request is done
        if response["UnprocessedKeys"]:
            requests.append(response["UnprocessedKeys"])
    return loaded_items
python
def load_items(self, items):
    """Loads any number of items in chunks, handling continuation tokens.

    :param items: Unpacked in chunks into "RequestItems" for :func:`boto3.DynamoDB.Client.batch_get_item`.
    """
    loaded_items = {}
    requests = collections.deque(create_batch_get_chunks(items))
    while requests:
        request = requests.pop()
        try:
            response = self.dynamodb_client.batch_get_item(RequestItems=request)
        except botocore.exceptions.ClientError as error:
            raise BloopException("Unexpected error while loading items.") from error

        # Accumulate results
        for table_name, table_items in response.get("Responses", {}).items():
            loaded_items.setdefault(table_name, []).extend(table_items)

        # Push additional request onto the deque.
        # "UnprocessedKeys" is {} if this request is done
        if response["UnprocessedKeys"]:
            requests.append(response["UnprocessedKeys"])
    return loaded_items
[ "def", "load_items", "(", "self", ",", "items", ")", ":", "loaded_items", "=", "{", "}", "requests", "=", "collections", ".", "deque", "(", "create_batch_get_chunks", "(", "items", ")", ")", "while", "requests", ":", "request", "=", "requests", ".", "pop", "(", ")", "try", ":", "response", "=", "self", ".", "dynamodb_client", ".", "batch_get_item", "(", "RequestItems", "=", "request", ")", "except", "botocore", ".", "exceptions", ".", "ClientError", "as", "error", ":", "raise", "BloopException", "(", "\"Unexpected error while loading items.\"", ")", "from", "error", "# Accumulate results", "for", "table_name", ",", "table_items", "in", "response", ".", "get", "(", "\"Responses\"", ",", "{", "}", ")", ".", "items", "(", ")", ":", "loaded_items", ".", "setdefault", "(", "table_name", ",", "[", "]", ")", ".", "extend", "(", "table_items", ")", "# Push additional request onto the deque.", "# \"UnprocessedKeys\" is {} if this request is done", "if", "response", "[", "\"UnprocessedKeys\"", "]", ":", "requests", ".", "append", "(", "response", "[", "\"UnprocessedKeys\"", "]", ")", "return", "loaded_items" ]
Loads any number of items in chunks, handling continuation tokens.

:param items: Unpacked in chunks into "RequestItems" for :func:`boto3.DynamoDB.Client.batch_get_item`.
[ "Loads", "any", "number", "of", "items", "in", "chunks", "handling", "continuation", "tokens", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/session.py#L82-L104
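A single-table sketch of the items argument, which follows BatchGetItem's RequestItems shape (table and key names are illustrative):

loaded = session.load_items({
    "User": {"Keys": [{"id": {"S": "user-123"}}, {"id": {"S": "user-456"}}]},
})
# results are grouped by table name; unprocessed keys were retried internally
for raw_item in loaded.get("User", []):
    print(raw_item["id"]["S"])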
numberoverzero/bloop
bloop/session.py
SessionWrapper.search_items
def search_items(self, mode, request):
    """Invoke query/scan by name.

    Response always includes "Count" and "ScannedCount"

    :param str mode: "query" or "scan"
    :param request: Unpacked into :func:`boto3.DynamoDB.Client.query` or :func:`boto3.DynamoDB.Client.scan`
    """
    validate_search_mode(mode)
    method = getattr(self.dynamodb_client, mode)
    try:
        response = method(**request)
    except botocore.exceptions.ClientError as error:
        raise BloopException("Unexpected error during {}.".format(mode)) from error
    standardize_query_response(response)
    return response
python
def search_items(self, mode, request):
    """Invoke query/scan by name.

    Response always includes "Count" and "ScannedCount"

    :param str mode: "query" or "scan"
    :param request: Unpacked into :func:`boto3.DynamoDB.Client.query` or :func:`boto3.DynamoDB.Client.scan`
    """
    validate_search_mode(mode)
    method = getattr(self.dynamodb_client, mode)
    try:
        response = method(**request)
    except botocore.exceptions.ClientError as error:
        raise BloopException("Unexpected error during {}.".format(mode)) from error
    standardize_query_response(response)
    return response
[ "def", "search_items", "(", "self", ",", "mode", ",", "request", ")", ":", "validate_search_mode", "(", "mode", ")", "method", "=", "getattr", "(", "self", ".", "dynamodb_client", ",", "mode", ")", "try", ":", "response", "=", "method", "(", "*", "*", "request", ")", "except", "botocore", ".", "exceptions", ".", "ClientError", "as", "error", ":", "raise", "BloopException", "(", "\"Unexpected error during {}.\"", ".", "format", "(", "mode", ")", ")", "from", "error", "standardize_query_response", "(", "response", ")", "return", "response" ]
Invoke query/scan by name.

Response always includes "Count" and "ScannedCount"

:param str mode: "query" or "scan"
:param request: Unpacked into :func:`boto3.DynamoDB.Client.query` or :func:`boto3.DynamoDB.Client.scan`
[ "Invoke", "query", "/", "scan", "by", "name", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/session.py#L124-L139
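A sketch of a query invocation; the request dict uses the plain Query API shape, and the standardized response always carries the counters (names are illustrative):

response = session.search_items("query", {
    "TableName": "User",
    "KeyConditionExpression": "#n0 = :v0",
    "ExpressionAttributeNames": {"#n0": "id"},
    "ExpressionAttributeValues": {":v0": {"S": "user-123"}},
})
print(response["Count"], response["ScannedCount"])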
numberoverzero/bloop
bloop/session.py
SessionWrapper.create_table
def create_table(self, table_name, model):
    """Create the model's table.  Returns True if the table is being created, False otherwise.

    Does not wait for the table to create, and does not validate an existing table.
    Will not raise "ResourceInUseException" if the table exists or is being created.

    :param str table_name: The name of the table to create for the model.
    :param model: The :class:`~bloop.models.BaseModel` to create the table for.
    :return: True if the table is being created, False if the table exists
    :rtype: bool
    """
    table = create_table_request(table_name, model)
    try:
        self.dynamodb_client.create_table(**table)
        is_creating = True
    except botocore.exceptions.ClientError as error:
        handle_table_exists(error, model)
        is_creating = False
    return is_creating
python
def create_table(self, table_name, model):
    """Create the model's table.  Returns True if the table is being created, False otherwise.

    Does not wait for the table to create, and does not validate an existing table.
    Will not raise "ResourceInUseException" if the table exists or is being created.

    :param str table_name: The name of the table to create for the model.
    :param model: The :class:`~bloop.models.BaseModel` to create the table for.
    :return: True if the table is being created, False if the table exists
    :rtype: bool
    """
    table = create_table_request(table_name, model)
    try:
        self.dynamodb_client.create_table(**table)
        is_creating = True
    except botocore.exceptions.ClientError as error:
        handle_table_exists(error, model)
        is_creating = False
    return is_creating
[ "def", "create_table", "(", "self", ",", "table_name", ",", "model", ")", ":", "table", "=", "create_table_request", "(", "table_name", ",", "model", ")", "try", ":", "self", ".", "dynamodb_client", ".", "create_table", "(", "*", "*", "table", ")", "is_creating", "=", "True", "except", "botocore", ".", "exceptions", ".", "ClientError", "as", "error", ":", "handle_table_exists", "(", "error", ",", "model", ")", "is_creating", "=", "False", "return", "is_creating" ]
Create the model's table.  Returns True if the table is being created, False otherwise.

Does not wait for the table to create, and does not validate an existing table.
Will not raise "ResourceInUseException" if the table exists or is being created.

:param str table_name: The name of the table to create for the model.
:param model: The :class:`~bloop.models.BaseModel` to create the table for.
:return: True if the table is being created, False if the table exists
:rtype: bool
[ "Create", "the", "model", "s", "table", ".", "Returns", "True", "if", "the", "table", "is", "being", "created", "False", "otherwise", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/session.py#L141-L159
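Because create_table returns without waiting, a caller that needs the table immediately can pair it with describe_table, which polls until the table is ACTIVE (sketch; User is the hypothetical model from above):

if session.create_table("User", User):
    # newly creating: describe_table blocks until the table reaches ACTIVE
    session.describe_table("User")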
numberoverzero/bloop
bloop/session.py
SessionWrapper.describe_table
def describe_table(self, table_name):
    """
    Polls until the table is ready, then returns the first result when the table was ready.

    The returned dict is standardized to ensure all fields are present, even when empty or across different
    DynamoDB API versions.  TTL information is also inserted.

    :param table_name: The name of the table to describe
    :return: The (sanitized) result of DescribeTable["Table"]
    :rtype: dict
    """
    if table_name in self._tables:
        return self._tables[table_name]
    status, description = None, {}
    calls = 0
    while status is not ready:
        calls += 1
        try:
            description = self.dynamodb_client.describe_table(TableName=table_name)["Table"]
        except botocore.exceptions.ClientError as error:
            raise BloopException("Unexpected error while describing table.") from error
        status = simple_table_status(description)
    logger.debug("describe_table: table \"{}\" was in ACTIVE state after {} calls".format(table_name, calls))
    try:
        ttl = self.dynamodb_client.describe_time_to_live(TableName=table_name)
    except botocore.exceptions.ClientError as error:
        raise BloopException("Unexpected error while describing ttl.") from error
    try:
        backups = self.dynamodb_client.describe_continuous_backups(TableName=table_name)
    except botocore.exceptions.ClientError as error:
        raise BloopException("Unexpected error while describing continuous backups.") from error

    description["TimeToLiveDescription"] = {
        "AttributeName": _read_field(ttl, None, "TimeToLiveDescription", "AttributeName"),
        "TimeToLiveStatus": _read_field(ttl, None, "TimeToLiveDescription", "TimeToLiveStatus"),
    }
    description["ContinuousBackupsDescription"] = {
        "ContinuousBackupsStatus": _read_field(
            backups, None, "ContinuousBackupsDescription", "ContinuousBackupsStatus"),
    }

    table = self._tables[table_name] = sanitize_table_description(description)
    return table
python
def describe_table(self, table_name):
    """
    Polls until the table is ready, then returns the first result when the table was ready.

    The returned dict is standardized to ensure all fields are present, even when empty or across different
    DynamoDB API versions.  TTL information is also inserted.

    :param table_name: The name of the table to describe
    :return: The (sanitized) result of DescribeTable["Table"]
    :rtype: dict
    """
    if table_name in self._tables:
        return self._tables[table_name]
    status, description = None, {}
    calls = 0
    while status is not ready:
        calls += 1
        try:
            description = self.dynamodb_client.describe_table(TableName=table_name)["Table"]
        except botocore.exceptions.ClientError as error:
            raise BloopException("Unexpected error while describing table.") from error
        status = simple_table_status(description)
    logger.debug("describe_table: table \"{}\" was in ACTIVE state after {} calls".format(table_name, calls))
    try:
        ttl = self.dynamodb_client.describe_time_to_live(TableName=table_name)
    except botocore.exceptions.ClientError as error:
        raise BloopException("Unexpected error while describing ttl.") from error
    try:
        backups = self.dynamodb_client.describe_continuous_backups(TableName=table_name)
    except botocore.exceptions.ClientError as error:
        raise BloopException("Unexpected error while describing continuous backups.") from error

    description["TimeToLiveDescription"] = {
        "AttributeName": _read_field(ttl, None, "TimeToLiveDescription", "AttributeName"),
        "TimeToLiveStatus": _read_field(ttl, None, "TimeToLiveDescription", "TimeToLiveStatus"),
    }
    description["ContinuousBackupsDescription"] = {
        "ContinuousBackupsStatus": _read_field(
            backups, None, "ContinuousBackupsDescription", "ContinuousBackupsStatus"),
    }

    table = self._tables[table_name] = sanitize_table_description(description)
    return table
[ "def", "describe_table", "(", "self", ",", "table_name", ")", ":", "if", "table_name", "in", "self", ".", "_tables", ":", "return", "self", ".", "_tables", "[", "table_name", "]", "status", ",", "description", "=", "None", ",", "{", "}", "calls", "=", "0", "while", "status", "is", "not", "ready", ":", "calls", "+=", "1", "try", ":", "description", "=", "self", ".", "dynamodb_client", ".", "describe_table", "(", "TableName", "=", "table_name", ")", "[", "\"Table\"", "]", "except", "botocore", ".", "exceptions", ".", "ClientError", "as", "error", ":", "raise", "BloopException", "(", "\"Unexpected error while describing table.\"", ")", "from", "error", "status", "=", "simple_table_status", "(", "description", ")", "logger", ".", "debug", "(", "\"describe_table: table \\\"{}\\\" was in ACTIVE state after {} calls\"", ".", "format", "(", "table_name", ",", "calls", ")", ")", "try", ":", "ttl", "=", "self", ".", "dynamodb_client", ".", "describe_time_to_live", "(", "TableName", "=", "table_name", ")", "except", "botocore", ".", "exceptions", ".", "ClientError", "as", "error", ":", "raise", "BloopException", "(", "\"Unexpected error while describing ttl.\"", ")", "from", "error", "try", ":", "backups", "=", "self", ".", "dynamodb_client", ".", "describe_continuous_backups", "(", "TableName", "=", "table_name", ")", "except", "botocore", ".", "exceptions", ".", "ClientError", "as", "error", ":", "raise", "BloopException", "(", "\"Unexpected error while describing continuous backups.\"", ")", "from", "error", "description", "[", "\"TimeToLiveDescription\"", "]", "=", "{", "\"AttributeName\"", ":", "_read_field", "(", "ttl", ",", "None", ",", "\"TimeToLiveDescription\"", ",", "\"AttributeName\"", ")", ",", "\"TimeToLiveStatus\"", ":", "_read_field", "(", "ttl", ",", "None", ",", "\"TimeToLiveDescription\"", ",", "\"TimeToLiveStatus\"", ")", ",", "}", "description", "[", "\"ContinuousBackupsDescription\"", "]", "=", "{", "\"ContinuousBackupsStatus\"", ":", "_read_field", "(", "backups", ",", "None", ",", "\"ContinuousBackupsDescription\"", ",", "\"ContinuousBackupsStatus\"", ")", ",", "}", "table", "=", "self", ".", "_tables", "[", "table_name", "]", "=", "sanitize_table_description", "(", "description", ")", "return", "table" ]
Polls until the table is ready, then returns the first result when the table was ready.

The returned dict is standardized to ensure all fields are present, even when empty or across different
DynamoDB API versions.  TTL information is also inserted.

:param table_name: The name of the table to describe
:return: The (sanitized) result of DescribeTable["Table"]
:rtype: dict
[ "Polls", "until", "the", "table", "is", "ready", "then", "returns", "the", "first", "result", "when", "the", "table", "was", "ready", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/session.py#L161-L204
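A sketch showing the per-name cache; the second call returns the same dict without touching the API (table name is illustrative):

description = session.describe_table("User")
print(description["TimeToLiveDescription"]["TimeToLiveStatus"])
assert session.describe_table("User") is description  # served from self._tables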
numberoverzero/bloop
bloop/session.py
SessionWrapper.validate_table
def validate_table(self, table_name, model):
    """Polls until a creating table is ready, then verifies the description against the model's requirements.

    The model may have a subset of all GSIs and LSIs on the table, but the key structure must be exactly the
    same.  The table must have a stream if the model expects one, but not the other way around.  When read or
    write units are not specified for the model or any GSI, the existing values will always pass validation.

    :param str table_name: The name of the table to validate the model against.
    :param model: The :class:`~bloop.models.BaseModel` to validate the table of.
    :raises bloop.exceptions.TableMismatch: When the table does not meet the constraints of the model.
    """
    actual = self.describe_table(table_name)
    if not compare_tables(model, actual):
        raise TableMismatch("The expected and actual tables for {!r} do not match.".format(model.__name__))

    # Fill in values that Meta doesn't know ahead of time (such as arns).
    # These won't be populated unless Meta explicitly cares about the value
    if model.Meta.stream:
        stream_arn = model.Meta.stream["arn"] = actual["LatestStreamArn"]
        logger.debug(f"Set {model.__name__}.Meta.stream['arn'] to '{stream_arn}' from DescribeTable response")
    if model.Meta.ttl:
        ttl_enabled = actual["TimeToLiveDescription"]["TimeToLiveStatus"].lower() == "enabled"
        model.Meta.ttl["enabled"] = ttl_enabled
        logger.debug(f"Set {model.__name__}.Meta.ttl['enabled'] to '{ttl_enabled}' from DescribeTable response")

    # Fill in meta values that the table didn't care about (eg. billing=None)
    if model.Meta.encryption is None:
        sse_enabled = actual["SSEDescription"]["Status"].lower() == "enabled"
        model.Meta.encryption = {"enabled": sse_enabled}
        logger.debug(
            f"Set {model.__name__}.Meta.encryption['enabled'] to '{sse_enabled}' from DescribeTable response")
    if model.Meta.backups is None:
        backups = actual["ContinuousBackupsDescription"]["ContinuousBackupsStatus"] == "ENABLED"
        model.Meta.backups = {"enabled": backups}
        logger.debug(f"Set {model.__name__}.Meta.backups['enabled'] to '{backups}' from DescribeTable response")
    if model.Meta.billing is None:
        billing_mode = {
            "PAY_PER_REQUEST": "on_demand",
            "PROVISIONED": "provisioned"
        }[actual["BillingModeSummary"]["BillingMode"]]
        model.Meta.billing = {"mode": billing_mode}
        logger.debug(f"Set {model.__name__}.Meta.billing['mode'] to '{billing_mode}' from DescribeTable response")
    if model.Meta.read_units is None:
        read_units = model.Meta.read_units = actual["ProvisionedThroughput"]["ReadCapacityUnits"]
        logger.debug(
            f"Set {model.__name__}.Meta.read_units to {read_units} from DescribeTable response")
    if model.Meta.write_units is None:
        write_units = model.Meta.write_units = actual["ProvisionedThroughput"]["WriteCapacityUnits"]
        logger.debug(
            f"Set {model.__name__}.Meta.write_units to {write_units} from DescribeTable response")

    # Replace any ``None`` values for read_units, write_units in GSIs with their actual values
    gsis = {index["IndexName"]: index for index in actual["GlobalSecondaryIndexes"]}
    for index in model.Meta.gsis:
        read_units = gsis[index.dynamo_name]["ProvisionedThroughput"]["ReadCapacityUnits"]
        write_units = gsis[index.dynamo_name]["ProvisionedThroughput"]["WriteCapacityUnits"]
        if index.read_units is None:
            index.read_units = read_units
            logger.debug(
                f"Set {model.__name__}.{index.name}.read_units to {read_units} from DescribeTable response")
        if index.write_units is None:
            index.write_units = write_units
            logger.debug(
                f"Set {model.__name__}.{index.name}.write_units to {write_units} from DescribeTable response")
python
def validate_table(self, table_name, model):
    """Polls until a creating table is ready, then verifies the description against the model's requirements.

    The model may have a subset of all GSIs and LSIs on the table, but the key structure must be exactly the
    same.  The table must have a stream if the model expects one, but not the other way around.  When read or
    write units are not specified for the model or any GSI, the existing values will always pass validation.

    :param str table_name: The name of the table to validate the model against.
    :param model: The :class:`~bloop.models.BaseModel` to validate the table of.
    :raises bloop.exceptions.TableMismatch: When the table does not meet the constraints of the model.
    """
    actual = self.describe_table(table_name)
    if not compare_tables(model, actual):
        raise TableMismatch("The expected and actual tables for {!r} do not match.".format(model.__name__))

    # Fill in values that Meta doesn't know ahead of time (such as arns).
    # These won't be populated unless Meta explicitly cares about the value
    if model.Meta.stream:
        stream_arn = model.Meta.stream["arn"] = actual["LatestStreamArn"]
        logger.debug(f"Set {model.__name__}.Meta.stream['arn'] to '{stream_arn}' from DescribeTable response")
    if model.Meta.ttl:
        ttl_enabled = actual["TimeToLiveDescription"]["TimeToLiveStatus"].lower() == "enabled"
        model.Meta.ttl["enabled"] = ttl_enabled
        logger.debug(f"Set {model.__name__}.Meta.ttl['enabled'] to '{ttl_enabled}' from DescribeTable response")

    # Fill in meta values that the table didn't care about (eg. billing=None)
    if model.Meta.encryption is None:
        sse_enabled = actual["SSEDescription"]["Status"].lower() == "enabled"
        model.Meta.encryption = {"enabled": sse_enabled}
        logger.debug(
            f"Set {model.__name__}.Meta.encryption['enabled'] to '{sse_enabled}' from DescribeTable response")
    if model.Meta.backups is None:
        backups = actual["ContinuousBackupsDescription"]["ContinuousBackupsStatus"] == "ENABLED"
        model.Meta.backups = {"enabled": backups}
        logger.debug(f"Set {model.__name__}.Meta.backups['enabled'] to '{backups}' from DescribeTable response")
    if model.Meta.billing is None:
        billing_mode = {
            "PAY_PER_REQUEST": "on_demand",
            "PROVISIONED": "provisioned"
        }[actual["BillingModeSummary"]["BillingMode"]]
        model.Meta.billing = {"mode": billing_mode}
        logger.debug(f"Set {model.__name__}.Meta.billing['mode'] to '{billing_mode}' from DescribeTable response")
    if model.Meta.read_units is None:
        read_units = model.Meta.read_units = actual["ProvisionedThroughput"]["ReadCapacityUnits"]
        logger.debug(
            f"Set {model.__name__}.Meta.read_units to {read_units} from DescribeTable response")
    if model.Meta.write_units is None:
        write_units = model.Meta.write_units = actual["ProvisionedThroughput"]["WriteCapacityUnits"]
        logger.debug(
            f"Set {model.__name__}.Meta.write_units to {write_units} from DescribeTable response")

    # Replace any ``None`` values for read_units, write_units in GSIs with their actual values
    gsis = {index["IndexName"]: index for index in actual["GlobalSecondaryIndexes"]}
    for index in model.Meta.gsis:
        read_units = gsis[index.dynamo_name]["ProvisionedThroughput"]["ReadCapacityUnits"]
        write_units = gsis[index.dynamo_name]["ProvisionedThroughput"]["WriteCapacityUnits"]
        if index.read_units is None:
            index.read_units = read_units
            logger.debug(
                f"Set {model.__name__}.{index.name}.read_units to {read_units} from DescribeTable response")
        if index.write_units is None:
            index.write_units = write_units
            logger.debug(
                f"Set {model.__name__}.{index.name}.write_units to {write_units} from DescribeTable response")
[ "def", "validate_table", "(", "self", ",", "table_name", ",", "model", ")", ":", "actual", "=", "self", ".", "describe_table", "(", "table_name", ")", "if", "not", "compare_tables", "(", "model", ",", "actual", ")", ":", "raise", "TableMismatch", "(", "\"The expected and actual tables for {!r} do not match.\"", ".", "format", "(", "model", ".", "__name__", ")", ")", "# Fill in values that Meta doesn't know ahead of time (such as arns).", "# These won't be populated unless Meta explicitly cares about the value", "if", "model", ".", "Meta", ".", "stream", ":", "stream_arn", "=", "model", ".", "Meta", ".", "stream", "[", "\"arn\"", "]", "=", "actual", "[", "\"LatestStreamArn\"", "]", "logger", ".", "debug", "(", "f\"Set {model.__name__}.Meta.stream['arn'] to '{stream_arn}' from DescribeTable response\"", ")", "if", "model", ".", "Meta", ".", "ttl", ":", "ttl_enabled", "=", "actual", "[", "\"TimeToLiveDescription\"", "]", "[", "\"TimeToLiveStatus\"", "]", ".", "lower", "(", ")", "==", "\"enabled\"", "model", ".", "Meta", ".", "ttl", "[", "\"enabled\"", "]", "=", "ttl_enabled", "logger", ".", "debug", "(", "f\"Set {model.__name__}.Meta.ttl['enabled'] to '{ttl_enabled}' from DescribeTable response\"", ")", "# Fill in meta values that the table didn't care about (eg. billing=None)", "if", "model", ".", "Meta", ".", "encryption", "is", "None", ":", "sse_enabled", "=", "actual", "[", "\"SSEDescription\"", "]", "[", "\"Status\"", "]", ".", "lower", "(", ")", "==", "\"enabled\"", "model", ".", "Meta", ".", "encryption", "=", "{", "\"enabled\"", ":", "sse_enabled", "}", "logger", ".", "debug", "(", "f\"Set {model.__name__}.Meta.encryption['enabled'] to '{sse_enabled}' from DescribeTable response\"", ")", "if", "model", ".", "Meta", ".", "backups", "is", "None", ":", "backups", "=", "actual", "[", "\"ContinuousBackupsDescription\"", "]", "[", "\"ContinuousBackupsStatus\"", "]", "==", "\"ENABLED\"", "model", ".", "Meta", ".", "backups", "=", "{", "\"enabled\"", ":", "backups", "}", "logger", ".", "debug", "(", "f\"Set {model.__name__}.Meta.backups['enabled'] to '{backups}' from DescribeTable response\"", ")", "if", "model", ".", "Meta", ".", "billing", "is", "None", ":", "billing_mode", "=", "{", "\"PAY_PER_REQUEST\"", ":", "\"on_demand\"", ",", "\"PROVISIONED\"", ":", "\"provisioned\"", "}", "[", "actual", "[", "\"BillingModeSummary\"", "]", "[", "\"BillingMode\"", "]", "]", "model", ".", "Meta", ".", "billing", "=", "{", "\"mode\"", ":", "billing_mode", "}", "logger", ".", "debug", "(", "f\"Set {model.__name__}.Meta.billing['mode'] to '{billing_mode}' from DescribeTable response\"", ")", "if", "model", ".", "Meta", ".", "read_units", "is", "None", ":", "read_units", "=", "model", ".", "Meta", ".", "read_units", "=", "actual", "[", "\"ProvisionedThroughput\"", "]", "[", "\"ReadCapacityUnits\"", "]", "logger", ".", "debug", "(", "f\"Set {model.__name__}.Meta.read_units to {read_units} from DescribeTable response\"", ")", "if", "model", ".", "Meta", ".", "write_units", "is", "None", ":", "write_units", "=", "model", ".", "Meta", ".", "write_units", "=", "actual", "[", "\"ProvisionedThroughput\"", "]", "[", "\"WriteCapacityUnits\"", "]", "logger", ".", "debug", "(", "f\"Set {model.__name__}.Meta.write_units to {write_units} from DescribeTable response\"", ")", "# Replace any ``None`` values for read_units, write_units in GSIs with their actual values", "gsis", "=", "{", "index", "[", "\"IndexName\"", "]", ":", "index", "for", "index", "in", "actual", "[", "\"GlobalSecondaryIndexes\"", "]", "}", "for", 
"index", "in", "model", ".", "Meta", ".", "gsis", ":", "read_units", "=", "gsis", "[", "index", ".", "dynamo_name", "]", "[", "\"ProvisionedThroughput\"", "]", "[", "\"ReadCapacityUnits\"", "]", "write_units", "=", "gsis", "[", "index", ".", "dynamo_name", "]", "[", "\"ProvisionedThroughput\"", "]", "[", "\"WriteCapacityUnits\"", "]", "if", "index", ".", "read_units", "is", "None", ":", "index", ".", "read_units", "=", "read_units", "logger", ".", "debug", "(", "f\"Set {model.__name__}.{index.name}.read_units to {read_units} from DescribeTable response\"", ")", "if", "index", ".", "write_units", "is", "None", ":", "index", ".", "write_units", "=", "write_units", "logger", ".", "debug", "(", "f\"Set {model.__name__}.{index.name}.write_units to {write_units} from DescribeTable response\"", ")" ]
Polls until a creating table is ready, then verifies the description against the model's requirements.

The model may have a subset of all GSIs and LSIs on the table, but the key structure must be exactly the
same.  The table must have a stream if the model expects one, but not the other way around.  When read or
write units are not specified for the model or any GSI, the existing values will always pass validation.

:param str table_name: The name of the table to validate the model against.
:param model: The :class:`~bloop.models.BaseModel` to validate the table of.
:raises bloop.exceptions.TableMismatch: When the table does not meet the constraints of the model.
[ "Polls", "until", "a", "creating", "table", "is", "ready", "then", "verifies", "the", "description", "against", "the", "model", "s", "requirements", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/session.py#L206-L269
numberoverzero/bloop
bloop/session.py
SessionWrapper.enable_ttl
def enable_ttl(self, table_name, model):
    """Calls UpdateTimeToLive on the table according to model.Meta.ttl

    :param table_name: The name of the table to enable the TTL setting on
    :param model: The model to get TTL settings from
    """
    self._tables.pop(table_name, None)
    ttl_name = model.Meta.ttl["column"].dynamo_name
    request = {
        "TableName": table_name,
        "TimeToLiveSpecification": {"AttributeName": ttl_name, "Enabled": True}
    }
    try:
        self.dynamodb_client.update_time_to_live(**request)
    except botocore.exceptions.ClientError as error:
        raise BloopException("Unexpected error while setting TTL.") from error
python
def enable_ttl(self, table_name, model):
    """Calls UpdateTimeToLive on the table according to model.Meta.ttl

    :param table_name: The name of the table to enable the TTL setting on
    :param model: The model to get TTL settings from
    """
    self._tables.pop(table_name, None)
    ttl_name = model.Meta.ttl["column"].dynamo_name
    request = {
        "TableName": table_name,
        "TimeToLiveSpecification": {"AttributeName": ttl_name, "Enabled": True}
    }
    try:
        self.dynamodb_client.update_time_to_live(**request)
    except botocore.exceptions.ClientError as error:
        raise BloopException("Unexpected error while setting TTL.") from error
[ "def", "enable_ttl", "(", "self", ",", "table_name", ",", "model", ")", ":", "self", ".", "_tables", ".", "pop", "(", "table_name", ",", "None", ")", "ttl_name", "=", "model", ".", "Meta", ".", "ttl", "[", "\"column\"", "]", ".", "dynamo_name", "request", "=", "{", "\"TableName\"", ":", "table_name", ",", "\"TimeToLiveSpecification\"", ":", "{", "\"AttributeName\"", ":", "ttl_name", ",", "\"Enabled\"", ":", "True", "}", "}", "try", ":", "self", ".", "dynamodb_client", ".", "update_time_to_live", "(", "*", "*", "request", ")", "except", "botocore", ".", "exceptions", ".", "ClientError", "as", "error", ":", "raise", "BloopException", "(", "\"Unexpected error while setting TTL.\"", ")", "from", "error" ]
Calls UpdateTimeToLive on the table according to model.Meta.ttl

:param table_name: The name of the table to enable the TTL setting on
:param model: The model to get TTL settings from
[ "Calls", "UpdateTimeToLive", "on", "the", "table", "according", "to", "model", ".", "Meta", "[", "ttl", "]" ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/session.py#L271-L286
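A minimal sketch, assuming the hypothetical model declares a TTL column in its Meta:

# e.g. User.Meta.ttl = {"column": User.expiry} at model definition time
session.enable_ttl("User", User)  # also evicts "User" from the description cache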
numberoverzero/bloop
bloop/session.py
SessionWrapper.enable_backups
def enable_backups(self, table_name, model):
    """Calls UpdateContinuousBackups on the table according to model.Meta.backups

    :param table_name: The name of the table to enable Continuous Backups on
    :param model: The model to get Continuous Backups settings from
    """
    self._tables.pop(table_name, None)
    request = {
        "TableName": table_name,
        "PointInTimeRecoverySpecification": {"PointInTimeRecoveryEnabled": True}
    }
    try:
        self.dynamodb_client.update_continuous_backups(**request)
    except botocore.exceptions.ClientError as error:
        raise BloopException("Unexpected error while setting Continuous Backups.") from error
python
def enable_backups(self, table_name, model):
    """Calls UpdateContinuousBackups on the table according to model.Meta.backups

    :param table_name: The name of the table to enable Continuous Backups on
    :param model: The model to get Continuous Backups settings from
    """
    self._tables.pop(table_name, None)
    request = {
        "TableName": table_name,
        "PointInTimeRecoverySpecification": {"PointInTimeRecoveryEnabled": True}
    }
    try:
        self.dynamodb_client.update_continuous_backups(**request)
    except botocore.exceptions.ClientError as error:
        raise BloopException("Unexpected error while setting Continuous Backups.") from error
[ "def", "enable_backups", "(", "self", ",", "table_name", ",", "model", ")", ":", "self", ".", "_tables", ".", "pop", "(", "table_name", ",", "None", ")", "request", "=", "{", "\"TableName\"", ":", "table_name", ",", "\"PointInTimeRecoverySpecification\"", ":", "{", "\"PointInTimeRecoveryEnabled\"", ":", "True", "}", "}", "try", ":", "self", ".", "dynamodb_client", ".", "update_continuous_backups", "(", "*", "*", "request", ")", "except", "botocore", ".", "exceptions", ".", "ClientError", "as", "error", ":", "raise", "BloopException", "(", "\"Unexpected error while setting Continuous Backups.\"", ")", "from", "error" ]
Calls UpdateContinuousBackups on the table according to model.Meta.backups

:param table_name: The name of the table to enable Continuous Backups on
:param model: The model to get Continuous Backups settings from
[ "Calls", "UpdateContinuousBackups", "on", "the", "table", "according", "to", "model", ".", "Meta", "[", "continuous_backups", "]" ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/session.py#L288-L302
numberoverzero/bloop
bloop/session.py
SessionWrapper.describe_stream
def describe_stream(self, stream_arn, first_shard=None):
    """Wraps :func:`boto3.DynamoDBStreams.Client.describe_stream`, handling continuation tokens.

    :param str stream_arn: Stream arn, usually from the model's ``Meta.stream["arn"]``.
    :param str first_shard: *(Optional)* If provided, only shards after this shard id will be returned.
    :return: All shards in the stream, or a subset if ``first_shard`` is provided.
    :rtype: dict
    """
    description = {"Shards": []}

    request = {"StreamArn": stream_arn, "ExclusiveStartShardId": first_shard}
    # boto3 isn't down with literal Nones.
    if first_shard is None:
        request.pop("ExclusiveStartShardId")

    while request.get("ExclusiveStartShardId") is not missing:
        try:
            response = self.stream_client.describe_stream(**request)["StreamDescription"]
        except botocore.exceptions.ClientError as error:
            if error.response["Error"]["Code"] == "ResourceNotFoundException":
                raise InvalidStream(f"The stream arn {stream_arn!r} does not exist.") from error
            raise BloopException("Unexpected error while describing stream.") from error
        # Docs aren't clear if the terminal value is null, or won't exist.
        # Since we don't terminate the loop on None, the "or missing" here
        # will ensure we stop on a falsey value.
        request["ExclusiveStartShardId"] = response.pop("LastEvaluatedShardId", None) or missing
        description["Shards"].extend(response.pop("Shards", []))
        description.update(response)
    return description
python
def describe_stream(self, stream_arn, first_shard=None):
    """Wraps :func:`boto3.DynamoDBStreams.Client.describe_stream`, handling continuation tokens.

    :param str stream_arn: Stream arn, usually from the model's ``Meta.stream["arn"]``.
    :param str first_shard: *(Optional)* If provided, only shards after this shard id will be returned.
    :return: All shards in the stream, or a subset if ``first_shard`` is provided.
    :rtype: dict
    """
    description = {"Shards": []}

    request = {"StreamArn": stream_arn, "ExclusiveStartShardId": first_shard}
    # boto3 isn't down with literal Nones.
    if first_shard is None:
        request.pop("ExclusiveStartShardId")

    while request.get("ExclusiveStartShardId") is not missing:
        try:
            response = self.stream_client.describe_stream(**request)["StreamDescription"]
        except botocore.exceptions.ClientError as error:
            if error.response["Error"]["Code"] == "ResourceNotFoundException":
                raise InvalidStream(f"The stream arn {stream_arn!r} does not exist.") from error
            raise BloopException("Unexpected error while describing stream.") from error
        # Docs aren't clear if the terminal value is null, or won't exist.
        # Since we don't terminate the loop on None, the "or missing" here
        # will ensure we stop on a falsey value.
        request["ExclusiveStartShardId"] = response.pop("LastEvaluatedShardId", None) or missing
        description["Shards"].extend(response.pop("Shards", []))
        description.update(response)
    return description
[ "def", "describe_stream", "(", "self", ",", "stream_arn", ",", "first_shard", "=", "None", ")", ":", "description", "=", "{", "\"Shards\"", ":", "[", "]", "}", "request", "=", "{", "\"StreamArn\"", ":", "stream_arn", ",", "\"ExclusiveStartShardId\"", ":", "first_shard", "}", "# boto3 isn't down with literal Nones.", "if", "first_shard", "is", "None", ":", "request", ".", "pop", "(", "\"ExclusiveStartShardId\"", ")", "while", "request", ".", "get", "(", "\"ExclusiveStartShardId\"", ")", "is", "not", "missing", ":", "try", ":", "response", "=", "self", ".", "stream_client", ".", "describe_stream", "(", "*", "*", "request", ")", "[", "\"StreamDescription\"", "]", "except", "botocore", ".", "exceptions", ".", "ClientError", "as", "error", ":", "if", "error", ".", "response", "[", "\"Error\"", "]", "[", "\"Code\"", "]", "==", "\"ResourceNotFoundException\"", ":", "raise", "InvalidStream", "(", "f\"The stream arn {stream_arn!r} does not exist.\"", ")", "from", "error", "raise", "BloopException", "(", "\"Unexpected error while describing stream.\"", ")", "from", "error", "# Docs aren't clear if the terminal value is null, or won't exist.", "# Since we don't terminate the loop on None, the \"or missing\" here", "# will ensure we stop on a falsey value.", "request", "[", "\"ExclusiveStartShardId\"", "]", "=", "response", ".", "pop", "(", "\"LastEvaluatedShardId\"", ",", "None", ")", "or", "missing", "description", "[", "\"Shards\"", "]", ".", "extend", "(", "response", ".", "pop", "(", "\"Shards\"", ",", "[", "]", ")", ")", "description", ".", "update", "(", "response", ")", "return", "description" ]
Wraps :func:`boto3.DynamoDBStreams.Client.describe_stream`, handling continuation tokens.

:param str stream_arn: Stream arn, usually from the model's ``Meta.stream["arn"]``.
:param str first_shard: *(Optional)* If provided, only shards after this shard id will be returned.
:return: All shards in the stream, or a subset if ``first_shard`` is provided.
:rtype: dict
[ "Wraps", ":", "func", ":", "boto3", ".", "DynamoDBStreams", ".", "Client", ".", "describe_stream", "handling", "continuation", "tokens", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/session.py#L304-L332
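A sketch that collects every shard id, assuming the model's stream arn was populated (for example by validate_table):

stream = session.describe_stream(stream_arn=User.Meta.stream["arn"])
shard_ids = [shard["ShardId"] for shard in stream["Shards"]]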
numberoverzero/bloop
bloop/session.py
SessionWrapper.get_shard_iterator
def get_shard_iterator(self, *, stream_arn, shard_id, iterator_type, sequence_number=None):
    """Wraps :func:`boto3.DynamoDBStreams.Client.get_shard_iterator`.

    :param str stream_arn: Stream arn.  Usually :data:`Shard.stream_arn <bloop.stream.shard.Shard.stream_arn>`.
    :param str shard_id: Shard identifier.  Usually :data:`Shard.shard_id <bloop.stream.shard.Shard.shard_id>`.
    :param str iterator_type: "sequence_at", "sequence_after", "trim_horizon", or "latest"
    :param sequence_number:
    :return: Iterator id, valid for 15 minutes.
    :rtype: str
    :raises bloop.exceptions.RecordsExpired: Tried to get an iterator beyond the Trim Horizon.
    """
    real_iterator_type = validate_stream_iterator_type(iterator_type)
    request = {
        "StreamArn": stream_arn,
        "ShardId": shard_id,
        "ShardIteratorType": real_iterator_type,
        "SequenceNumber": sequence_number
    }
    # boto3 isn't down with literal Nones.
    if sequence_number is None:
        request.pop("SequenceNumber")
    try:
        return self.stream_client.get_shard_iterator(**request)["ShardIterator"]
    except botocore.exceptions.ClientError as error:
        if error.response["Error"]["Code"] == "TrimmedDataAccessException":
            raise RecordsExpired from error
        raise BloopException("Unexpected error while creating shard iterator") from error
python
def get_shard_iterator(self, *, stream_arn, shard_id, iterator_type, sequence_number=None):
    """Wraps :func:`boto3.DynamoDBStreams.Client.get_shard_iterator`.

    :param str stream_arn: Stream arn.  Usually :data:`Shard.stream_arn <bloop.stream.shard.Shard.stream_arn>`.
    :param str shard_id: Shard identifier.  Usually :data:`Shard.shard_id <bloop.stream.shard.Shard.shard_id>`.
    :param str iterator_type: "sequence_at", "sequence_after", "trim_horizon", or "latest"
    :param sequence_number:
    :return: Iterator id, valid for 15 minutes.
    :rtype: str
    :raises bloop.exceptions.RecordsExpired: Tried to get an iterator beyond the Trim Horizon.
    """
    real_iterator_type = validate_stream_iterator_type(iterator_type)
    request = {
        "StreamArn": stream_arn,
        "ShardId": shard_id,
        "ShardIteratorType": real_iterator_type,
        "SequenceNumber": sequence_number
    }
    # boto3 isn't down with literal Nones.
    if sequence_number is None:
        request.pop("SequenceNumber")
    try:
        return self.stream_client.get_shard_iterator(**request)["ShardIterator"]
    except botocore.exceptions.ClientError as error:
        if error.response["Error"]["Code"] == "TrimmedDataAccessException":
            raise RecordsExpired from error
        raise BloopException("Unexpected error while creating shard iterator") from error
[ "def", "get_shard_iterator", "(", "self", ",", "*", ",", "stream_arn", ",", "shard_id", ",", "iterator_type", ",", "sequence_number", "=", "None", ")", ":", "real_iterator_type", "=", "validate_stream_iterator_type", "(", "iterator_type", ")", "request", "=", "{", "\"StreamArn\"", ":", "stream_arn", ",", "\"ShardId\"", ":", "shard_id", ",", "\"ShardIteratorType\"", ":", "real_iterator_type", ",", "\"SequenceNumber\"", ":", "sequence_number", "}", "# boto3 isn't down with literal Nones.", "if", "sequence_number", "is", "None", ":", "request", ".", "pop", "(", "\"SequenceNumber\"", ")", "try", ":", "return", "self", ".", "stream_client", ".", "get_shard_iterator", "(", "*", "*", "request", ")", "[", "\"ShardIterator\"", "]", "except", "botocore", ".", "exceptions", ".", "ClientError", "as", "error", ":", "if", "error", ".", "response", "[", "\"Error\"", "]", "[", "\"Code\"", "]", "==", "\"TrimmedDataAccessException\"", ":", "raise", "RecordsExpired", "from", "error", "raise", "BloopException", "(", "\"Unexpected error while creating shard iterator\"", ")", "from", "error" ]
Wraps :func:`boto3.DynamoDBStreams.Client.get_shard_iterator`.

:param str stream_arn: Stream arn.  Usually :data:`Shard.stream_arn <bloop.stream.shard.Shard.stream_arn>`.
:param str shard_id: Shard identifier.  Usually :data:`Shard.shard_id <bloop.stream.shard.Shard.shard_id>`.
:param str iterator_type: "sequence_at", "sequence_after", "trim_horizon", or "latest"
:param sequence_number:
:return: Iterator id, valid for 15 minutes.
:rtype: str
:raises bloop.exceptions.RecordsExpired: Tried to get an iterator beyond the Trim Horizon.
[ "Wraps", ":", "func", ":", "boto3", ".", "DynamoDBStreams", ".", "Client", ".", "get_shard_iterator", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/session.py#L334-L360
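Continuing the sketch above, an iterator at the oldest available record of the first shard; the lowercase type is translated by validate_stream_iterator_type:

iterator_id = session.get_shard_iterator(
    stream_arn=User.Meta.stream["arn"],
    shard_id=shard_ids[0],
    iterator_type="trim_horizon",
)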
numberoverzero/bloop
bloop/session.py
SessionWrapper.get_stream_records
def get_stream_records(self, iterator_id):
    """Wraps :func:`boto3.DynamoDBStreams.Client.get_records`.

    :param iterator_id: Iterator id.  Usually :data:`Shard.iterator_id <bloop.stream.shard.Shard.iterator_id>`.
    :return: Dict with "Records" list (may be empty) and "NextShardIterator" str (may not exist).
    :rtype: dict
    :raises bloop.exceptions.RecordsExpired: The iterator moved beyond the Trim Horizon since it was created.
    :raises bloop.exceptions.ShardIteratorExpired: The iterator was created more than 15 minutes ago.
    """
    try:
        return self.stream_client.get_records(ShardIterator=iterator_id)
    except botocore.exceptions.ClientError as error:
        if error.response["Error"]["Code"] == "TrimmedDataAccessException":
            raise RecordsExpired from error
        elif error.response["Error"]["Code"] == "ExpiredIteratorException":
            raise ShardIteratorExpired from error
        raise BloopException("Unexpected error while getting records.") from error
python
def get_stream_records(self, iterator_id):
    """Wraps :func:`boto3.DynamoDBStreams.Client.get_records`.

    :param iterator_id: Iterator id.  Usually :data:`Shard.iterator_id <bloop.stream.shard.Shard.iterator_id>`.
    :return: Dict with "Records" list (may be empty) and "NextShardIterator" str (may not exist).
    :rtype: dict
    :raises bloop.exceptions.RecordsExpired: The iterator moved beyond the Trim Horizon since it was created.
    :raises bloop.exceptions.ShardIteratorExpired: The iterator was created more than 15 minutes ago.
    """
    try:
        return self.stream_client.get_records(ShardIterator=iterator_id)
    except botocore.exceptions.ClientError as error:
        if error.response["Error"]["Code"] == "TrimmedDataAccessException":
            raise RecordsExpired from error
        elif error.response["Error"]["Code"] == "ExpiredIteratorException":
            raise ShardIteratorExpired from error
        raise BloopException("Unexpected error while getting records.") from error
[ "def", "get_stream_records", "(", "self", ",", "iterator_id", ")", ":", "try", ":", "return", "self", ".", "stream_client", ".", "get_records", "(", "ShardIterator", "=", "iterator_id", ")", "except", "botocore", ".", "exceptions", ".", "ClientError", "as", "error", ":", "if", "error", ".", "response", "[", "\"Error\"", "]", "[", "\"Code\"", "]", "==", "\"TrimmedDataAccessException\"", ":", "raise", "RecordsExpired", "from", "error", "elif", "error", ".", "response", "[", "\"Error\"", "]", "[", "\"Code\"", "]", "==", "\"ExpiredIteratorException\"", ":", "raise", "ShardIteratorExpired", "from", "error", "raise", "BloopException", "(", "\"Unexpected error while getting records.\"", ")", "from", "error" ]
Wraps :func:`boto3.DynamoDBStreams.Client.get_records`.

:param iterator_id: Iterator id.  Usually :data:`Shard.iterator_id <bloop.stream.shard.Shard.iterator_id>`.
:return: Dict with "Records" list (may be empty) and "NextShardIterator" str (may not exist).
:rtype: dict
:raises bloop.exceptions.RecordsExpired: The iterator moved beyond the Trim Horizon since it was created.
:raises bloop.exceptions.ShardIteratorExpired: The iterator was created more than 15 minutes ago.
[ "Wraps", ":", "func", ":", "boto3", ".", "DynamoDBStreams", ".", "Client", ".", "get_records", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/session.py#L362-L378
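A minimal sketch of draining a single shard by combining the two wrappers above; ``session`` is assumed to be a configured SessionWrapper, the arn and shard id are placeholders, and ``process`` is a hypothetical record handler.

.. code-block:: python

    from bloop.exceptions import ShardIteratorExpired

    stream_arn = "arn:aws:dynamodb:..."  # placeholder
    shard_id = "shardId-..."             # placeholder

    iterator_id = session.get_shard_iterator(
        stream_arn=stream_arn, shard_id=shard_id,
        iterator_type="trim_horizon", sequence_number=None)

    while iterator_id:
        try:
            response = session.get_stream_records(iterator_id)
        except ShardIteratorExpired:
            # Created more than 15 minutes ago; request a fresh iterator.
            # Restarting from trim_horizon may re-read earlier records.
            iterator_id = session.get_shard_iterator(
                stream_arn=stream_arn, shard_id=shard_id,
                iterator_type="trim_horizon", sequence_number=None)
            continue
        for record in response["Records"]:
            process(record)  # hypothetical handler
        # "NextShardIterator" is absent once the shard is fully consumed.
        iterator_id = response.get("NextShardIterator")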
numberoverzero/bloop
bloop/session.py
SessionWrapper.transaction_read
def transaction_read(self, items): """ Wraps :func:`boto3.DynamoDB.Client.transact_get_items`. :param items: Unpacked into "TransactItems" for :func:`boto3.DynamoDB.Client.transact_get_items` :raises bloop.exceptions.TransactionCanceled: if the transaction was canceled. :return: Dict with "Responses" list """ try: return self.dynamodb_client.transact_get_items(TransactItems=items) except botocore.exceptions.ClientError as error: if error.response["Error"]["Code"] == "TransactionCanceledException": raise TransactionCanceled from error raise BloopException("Unexpected error during transaction read.") from error
python
def transaction_read(self, items): """ Wraps :func:`boto3.DynamoDB.Client.transact_get_items`. :param items: Unpacked into "TransactItems" for :func:`boto3.DynamoDB.Client.transact_get_items` :raises bloop.exceptions.TransactionCanceled: if the transaction was canceled. :return: Dict with "Responses" list """ try: return self.dynamodb_client.transact_get_items(TransactItems=items) except botocore.exceptions.ClientError as error: if error.response["Error"]["Code"] == "TransactionCanceledException": raise TransactionCanceled from error raise BloopException("Unexpected error during transaction read.") from error
[ "def", "transaction_read", "(", "self", ",", "items", ")", ":", "try", ":", "return", "self", ".", "dynamodb_client", ".", "transact_get_items", "(", "TransactItems", "=", "items", ")", "except", "botocore", ".", "exceptions", ".", "ClientError", "as", "error", ":", "if", "error", ".", "response", "[", "\"Error\"", "]", "[", "\"Code\"", "]", "==", "\"TransactionCanceledException\"", ":", "raise", "TransactionCanceled", "from", "error", "raise", "BloopException", "(", "\"Unexpected error during transaction read.\"", ")", "from", "error" ]
Wraps :func:`boto3.DynamoDB.Client.transact_get_items`. :param items: Unpacked into "TransactItems" for :func:`boto3.DynamoDB.Client.transact_get_items` :raises bloop.exceptions.TransactionCanceled: if the transaction was canceled. :return: Dict with "Responses" list
[ "Wraps", ":", "func", ":", "boto3", ".", "DynamoDB", ".", "Client", ".", "db", ".", "transact_get_items", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/session.py#L380-L393
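For reference, a hedged sketch of the request and response shapes; the table name and key are hypothetical, and the wire format follows :func:`boto3.DynamoDB.Client.transact_get_items`.

.. code-block:: python

    items = [
        {"Get": {
            "TableName": "User",                     # hypothetical table
            "Key": {"id": {"S": "numberoverzero"}},  # dumped key, wire format
        }},
    ]
    response = session.transaction_read(items)
    for entry in response["Responses"]:
        item = entry.get("Item")  # absent when the row doesn't exist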
numberoverzero/bloop
bloop/session.py
SessionWrapper.transaction_write
def transaction_write(self, items, client_request_token): """ Wraps :func:`boto3.DynamoDB.Client.transact_write_items`. :param items: Unpacked into "TransactItems" for :func:`boto3.DynamoDB.Client.transact_write_items` :param client_request_token: Idempotency token valid for 10 minutes from first use. Unpacked into "ClientRequestToken" :raises bloop.exceptions.TransactionCanceled: if the transaction was canceled. """ try: self.dynamodb_client.transact_write_items( TransactItems=items, ClientRequestToken=client_request_token ) except botocore.exceptions.ClientError as error: if error.response["Error"]["Code"] == "TransactionCanceledException": raise TransactionCanceled from error raise BloopException("Unexpected error during transaction write.") from error
python
def transaction_write(self, items, client_request_token): """ Wraps :func:`boto3.DynamoDB.Client.transact_write_items`. :param items: Unpacked into "TransactItems" for :func:`boto3.DynamoDB.Client.transact_write_items` :param client_request_token: Idempotency token valid for 10 minutes from first use. Unpacked into "ClientRequestToken" :raises bloop.exceptions.TransactionCanceled: if the transaction was canceled. """ try: self.dynamodb_client.transact_write_items( TransactItems=items, ClientRequestToken=client_request_token ) except botocore.exceptions.ClientError as error: if error.response["Error"]["Code"] == "TransactionCanceledException": raise TransactionCanceled from error raise BloopException("Unexpected error during transaction write.") from error
[ "def", "transaction_write", "(", "self", ",", "items", ",", "client_request_token", ")", ":", "try", ":", "self", ".", "dynamodb_client", ".", "transact_write_items", "(", "TransactItems", "=", "items", ",", "ClientRequestToken", "=", "client_request_token", ")", "except", "botocore", ".", "exceptions", ".", "ClientError", "as", "error", ":", "if", "error", ".", "response", "[", "\"Error\"", "]", "[", "\"Code\"", "]", "==", "\"TransactionCanceledException\"", ":", "raise", "TransactionCanceled", "from", "error", "raise", "BloopException", "(", "\"Unexpected error during transaction write.\"", ")", "from", "error" ]
Wraps :func:`boto3.DynamoDB.Client.transact_write_items`. :param items: Unpacked into "TransactItems" for :func:`boto3.DynamoDB.Client.transact_write_items` :param client_request_token: Idempotency token valid for 10 minutes from first use. Unpacked into "ClientRequestToken" :raises bloop.exceptions.TransactionCanceled: if the transaction was canceled.
[ "Wraps", ":", "func", ":", "boto3", ".", "DynamoDB", ".", "Client", ".", "db", ".", "transact_write_items", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/session.py#L395-L412
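A sketch of a write transaction call, again assuming a configured ``session``; the item shapes follow :func:`boto3.DynamoDB.Client.transact_write_items` and the table names are hypothetical.

.. code-block:: python

    import uuid

    items = [
        {"Put": {
            "TableName": "User",
            "Item": {"id": {"S": "numberoverzero"}},
        }},
        {"ConditionCheck": {
            "TableName": "Tweet",
            "Key": {"id": {"S": "numberoverzero"}},
            "ConditionExpression": "attribute_exists(id)",
        }},
    ]
    # Reusing the same token within 10 minutes makes the write idempotent.
    token = uuid.uuid4().hex
    session.transaction_write(items, client_request_token=token)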
numberoverzero/bloop
bloop/search.py
check_hash_key
def check_hash_key(query_on, key): """Only allows == against query_on.hash_key""" return ( isinstance(key, BaseCondition) and (key.operation == "==") and (key.column is query_on.hash_key) )
python
def check_hash_key(query_on, key): """Only allows == against query_on.hash_key""" return ( isinstance(key, BaseCondition) and (key.operation == "==") and (key.column is query_on.hash_key) )
[ "def", "check_hash_key", "(", "query_on", ",", "key", ")", ":", "return", "(", "isinstance", "(", "key", ",", "BaseCondition", ")", "and", "(", "key", ".", "operation", "==", "\"==\"", ")", "and", "(", "key", ".", "column", "is", "query_on", ".", "hash_key", ")", ")" ]
Only allows == against query_on.hash_key
[ "Only", "allows", "==", "against", "query_on", ".", "hash_key" ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/search.py#L131-L137
numberoverzero/bloop
bloop/search.py
check_range_key
def check_range_key(query_on, key): """BeginsWith, Between, or any Comparison except '!=' against query_on.range_key""" return ( isinstance(key, BaseCondition) and key.operation in ("begins_with", "between", "<", ">", "<=", ">=", "==") and key.column is query_on.range_key )
python
def check_range_key(query_on, key): """BeginsWith, Between, or any Comparison except '!=' against query_on.range_key""" return ( isinstance(key, BaseCondition) and key.operation in ("begins_with", "between", "<", ">", "<=", ">=", "==") and key.column is query_on.range_key )
[ "def", "check_range_key", "(", "query_on", ",", "key", ")", ":", "return", "(", "isinstance", "(", "key", ",", "BaseCondition", ")", "and", "key", ".", "operation", "in", "(", "\"begins_with\"", ",", "\"between\"", ",", "\"<\"", ",", "\">\"", ",", "\"<=\"", ",", "\">=\"", ",", "\"==\"", ")", "and", "key", ".", "column", "is", "query_on", ".", "range_key", ")" ]
BeginsWith, Between, or any Comparison except '!=' against query_on.range_key
[ "BeginsWith", "Between", "or", "any", "Comparison", "except", "!", "=", "against", "query_on", ".", "range_key" ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/search.py#L140-L146
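To illustrate what the two validators accept, a hedged sketch against a hypothetical model whose Meta exposes ``hash_key`` and ``range_key`` (here ``User.id`` and ``User.created``); the exact query target object is an assumption.

.. code-block:: python

    # Hash key: only equality passes.
    check_hash_key(User.Meta, User.id == "numberoverzero")   # True
    check_hash_key(User.Meta, User.id >= "a")                # False

    # Range key: begins_with, between, and comparisons other than != pass.
    check_range_key(User.Meta, User.created.begins_with("2016-"))  # True
    check_range_key(User.Meta, User.created != "2016-08")          # False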
numberoverzero/bloop
bloop/search.py
Search.prepare
def prepare(self): """Constructs a :class:`~bloop.search.PreparedSearch`.""" p = PreparedSearch() p.prepare( engine=self.engine, mode=self.mode, model=self.model, index=self.index, key=self.key, filter=self.filter, projection=self.projection, consistent=self.consistent, forward=self.forward, parallel=self.parallel ) return p
python
def prepare(self): """Constructs a :class:`~bloop.search.PreparedSearch`.""" p = PreparedSearch() p.prepare( engine=self.engine, mode=self.mode, model=self.model, index=self.index, key=self.key, filter=self.filter, projection=self.projection, consistent=self.consistent, forward=self.forward, parallel=self.parallel ) return p
[ "def", "prepare", "(", "self", ")", ":", "p", "=", "PreparedSearch", "(", ")", "p", ".", "prepare", "(", "engine", "=", "self", ".", "engine", ",", "mode", "=", "self", ".", "mode", ",", "model", "=", "self", ".", "model", ",", "index", "=", "self", ".", "index", ",", "key", "=", "self", ".", "key", ",", "filter", "=", "self", ".", "filter", ",", "projection", "=", "self", ".", "projection", ",", "consistent", "=", "self", ".", "consistent", ",", "forward", "=", "self", ".", "forward", ",", "parallel", "=", "self", ".", "parallel", ")", "return", "p" ]
Constructs a :class:`~bloop.search.PreparedSearch`.
[ "Constructs", "a", ":", "class", ":", "~bloop", ".", "search", ".", "PreparedSearch", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/search.py#L200-L215
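In practice a Search is constructed for you; a short sketch of the public path through :func:`Engine.query`, which builds, prepares, and iterates a query-mode search (``User`` is a hypothetical model):

.. code-block:: python

    q = engine.query(User, key=User.id == "numberoverzero")
    for user in q:
        print(user)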
numberoverzero/bloop
bloop/search.py
PreparedSearch.prepare
def prepare( self, engine=None, mode=None, model=None, index=None, key=None, filter=None, projection=None, consistent=None, forward=None, parallel=None): """Validates the search parameters and builds the base request dict for each Query/Scan call.""" self.prepare_iterator_cls(engine, mode) self.prepare_model(model, index, consistent) self.prepare_key(key) self.prepare_projection(projection) self.prepare_filter(filter) self.prepare_constraints(forward, parallel) self.prepare_request()
python
def prepare( self, engine=None, mode=None, model=None, index=None, key=None, filter=None, projection=None, consistent=None, forward=None, parallel=None): """Validates the search parameters and builds the base request dict for each Query/Scan call.""" self.prepare_iterator_cls(engine, mode) self.prepare_model(model, index, consistent) self.prepare_key(key) self.prepare_projection(projection) self.prepare_filter(filter) self.prepare_constraints(forward, parallel) self.prepare_request()
[ "def", "prepare", "(", "self", ",", "engine", "=", "None", ",", "mode", "=", "None", ",", "model", "=", "None", ",", "index", "=", "None", ",", "key", "=", "None", ",", "filter", "=", "None", ",", "projection", "=", "None", ",", "consistent", "=", "None", ",", "forward", "=", "None", ",", "parallel", "=", "None", ")", ":", "self", ".", "prepare_iterator_cls", "(", "engine", ",", "mode", ")", "self", ".", "prepare_model", "(", "model", ",", "index", ",", "consistent", ")", "self", ".", "prepare_key", "(", "key", ")", "self", ".", "prepare_projection", "(", "projection", ")", "self", ".", "prepare_filter", "(", "filter", ")", "self", ".", "prepare_constraints", "(", "forward", ",", "parallel", ")", "self", ".", "prepare_request", "(", ")" ]
Validates the search parameters and builds the base request dict for each Query/Scan call.
[ "Validates", "the", "search", "parameters", "and", "builds", "the", "base", "request", "dict", "for", "each", "Query", "/", "Scan", "call", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/search.py#L245-L257
numberoverzero/bloop
bloop/search.py
SearchIterator.count
def count(self): """Number of items that have been loaded from DynamoDB so far, including buffered items.""" if self.request["Select"] == "COUNT": while not self.exhausted: next(self, None) return self._count
python
def count(self): """Number of items that have been loaded from DynamoDB so far, including buffered items.""" if self.request["Select"] == "COUNT": while not self.exhausted: next(self, None) return self._count
[ "def", "count", "(", "self", ")", ":", "if", "self", ".", "request", "[", "\"Select\"", "]", "==", "\"COUNT\"", ":", "while", "not", "self", ".", "exhausted", ":", "next", "(", "self", ",", "None", ")", "return", "self", ".", "_count" ]
Number of items that have been loaded from DynamoDB so far, including buffered items.
[ "Number", "of", "items", "that", "have", "been", "loaded", "from", "DynamoDB", "so", "far", "including", "buffered", "items", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/search.py#L366-L371
numberoverzero/bloop
bloop/search.py
SearchIterator.scanned
def scanned(self): """Number of items that DynamoDB evaluated, before any filter was applied.""" if self.request["Select"] == "COUNT": while not self.exhausted: next(self, None) return self._scanned
python
def scanned(self): """Number of items that DynamoDB evaluated, before any filter was applied.""" if self.request["Select"] == "COUNT": while not self.exhausted: next(self, None) return self._scanned
[ "def", "scanned", "(", "self", ")", ":", "if", "self", ".", "request", "[", "\"Select\"", "]", "==", "\"COUNT\"", ":", "while", "not", "self", ".", "exhausted", ":", "next", "(", "self", ",", "None", ")", "return", "self", ".", "_scanned" ]
Number of items that DynamoDB evaluated, before any filter was applied.
[ "Number", "of", "items", "that", "DynamoDB", "evaluated", "before", "any", "filter", "was", "applied", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/search.py#L374-L379
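Both properties drain a COUNT search before returning, so the totals are final. A sketch assuming a "count" projection and a hypothetical filter:

.. code-block:: python

    q = engine.query(
        User, key=User.id == "numberoverzero",
        filter=User.email.contains("@"), projection="count")
    print(q.count)    # items that matched the filter
    print(q.scanned)  # items DynamoDB evaluated before the filter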
numberoverzero/bloop
bloop/search.py
SearchIterator.first
def first(self): """Return the first result. If there are no results, raises :exc:`~bloop.exceptions.ConstraintViolation`. :return: The first result. :raises bloop.exceptions.ConstraintViolation: No results. """ self.reset() value = next(self, None) if value is None: raise ConstraintViolation("{} did not find any results.".format(self.mode.capitalize())) return value
python
def first(self): """Return the first result. If there are no results, raises :exc:`~bloop.exceptions.ConstraintViolation`. :return: The first result. :raises bloop.exceptions.ConstraintViolation: No results. """ self.reset() value = next(self, None) if value is None: raise ConstraintViolation("{} did not find any results.".format(self.mode.capitalize())) return value
[ "def", "first", "(", "self", ")", ":", "self", ".", "reset", "(", ")", "value", "=", "next", "(", "self", ",", "None", ")", "if", "value", "is", "None", ":", "raise", "ConstraintViolation", "(", "\"{} did not find any results.\"", ".", "format", "(", "self", ".", "mode", ".", "capitalize", "(", ")", ")", ")", "return", "value" ]
Return the first result. If there are no results, raises :exc:`~bloop.exceptions.ConstraintViolation`. :return: The first result. :raises bloop.exceptions.ConstraintViolation: No results.
[ "Return", "the", "first", "result", ".", "If", "there", "are", "no", "results", "raises", ":", "exc", ":", "~bloop", ".", "exceptions", ".", "ConstraintViolation", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/search.py#L389-L399
numberoverzero/bloop
bloop/search.py
SearchIterator.one
def one(self): """Return the unique result. If there is not exactly one result, raises :exc:`~bloop.exceptions.ConstraintViolation`. :return: The unique result. :raises bloop.exceptions.ConstraintViolation: Not exactly one result. """ first = self.first() second = next(self, None) if second is not None: raise ConstraintViolation("{} found more than one result.".format(self.mode.capitalize())) return first
python
def one(self): """Return the unique result. If there is not exactly one result, raises :exc:`~bloop.exceptions.ConstraintViolation`. :return: The unique result. :raises bloop.exceptions.ConstraintViolation: Not exactly one result. """ first = self.first() second = next(self, None) if second is not None: raise ConstraintViolation("{} found more than one result.".format(self.mode.capitalize())) return first
[ "def", "one", "(", "self", ")", ":", "first", "=", "self", ".", "first", "(", ")", "second", "=", "next", "(", "self", ",", "None", ")", "if", "second", "is", "not", "None", ":", "raise", "ConstraintViolation", "(", "\"{} found more than one result.\"", ".", "format", "(", "self", ".", "mode", ".", "capitalize", "(", ")", ")", ")", "return", "first" ]
Return the unique result. If there is not exactly one result, raises :exc:`~bloop.exceptions.ConstraintViolation`. :return: The unique result. :raises bloop.exceptions.ConstraintViolation: Not exactly one result.
[ "Return", "the", "unique", "result", ".", "If", "there", "is", "not", "exactly", "one", "result", "raises", ":", "exc", ":", "~bloop", ".", "exceptions", ".", "ConstraintViolation", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/search.py#L401-L412
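A short sketch of the two accessors; both raise :exc:`~bloop.exceptions.ConstraintViolation` rather than returning None:

.. code-block:: python

    from bloop.exceptions import ConstraintViolation

    q = engine.query(User, key=User.id == "numberoverzero")
    try:
        user = q.one()  # exactly one result
    except ConstraintViolation as exc:
        # Raised for both zero results and more than one result.
        print(exc)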
numberoverzero/bloop
bloop/search.py
SearchIterator.reset
def reset(self): """Reset to the initial state, clearing the buffer and zeroing count and scanned.""" self.buffer.clear() self._count = 0 self._scanned = 0 self._exhausted = False self.request.pop("ExclusiveStartKey", None)
python
def reset(self): """Reset to the initial state, clearing the buffer and zeroing count and scanned.""" self.buffer.clear() self._count = 0 self._scanned = 0 self._exhausted = False self.request.pop("ExclusiveStartKey", None)
[ "def", "reset", "(", "self", ")", ":", "self", ".", "buffer", ".", "clear", "(", ")", "self", ".", "_count", "=", "0", "self", ".", "_scanned", "=", "0", "self", ".", "_exhausted", "=", "False", "self", ".", "request", ".", "pop", "(", "\"ExclusiveStartKey\"", ",", "None", ")" ]
Reset to the initial state, clearing the buffer and zeroing count and scanned.
[ "Reset", "to", "the", "initial", "state", "clearing", "the", "buffer", "and", "zeroing", "count", "and", "scanned", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/search.py#L414-L420
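Because reset() also drops "ExclusiveStartKey", the next iteration starts over from the beginning; a sketch:

.. code-block:: python

    it = engine.scan(User)
    first_pass = list(it)
    it.reset()               # clears buffer, counters, and the start key
    second_pass = list(it)   # issues the scan again from the top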
numberoverzero/bloop
bloop/transactions.py
TxType.by_alias
def by_alias(cls, name: str) -> "TxType": """get a type by the common bloop operation name: get/check/delete/save""" return { "get": TxType.Get, "check": TxType.Check, "delete": TxType.Delete, "save": TxType.Update, }[name]
python
def by_alias(cls, name: str) -> "TxType": """get a type by the common bloop operation name: get/check/delete/save""" return { "get": TxType.Get, "check": TxType.Check, "delete": TxType.Delete, "save": TxType.Update, }[name]
[ "def", "by_alias", "(", "cls", ",", "name", ":", "str", ")", "->", "\"TxType\"", ":", "return", "{", "\"get\"", ":", "TxType", ".", "Get", ",", "\"check\"", ":", "TxType", ".", "Check", ",", "\"delete\"", ":", "TxType", ".", "Delete", ",", "\"save\"", ":", "TxType", ".", "Update", ",", "}", "[", "name", "]" ]
get a type by the common bloop operation name: get/check/delete/save
[ "get", "a", "type", "by", "the", "common", "bloop", "operation", "name", ":", "get", "/", "check", "/", "delete", "/", "save" ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/transactions.py#L36-L43
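For example:

.. code-block:: python

    assert TxType.by_alias("save") is TxType.Update
    assert TxType.by_alias("get") is TxType.Get
    TxType.by_alias("query")  # KeyError: unknown alias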
numberoverzero/bloop
bloop/transactions.py
Transaction.prepare
def prepare(self): """ Create a new PreparedTransaction that can be committed. This is called automatically when exiting the transaction as a context: .. code-block:: python >>> engine = Engine() >>> tx = WriteTransaction(engine) >>> prepared = tx.prepare() >>> prepared.commit() # automatically calls commit when exiting >>> with WriteTransaction(engine) as tx: ... # modify the transaction here ... pass >>> # tx commits here :return: a :class:`~bloop.transactions.PreparedTransaction` ready to commit """ tx = PreparedTransaction() tx.prepare( engine=self.engine, mode=self.mode, items=self._items, ) return tx
python
def prepare(self): """ Create a new PreparedTransaction that can be committed. This is called automatically when exiting the transaction as a context: .. code-block:: python >>> engine = Engine() >>> tx = WriteTransaction(engine) >>> prepared = tx.prepare() >>> prepared.commit() # automatically calls commit when exiting >>> with WriteTransaction(engine) as tx: ... # modify the transaction here ... pass >>> # tx commits here :return: a :class:`~bloop.transactions.PreparedTransaction` ready to commit """ tx = PreparedTransaction() tx.prepare( engine=self.engine, mode=self.mode, items=self._items, ) return tx
[ "def", "prepare", "(", "self", ")", ":", "tx", "=", "PreparedTransaction", "(", ")", "tx", ".", "prepare", "(", "engine", "=", "self", ".", "engine", ",", "mode", "=", "self", ".", "mode", ",", "items", "=", "self", ".", "_items", ",", ")", "return", "tx" ]
Create a new PreparedTransaction that can be committed. This is called automatically when exiting the transaction as a context: .. code-block:: python >>> engine = Engine() >>> tx = WriteTransaction(engine) >>> prepared = tx.prepare() >>> prepared.commit() # automatically calls commit when exiting >>> with WriteTransaction(engine) as tx: ... # modify the transaction here ... pass >>> # tx commits here :return: a :class:`~bloop.transactions.PreparedTransaction` ready to commit
[ "Create", "a", "new", "PreparedTransaction", "that", "can", "be", "committed", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/transactions.py#L135-L162
numberoverzero/bloop
bloop/transactions.py
PreparedTransaction.prepare
def prepare(self, engine, mode, items) -> None: """ Create a unique transaction id and dump the items into a cached request object. """ self.tx_id = str(uuid.uuid4()).replace("-", "") self.engine = engine self.mode = mode self.items = items self._prepare_request()
python
def prepare(self, engine, mode, items) -> None: """ Create a unique transaction id and dump the items into a cached request object. """ self.tx_id = str(uuid.uuid4()).replace("-", "") self.engine = engine self.mode = mode self.items = items self._prepare_request()
[ "def", "prepare", "(", "self", ",", "engine", ",", "mode", ",", "items", ")", "->", "None", ":", "self", ".", "tx_id", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", ".", "replace", "(", "\"-\"", ",", "\"\"", ")", "self", ".", "engine", "=", "engine", "self", ".", "mode", "=", "mode", "self", ".", "items", "=", "items", "self", ".", "_prepare_request", "(", ")" ]
Create a unique transaction id and dump the items into a cached request object.
[ "Create", "a", "unique", "transaction", "id", "and", "dumps", "the", "items", "into", "a", "cached", "request", "object", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/transactions.py#L186-L194
numberoverzero/bloop
bloop/transactions.py
PreparedTransaction.commit
def commit(self) -> None: """ Commit the transaction with a fixed transaction id. A read transaction can call commit() any number of times, while a write transaction can only use the same tx_id for 10 minutes from the first call. """ now = datetime.now(timezone.utc) if self.first_commit_at is None: self.first_commit_at = now if self.mode == "r": response = self.engine.session.transaction_read(self._request) elif self.mode == "w": if now - self.first_commit_at > MAX_TOKEN_LIFETIME: raise TransactionTokenExpired response = self.engine.session.transaction_write(self._request, self.tx_id) else: raise ValueError(f"unrecognized mode {self.mode}") self._handle_response(response)
python
def commit(self) -> None: """ Commit the transaction with a fixed transaction id. A read transaction can call commit() any number of times, while a write transaction can only use the same tx_id for 10 minutes from the first call. """ now = datetime.now(timezone.utc) if self.first_commit_at is None: self.first_commit_at = now if self.mode == "r": response = self.engine.session.transaction_read(self._request) elif self.mode == "w": if now - self.first_commit_at > MAX_TOKEN_LIFETIME: raise TransactionTokenExpired response = self.engine.session.transaction_write(self._request, self.tx_id) else: raise ValueError(f"unrecognized mode {self.mode}") self._handle_response(response)
[ "def", "commit", "(", "self", ")", "->", "None", ":", "now", "=", "datetime", ".", "now", "(", "timezone", ".", "utc", ")", "if", "self", ".", "first_commit_at", "is", "None", ":", "self", ".", "first_commit_at", "=", "now", "if", "self", ".", "mode", "==", "\"r\"", ":", "response", "=", "self", ".", "engine", ".", "session", ".", "transaction_read", "(", "self", ".", "_request", ")", "elif", "self", ".", "mode", "==", "\"w\"", ":", "if", "now", "-", "self", ".", "first_commit_at", ">", "MAX_TOKEN_LIFETIME", ":", "raise", "TransactionTokenExpired", "response", "=", "self", ".", "engine", ".", "session", ".", "transaction_write", "(", "self", ".", "_request", ",", "self", ".", "tx_id", ")", "else", ":", "raise", "ValueError", "(", "f\"unrecognized mode {self.mode}\"", ")", "self", ".", "_handle_response", "(", "response", ")" ]
Commit the transaction with a fixed transaction id. A read transaction can call commit() any number of times, while a write transaction can only use the same tx_id for 10 minutes from the first call.
[ "Commit", "the", "transaction", "with", "a", "fixed", "transaction", "id", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/transactions.py#L213-L233
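A sketch of the re-commit semantics; ``user`` is a hypothetical model instance, and the ``Transaction(engine)`` constructor signature is assumed from the surrounding code.

.. code-block:: python

    from bloop.exceptions import TransactionTokenExpired

    prepared = ReadTransaction(engine).load(user).prepare()
    prepared.commit()  # read transactions may commit any number of times
    prepared.commit()

    prepared = WriteTransaction(engine).save(user).prepare()
    prepared.commit()
    # ...more than 10 minutes later...
    try:
        prepared.commit()
    except TransactionTokenExpired:
        pass  # the fixed tx_id can no longer be reused for writes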
numberoverzero/bloop
bloop/transactions.py
ReadTransaction.load
def load(self, *objs) -> "ReadTransaction": """ Add one or more objects to be loaded in this transaction. At most 10 items can be loaded in the same transaction. All objects will be loaded each time you call commit(). :param objs: Objects to add to the set that are loaded in this transaction. :return: this transaction for chaining :raises bloop.exceptions.MissingObjects: if one or more objects aren't loaded. """ self._extend([TxItem.new("get", obj) for obj in objs]) return self
python
def load(self, *objs) -> "ReadTransaction": """ Add one or more objects to be loaded in this transaction. At most 10 items can be loaded in the same transaction. All objects will be loaded each time you call commit(). :param objs: Objects to add to the set that are loaded in this transaction. :return: this transaction for chaining :raises bloop.exceptions.MissingObjects: if one or more objects aren't loaded. """ self._extend([TxItem.new("get", obj) for obj in objs]) return self
[ "def", "load", "(", "self", ",", "*", "objs", ")", "->", "\"ReadTransaction\"", ":", "self", ".", "_extend", "(", "[", "TxItem", ".", "new", "(", "\"get\"", ",", "obj", ")", "for", "obj", "in", "objs", "]", ")", "return", "self" ]
Add one or more objects to be loaded in this transaction. At most 10 items can be loaded in the same transaction. All objects will be loaded each time you call commit(). :param objs: Objects to add to the set that are loaded in this transaction. :return: this transaction for chaining :raises bloop.exceptions.MissingObjects: if one or more objects aren't loaded.
[ "Add", "one", "or", "more", "objects", "to", "be", "loaded", "in", "this", "transaction", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/transactions.py#L267-L280
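A sketch of the failure mode when a requested object doesn't exist; ``User`` and ``Tweet`` are hypothetical models:

.. code-block:: python

    from bloop.exceptions import MissingObjects

    user, tweet = User(id="n0"), Tweet(id="n0")
    try:
        ReadTransaction(engine).load(user, tweet).prepare().commit()
    except MissingObjects:
        pass  # one or more of the requested objects were not found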
numberoverzero/bloop
bloop/transactions.py
WriteTransaction.check
def check(self, obj, condition) -> "WriteTransaction": """ Add a condition which must be met for the transaction to commit. While the condition is checked against the provided object, that object will not be modified. It is only used to provide the hash and range key to apply the condition to. At most 10 items can be checked, saved, or deleted in the same transaction. The same idempotency token will be used for a single prepared transaction, which allows you to safely call commit on the PreparedTransaction object multiple times. :param obj: The object to use for the transaction condition. This object will not be modified. :param condition: A condition on an object which must hold for the transaction to commit. :return: this transaction for chaining """ self._extend([TxItem.new("check", obj, condition)]) return self
python
def check(self, obj, condition) -> "WriteTransaction": """ Add a condition which must be met for the transaction to commit. While the condition is checked against the provided object, that object will not be modified. It is only used to provide the hash and range key to apply the condition to. At most 10 items can be checked, saved, or deleted in the same transaction. The same idempotency token will be used for a single prepared transaction, which allows you to safely call commit on the PreparedTransaction object multiple times. :param obj: The object to use for the transaction condition. This object will not be modified. :param condition: A condition on an object which must hold for the transaction to commit. :return: this transaction for chaining """ self._extend([TxItem.new("check", obj, condition)]) return self
[ "def", "check", "(", "self", ",", "obj", ",", "condition", ")", "->", "\"WriteTransaction\"", ":", "self", ".", "_extend", "(", "[", "TxItem", ".", "new", "(", "\"check\"", ",", "obj", ",", "condition", ")", "]", ")", "return", "self" ]
Add a condition which must be met for the transaction to commit. While the condition is checked against the provided object, that object will not be modified. It is only used to provide the hash and range key to apply the condition to. At most 10 items can be checked, saved, or deleted in the same transaction. The same idempotency token will be used for a single prepared transaction, which allows you to safely call commit on the PreparedTransaction object multiple times. :param obj: The object to use for the transaction condition. This object will not be modified. :param condition: A condition on an object which must hold for the transaction to commit. :return: this transaction for chaining
[ "Add", "a", "condition", "which", "must", "be", "met", "for", "the", "transaction", "to", "commit", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/transactions.py#L300-L317
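For example, guarding an unrelated write on another object's state; ``User`` and ``Wallet`` are hypothetical models, and the ``is_`` condition alias is assumed from bloop's comparison mixins.

.. code-block:: python

    tx = WriteTransaction(engine)
    # `user` is only read to locate the row; it is not modified.
    tx.check(user, condition=User.banned.is_(None))
    tx.save(wallet)
    tx.prepare().commit()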
numberoverzero/bloop
bloop/transactions.py
WriteTransaction.save
def save(self, *objs, condition=None, atomic=False) -> "WriteTransaction": """ Add one or more objects to be saved in this transaction. At most 10 items can be checked, saved, or deleted in the same transaction. The same idempotency token will be used for a single prepared transaction, which allows you to safely call commit on the PreparedTransaction object multiple times. :param objs: Objects to add to the set that are updated in this transaction. :param condition: A condition for these objects which must hold for the transaction to commit. :param bool atomic: only commit the transaction if the local and DynamoDB versions of the object match. :return: this transaction for chaining """ self._extend([TxItem.new("save", obj, condition, atomic) for obj in objs]) return self
python
def save(self, *objs, condition=None, atomic=False) -> "WriteTransaction": """ Add one or more objects to be saved in this transaction. At most 10 items can be checked, saved, or deleted in the same transaction. The same idempotency token will be used for a single prepared transaction, which allows you to safely call commit on the PreparedTransaction object multiple times. :param objs: Objects to add to the set that are updated in this transaction. :param condition: A condition for these objects which must hold for the transaction to commit. :param bool atomic: only commit the transaction if the local and DynamoDB versions of the object match. :return: this transaction for chaining """ self._extend([TxItem.new("save", obj, condition, atomic) for obj in objs]) return self
[ "def", "save", "(", "self", ",", "*", "objs", ",", "condition", "=", "None", ",", "atomic", "=", "False", ")", "->", "\"WriteTransaction\"", ":", "self", ".", "_extend", "(", "[", "TxItem", ".", "new", "(", "\"save\"", ",", "obj", ",", "condition", ",", "atomic", ")", "for", "obj", "in", "objs", "]", ")", "return", "self" ]
Add one or more objects to be saved in this transaction. At most 10 items can be checked, saved, or deleted in the same transaction. The same idempotency token will be used for a single prepared transaction, which allows you to safely call commit on the PreparedTransaction object multiple times. :param objs: Objects to add to the set that are updated in this transaction. :param condition: A condition for these objects which must hold for the transaction to commit. :param bool atomic: only commit the transaction if the local and DynamoDB versions of the object match. :return: this transaction for chaining
[ "Add", "one", "or", "more", "objects", "to", "be", "saved", "in", "this", "transaction", "." ]
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/transactions.py#L319-L333
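And as a context manager, mixing a plain condition with an atomic save; the model names are hypothetical.

.. code-block:: python

    with WriteTransaction(engine) as tx:
        tx.save(user, condition=User.email.is_(None))
        tx.save(wallet, atomic=True)  # only if local and DynamoDB versions match
    # the transaction is prepared and committed on exiting the block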
ska-sa/montblanc
montblanc/impl/rime/tensorflow/cube_dim_transcoder.py
CubeDimensionTranscoder.encode
def encode(self, cube_dimensions): """ Produces a numpy array of integers which encode the supplied cube dimensions. """ return np.asarray([getattr(cube_dimensions[d], s) for d in self._dimensions for s in self._schema], dtype=np.int32)
python
def encode(self, cube_dimensions): """ Produces a numpy array of integers which encode the supplied cube dimensions. """ return np.asarray([getattr(cube_dimensions[d], s) for d in self._dimensions for s in self._schema], dtype=np.int32)
[ "def", "encode", "(", "self", ",", "cube_dimensions", ")", ":", "return", "np", ".", "asarray", "(", "[", "getattr", "(", "cube_dimensions", "[", "d", "]", ",", "s", ")", "for", "d", "in", "self", ".", "_dimensions", "for", "s", "in", "self", ".", "_schema", "]", ",", "dtype", "=", "np", ".", "int32", ")" ]
Produces a numpy array of integers which encode the supplied cube dimensions.
[ "Produces", "a", "numpy", "array", "of", "integers", "which", "encode", "the", "supplied", "cube", "dimensions", "." ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/cube_dim_transcoder.py#L43-L51
ska-sa/montblanc
montblanc/impl/rime/tensorflow/cube_dim_transcoder.py
CubeDimensionTranscoder.decode
def decode(self, descriptor): """ Produce a list of dictionaries for each dimension in this transcoder """ i = iter(descriptor) n = len(self._schema) # Add the name key to our schema schema = self._schema + ('name',) # For each dimension, the generator takes n items off the iterator # wrapping the descriptor, making a tuple with the dimension # name appended tuple_gen = (tuple(itertools.islice(i, n)) + (d, ) for d in self._dimensions) # Generate dictionary by mapping schema keys to generated tuples return [{ k: v for k, v in zip(schema, t) } for t in tuple_gen]
python
def decode(self, descriptor): """ Produce a list of dictionaries for each dimension in this transcoder """ i = iter(descriptor) n = len(self._schema) # Add the name key to our schema schema = self._schema + ('name',) # For each dimension, the generator takes n items off the iterator # wrapping the descriptor, making a tuple with the dimension # name appended tuple_gen = (tuple(itertools.islice(i, n)) + (d, ) for d in self._dimensions) # Generate dictionary by mapping schema keys to generated tuples return [{ k: v for k, v in zip(schema, t) } for t in tuple_gen]
[ "def", "decode", "(", "self", ",", "descriptor", ")", ":", "i", "=", "iter", "(", "descriptor", ")", "n", "=", "len", "(", "self", ".", "_schema", ")", "# Add the name key to our schema", "schema", "=", "self", ".", "_schema", "+", "(", "'name'", ",", ")", "# For each dimensions, generator takes n items off iterator", "# wrapping the descriptor, making a tuple with the dimension", "# name appended", "tuple_gen", "=", "(", "tuple", "(", "itertools", ".", "islice", "(", "i", ",", "n", ")", ")", "+", "(", "d", ",", ")", "for", "d", "in", "self", ".", "_dimensions", ")", "# Generate dictionary by mapping schema keys to generated tuples", "return", "[", "{", "k", ":", "v", "for", "k", ",", "v", "in", "zip", "(", "schema", ",", "t", ")", "}", "for", "t", "in", "tuple_gen", "]" ]
Produce a list of dictionaries for each dimension in this transcoder
[ "Produce", "a", "list", "of", "dictionaries", "for", "each", "dimension", "in", "this", "transcoder" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/cube_dim_transcoder.py#L53-L67
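A hedged round-trip sketch of encode/decode; the dimension names, the schema attribute names, and the shape of ``cube_dimensions`` are assumptions, and transcoder construction is elided.

.. code-block:: python

    # Suppose the transcoder covers ("ntime", "nchan") with
    # schema ("lower_extent", "upper_extent").
    descriptor = transcoder.encode(cube_dimensions)
    print(descriptor)  # e.g. array([ 0, 10,  0, 64], dtype=int32)

    for dim in transcoder.decode(descriptor):
        print(dim["name"], dim["lower_extent"], dim["upper_extent"])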
ska-sa/montblanc
install/cub.py
dl_cub
def dl_cub(cub_url, cub_archive_name): """ Download cub archive from cub_url and store it in cub_archive_name """ with open(cub_archive_name, 'wb') as f: remote_file = urllib2.urlopen(cub_url) meta = remote_file.info() # The server may provide us with the size of the file. cl_header = meta.getheaders("Content-Length") remote_file_size = int(cl_header[0]) if len(cl_header) > 0 else None # Initialise variables local_file_size = 0 block_size = 128*1024 # Do the download while True: data = remote_file.read(block_size) if not data: break f.write(data) local_file_size += len(data) if (remote_file_size is not None and not local_file_size == remote_file_size): log.warn("Local file size '{}' " "does not match remote '{}'".format( local_file_size, remote_file_size)) remote_file.close()
python
def dl_cub(cub_url, cub_archive_name): """ Download cub archive from cub_url and store it in cub_archive_name """ with open(cub_archive_name, 'wb') as f: remote_file = urllib2.urlopen(cub_url) meta = remote_file.info() # The server may provide us with the size of the file. cl_header = meta.getheaders("Content-Length") remote_file_size = int(cl_header[0]) if len(cl_header) > 0 else None # Initialise variables local_file_size = 0 block_size = 128*1024 # Do the download while True: data = remote_file.read(block_size) if not data: break f.write(data) local_file_size += len(data) if (remote_file_size is not None and not local_file_size == remote_file_size): log.warn("Local file size '{}' " "does not match remote '{}'".format( local_file_size, remote_file_size)) remote_file.close()
[ "def", "dl_cub", "(", "cub_url", ",", "cub_archive_name", ")", ":", "with", "open", "(", "cub_archive_name", ",", "'wb'", ")", "as", "f", ":", "remote_file", "=", "urllib2", ".", "urlopen", "(", "cub_url", ")", "meta", "=", "remote_file", ".", "info", "(", ")", "# The server may provide us with the size of the file.", "cl_header", "=", "meta", ".", "getheaders", "(", "\"Content-Length\"", ")", "remote_file_size", "=", "int", "(", "cl_header", "[", "0", "]", ")", "if", "len", "(", "cl_header", ")", ">", "0", "else", "None", "# Initialise variables", "local_file_size", "=", "0", "block_size", "=", "128", "*", "1024", "# Do the download", "while", "True", ":", "data", "=", "remote_file", ".", "read", "(", "block_size", ")", "if", "not", "data", ":", "break", "f", ".", "write", "(", "data", ")", "local_file_size", "+=", "len", "(", "data", ")", "if", "(", "remote_file_size", "is", "not", "None", "and", "not", "local_file_size", "==", "remote_file_size", ")", ":", "log", ".", "warn", "(", "\"Local file size '{}' \"", "\"does not match remote '{}'\"", ".", "format", "(", "local_file_size", ",", "remote_file_size", ")", ")", "remote_file", ".", "close", "(", ")" ]
Download cub archive from cub_url and store it in cub_archive_name
[ "Download", "cub", "archive", "from", "cub_url", "and", "store", "it", "in", "cub_archive_name" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/install/cub.py#L33-L63
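The function above targets Python 2 (`urllib2`, `meta.getheaders`). A rough Python 3 equivalent, offered only as a sketch:

.. code-block:: python

    import logging
    import urllib.request

    log = logging.getLogger(__name__)

    def dl_cub_py3(cub_url, cub_archive_name, block_size=128 * 1024):
        """Download cub_url to cub_archive_name in fixed-size blocks."""
        local_size = 0
        with urllib.request.urlopen(cub_url) as remote, \
                open(cub_archive_name, 'wb') as f:
            header = remote.headers.get("Content-Length")
            remote_size = int(header) if header is not None else None
            while True:
                data = remote.read(block_size)
                if not data:
                    break
                f.write(data)
                local_size += len(data)
        if remote_size is not None and local_size != remote_size:
            log.warning("Local file size '%s' does not match remote '%s'",
                        local_size, remote_size)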
ska-sa/montblanc
install/cub.py
sha_hash_file
def sha_hash_file(filename): """ Compute the SHA1 hash of filename """ hash_sha = hashlib.sha1() with open(filename, 'rb') as f: for chunk in iter(lambda: f.read(1024*1024), b""): hash_sha.update(chunk) return hash_sha.hexdigest()
python
def sha_hash_file(filename): """ Compute the SHA1 hash of filename """ hash_sha = hashlib.sha1() with open(filename, 'rb') as f: for chunk in iter(lambda: f.read(1024*1024), b""): hash_sha.update(chunk) return hash_sha.hexdigest()
[ "def", "sha_hash_file", "(", "filename", ")", ":", "hash_sha", "=", "hashlib", ".", "sha1", "(", ")", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "f", ":", "for", "chunk", "in", "iter", "(", "lambda", ":", "f", ".", "read", "(", "1024", "*", "1024", ")", ",", "b\"\"", ")", ":", "hash_sha", ".", "update", "(", "chunk", ")", "return", "hash_sha", ".", "hexdigest", "(", ")" ]
Compute the SHA1 hash of filename
[ "Compute", "the", "SHA1", "hash", "of", "filename" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/install/cub.py#L65-L73
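Used by install_cub below to validate the downloaded archive; for example:

.. code-block:: python

    expected = '0d5659200132c2576be0b3959383fa756de6105d'
    if sha_hash_file('cub.zip') != expected:
        raise InstallCubException("cub.zip is corrupt or incomplete")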
ska-sa/montblanc
install/cub.py
install_cub
def install_cub(mb_inc_path): """ Downloads and installs cub into mb_inc_path """ cub_url = 'https://github.com/NVlabs/cub/archive/1.6.4.zip' cub_sha_hash = '0d5659200132c2576be0b3959383fa756de6105d' cub_version_str = 'Current release: v1.6.4 (12/06/2016)' cub_zip_file = 'cub.zip' cub_zip_dir = 'cub-1.6.4' cub_unzipped_path = os.path.join(mb_inc_path, cub_zip_dir) cub_new_unzipped_path = os.path.join(mb_inc_path, 'cub') cub_header = os.path.join(cub_new_unzipped_path, 'cub', 'cub.cuh') cub_readme = os.path.join(cub_new_unzipped_path, 'README.md' ) # Check for a reasonably valid install cub_installed, _ = is_cub_installed(cub_readme, cub_header, cub_version_str) if cub_installed: log.info("NVIDIA cub installation found " "at '{}'".format(cub_new_unzipped_path)) return log.info("No NVIDIA cub installation found") # Do we already have a valid cub zip file have_valid_cub_file = (os.path.exists(cub_zip_file) and os.path.isfile(cub_zip_file) and sha_hash_file(cub_zip_file) == cub_sha_hash) if have_valid_cub_file: log.info("Valid NVIDIA cub archive found '{}'".format(cub_zip_file)) # Download if we don't have a valid file else: log.info("Downloading cub archive '{}'".format(cub_url)) dl_cub(cub_url, cub_zip_file) cub_file_sha_hash = sha_hash_file(cub_zip_file) # Compare against our supplied hash if cub_sha_hash != cub_file_sha_hash: msg = ('Hash of file %s downloaded from %s ' 'is %s and does not match the expected ' 'hash of %s. Please manually download ' 'as per the README.md instructions.') % ( cub_zip_file, cub_url, cub_file_sha_hash, cub_sha_hash) raise InstallCubException(msg) # Unzip into montblanc/include/cub with zipfile.ZipFile(cub_zip_file, 'r') as zip_file: # Remove any existing installs shutil.rmtree(cub_unzipped_path, ignore_errors=True) shutil.rmtree(cub_new_unzipped_path, ignore_errors=True) # Unzip zip_file.extractall(mb_inc_path) # Rename. cub_unzipped_path is mb_inc_path/cub_zip_dir shutil.move(cub_unzipped_path, cub_new_unzipped_path) log.info("NVIDIA cub archive unzipped into '{}'".format( cub_new_unzipped_path)) there, reason = is_cub_installed(cub_readme, cub_header, cub_version_str) if not there: raise InstallCubException(reason)
python
def install_cub(mb_inc_path): """ Downloads and installs cub into mb_inc_path """ cub_url = 'https://github.com/NVlabs/cub/archive/1.6.4.zip' cub_sha_hash = '0d5659200132c2576be0b3959383fa756de6105d' cub_version_str = 'Current release: v1.6.4 (12/06/2016)' cub_zip_file = 'cub.zip' cub_zip_dir = 'cub-1.6.4' cub_unzipped_path = os.path.join(mb_inc_path, cub_zip_dir) cub_new_unzipped_path = os.path.join(mb_inc_path, 'cub') cub_header = os.path.join(cub_new_unzipped_path, 'cub', 'cub.cuh') cub_readme = os.path.join(cub_new_unzipped_path, 'README.md' ) # Check for a reasonably valid install cub_installed, _ = is_cub_installed(cub_readme, cub_header, cub_version_str) if cub_installed: log.info("NVIDIA cub installation found " "at '{}'".format(cub_new_unzipped_path)) return log.info("No NVIDIA cub installation found") # Do we already have a valid cub zip file have_valid_cub_file = (os.path.exists(cub_zip_file) and os.path.isfile(cub_zip_file) and sha_hash_file(cub_zip_file) == cub_sha_hash) if have_valid_cub_file: log.info("Valid NVIDIA cub archive found '{}'".format(cub_zip_file)) # Download if we don't have a valid file else: log.info("Downloading cub archive '{}'".format(cub_url)) dl_cub(cub_url, cub_zip_file) cub_file_sha_hash = sha_hash_file(cub_zip_file) # Compare against our supplied hash if cub_sha_hash != cub_file_sha_hash: msg = ('Hash of file %s downloaded from %s ' 'is %s and does not match the expected ' 'hash of %s. Please manually download ' 'as per the README.md instructions.') % ( cub_zip_file, cub_url, cub_file_sha_hash, cub_sha_hash) raise InstallCubException(msg) # Unzip into montblanc/include/cub with zipfile.ZipFile(cub_zip_file, 'r') as zip_file: # Remove any existing installs shutil.rmtree(cub_unzipped_path, ignore_errors=True) shutil.rmtree(cub_new_unzipped_path, ignore_errors=True) # Unzip zip_file.extractall(mb_inc_path) # Rename. cub_unzipped_path is mb_inc_path/cub_zip_dir shutil.move(cub_unzipped_path, cub_new_unzipped_path) log.info("NVIDIA cub archive unzipped into '{}'".format( cub_new_unzipped_path)) there, reason = is_cub_installed(cub_readme, cub_header, cub_version_str) if not there: raise InstallCubException(reason)
[ "def", "install_cub", "(", "mb_inc_path", ")", ":", "cub_url", "=", "'https://github.com/NVlabs/cub/archive/1.6.4.zip'", "cub_sha_hash", "=", "'0d5659200132c2576be0b3959383fa756de6105d'", "cub_version_str", "=", "'Current release: v1.6.4 (12/06/2016)'", "cub_zip_file", "=", "'cub.zip'", "cub_zip_dir", "=", "'cub-1.6.4'", "cub_unzipped_path", "=", "os", ".", "path", ".", "join", "(", "mb_inc_path", ",", "cub_zip_dir", ")", "cub_new_unzipped_path", "=", "os", ".", "path", ".", "join", "(", "mb_inc_path", ",", "'cub'", ")", "cub_header", "=", "os", ".", "path", ".", "join", "(", "cub_new_unzipped_path", ",", "'cub'", ",", "'cub.cuh'", ")", "cub_readme", "=", "os", ".", "path", ".", "join", "(", "cub_new_unzipped_path", ",", "'README.md'", ")", "# Check for a reasonably valid install", "cub_installed", ",", "_", "=", "is_cub_installed", "(", "cub_readme", ",", "cub_header", ",", "cub_version_str", ")", "if", "cub_installed", ":", "log", ".", "info", "(", "\"NVIDIA cub installation found \"", "\"at '{}'\"", ".", "format", "(", "cub_new_unzipped_path", ")", ")", "return", "log", ".", "info", "(", "\"No NVIDIA cub installation found\"", ")", "# Do we already have a valid cub zip file", "have_valid_cub_file", "=", "(", "os", ".", "path", ".", "exists", "(", "cub_zip_file", ")", "and", "os", ".", "path", ".", "isfile", "(", "cub_zip_file", ")", "and", "sha_hash_file", "(", "cub_zip_file", ")", "==", "cub_sha_hash", ")", "if", "have_valid_cub_file", ":", "log", ".", "info", "(", "\"Valid NVIDIA cub archive found '{}'\"", ".", "format", "(", "cub_zip_file", ")", ")", "# Download if we don't have a valid file", "else", ":", "log", ".", "info", "(", "\"Downloading cub archive '{}'\"", ".", "format", "(", "cub_url", ")", ")", "dl_cub", "(", "cub_url", ",", "cub_zip_file", ")", "cub_file_sha_hash", "=", "sha_hash_file", "(", "cub_zip_file", ")", "# Compare against our supplied hash", "if", "cub_sha_hash", "!=", "cub_file_sha_hash", ":", "msg", "=", "(", "'Hash of file %s downloaded from %s '", "'is %s and does not match the expected '", "'hash of %s. Please manually download '", "'as per the README.md instructions.'", ")", "%", "(", "cub_zip_file", ",", "cub_url", ",", "cub_file_sha_hash", ",", "cub_sha_hash", ")", "raise", "InstallCubException", "(", "msg", ")", "# Unzip into montblanc/include/cub", "with", "zipfile", ".", "ZipFile", "(", "cub_zip_file", ",", "'r'", ")", "as", "zip_file", ":", "# Remove any existing installs", "shutil", ".", "rmtree", "(", "cub_unzipped_path", ",", "ignore_errors", "=", "True", ")", "shutil", ".", "rmtree", "(", "cub_new_unzipped_path", ",", "ignore_errors", "=", "True", ")", "# Unzip", "zip_file", ".", "extractall", "(", "mb_inc_path", ")", "# Rename. cub_unzipped_path is mb_inc_path/cub_zip_dir", "shutil", ".", "move", "(", "cub_unzipped_path", ",", "cub_new_unzipped_path", ")", "log", ".", "info", "(", "\"NVIDIA cub archive unzipped into '{}'\"", ".", "format", "(", "cub_new_unzipped_path", ")", ")", "there", ",", "reason", "=", "is_cub_installed", "(", "cub_readme", ",", "cub_header", ",", "cub_version_str", ")", "if", "not", "there", ":", "raise", "InstallCubException", "(", "reason", ")" ]
Downloads and installs cub into mb_inc_path
[ "Downloads", "and", "installs", "cub", "into", "mb_inc_path" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/install/cub.py#L97-L161
ska-sa/montblanc
install/tensorflow_ops_ext.py
cuda_architecture_flags
def cuda_architecture_flags(device_info): """ Emit a list of architecture flags for each CUDA device found ['--gpu-architecture=sm_30', '--gpu-architecture=sm_52'] """ # Figure out the necessary device architectures if len(device_info['devices']) == 0: archs = ['--gpu-architecture=sm_30'] log.info("No CUDA devices found, defaulting to architecture '{}'".format(archs[0])) else: archs = set() for device in device_info['devices']: arch_str = '--gpu-architecture=sm_{}{}'.format(device['major'], device['minor']) log.info("Using '{}' for '{}'".format(arch_str, device['name'])) archs.add(arch_str) return list(archs)
python
def cuda_architecture_flags(device_info): """ Emit a list of architecture flags for each CUDA device found ['--gpu-architecture=sm_30', '--gpu-architecture=sm_52'] """ # Figure out the necessary device architectures if len(device_info['devices']) == 0: archs = ['--gpu-architecture=sm_30'] log.info("No CUDA devices found, defaulting to architecture '{}'".format(archs[0])) else: archs = set() for device in device_info['devices']: arch_str = '--gpu-architecture=sm_{}{}'.format(device['major'], device['minor']) log.info("Using '{}' for '{}'".format(arch_str, device['name'])) archs.add(arch_str) return list(archs)
[ "def", "cuda_architecture_flags", "(", "device_info", ")", ":", "# Figure out the necessary device architectures", "if", "len", "(", "device_info", "[", "'devices'", "]", ")", "==", "0", ":", "archs", "=", "[", "'--gpu-architecture=sm_30'", "]", "log", ".", "info", "(", "\"No CUDA devices found, defaulting to architecture '{}'\"", ".", "format", "(", "archs", "[", "0", "]", ")", ")", "else", ":", "archs", "=", "set", "(", ")", "for", "device", "in", "device_info", "[", "'devices'", "]", ":", "arch_str", "=", "'--gpu-architecture=sm_{}{}'", ".", "format", "(", "device", "[", "'major'", "]", ",", "device", "[", "'minor'", "]", ")", "log", ".", "info", "(", "\"Using '{}' for '{}'\"", ".", "format", "(", "arch_str", ",", "device", "[", "'name'", "]", ")", ")", "archs", ".", "add", "(", "arch_str", ")", "return", "list", "(", "archs", ")" ]
Emit a list of architecture flags for each CUDA device found ['--gpu-architecture=sm_30', '--gpu-architecture=sm_52']
[ "Emit", "a", "list", "of", "architecture", "flags", "for", "each", "CUDA", "device", "found", "[", "--", "gpu", "-", "architecture", "=", "sm_30", "--", "gpu", "-", "architecture", "=", "sm_52", "]" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/install/tensorflow_ops_ext.py#L63-L80
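For example, with a ``device_info`` dict shaped as the function body expects:

.. code-block:: python

    device_info = {"devices": [
        {"name": "GeForce GTX 1080", "major": 6, "minor": 1},
        {"name": "Tesla K40",        "major": 3, "minor": 5},
    ]}
    print(cuda_architecture_flags(device_info))
    # ['--gpu-architecture=sm_61', '--gpu-architecture=sm_35'] (set order varies)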
ska-sa/montblanc
install/tensorflow_ops_ext.py
create_tensorflow_extension
def create_tensorflow_extension(nvcc_settings, device_info): """ Create an extension that builds the custom tensorflow ops """ import tensorflow as tf import glob use_cuda = (bool(nvcc_settings['cuda_available']) and tf.test.is_built_with_cuda()) # Source and includes source_path = os.path.join('montblanc', 'impl', 'rime', 'tensorflow', 'rime_ops') sources = glob.glob(os.path.join(source_path, '*.cpp')) # Header dependencies depends = glob.glob(os.path.join(source_path, '*.h')) # Include directories tf_inc = tf.sysconfig.get_include() include_dirs = [os.path.join('montblanc', 'include'), source_path] include_dirs += [tf_inc, os.path.join(tf_inc, "external", "nsync", "public")] # Libraries library_dirs = [tf.sysconfig.get_lib()] libraries = ['tensorflow_framework'] extra_link_args = ['-fPIC', '-fopenmp', '-g0'] # Macros define_macros = [ ('_MWAITXINTRIN_H_INCLUDED', None), ('_FORCE_INLINES', None), ('_GLIBCXX_USE_CXX11_ABI', 0)] # Common flags flags = ['-std=c++11'] gcc_flags = flags + ['-g0', '-fPIC', '-fopenmp', '-O2'] gcc_flags += ['-march=native', '-mtune=native'] nvcc_flags = flags + [] # Add cuda specific build information, if it is available if use_cuda: # CUDA source files sources += glob.glob(os.path.join(source_path, '*.cu')) # CUDA include directories include_dirs += nvcc_settings['include_dirs'] # CUDA header dependencies depends += glob.glob(os.path.join(source_path, '*.cuh')) # CUDA libraries library_dirs += nvcc_settings['library_dirs'] libraries += nvcc_settings['libraries'] # Flags nvcc_flags += ['-x', 'cu'] nvcc_flags += ['--compiler-options', '"-fPIC"'] # --gpu-architecture=sm_xy flags nvcc_flags += cuda_architecture_flags(device_info) # Ideally this would be set in define_macros, but # this must be set differently for gcc and nvcc nvcc_flags += ['-DGOOGLE_CUDA=%d' % int(use_cuda)] return Extension(tensorflow_extension_name, sources=sources, include_dirs=include_dirs, depends=depends, library_dirs=library_dirs, libraries=libraries, define_macros=define_macros, # this syntax is specific to this build system # we're only going to use certain compiler args with nvcc and not with gcc # the implementation of this trick is in customize_compiler_for_nvcc() above extra_compile_args={ 'gcc': gcc_flags, 'nvcc': nvcc_flags }, extra_link_args=extra_link_args, )
python
def create_tensorflow_extension(nvcc_settings, device_info): """ Create an extension that builds the custom tensorflow ops """ import tensorflow as tf import glob use_cuda = (bool(nvcc_settings['cuda_available']) and tf.test.is_built_with_cuda()) # Source and includes source_path = os.path.join('montblanc', 'impl', 'rime', 'tensorflow', 'rime_ops') sources = glob.glob(os.path.join(source_path, '*.cpp')) # Header dependencies depends = glob.glob(os.path.join(source_path, '*.h')) # Include directories tf_inc = tf.sysconfig.get_include() include_dirs = [os.path.join('montblanc', 'include'), source_path] include_dirs += [tf_inc, os.path.join(tf_inc, "external", "nsync", "public")] # Libraries library_dirs = [tf.sysconfig.get_lib()] libraries = ['tensorflow_framework'] extra_link_args = ['-fPIC', '-fopenmp', '-g0'] # Macros define_macros = [ ('_MWAITXINTRIN_H_INCLUDED', None), ('_FORCE_INLINES', None), ('_GLIBCXX_USE_CXX11_ABI', 0)] # Common flags flags = ['-std=c++11'] gcc_flags = flags + ['-g0', '-fPIC', '-fopenmp', '-O2'] gcc_flags += ['-march=native', '-mtune=native'] nvcc_flags = flags + [] # Add cuda specific build information, if it is available if use_cuda: # CUDA source files sources += glob.glob(os.path.join(source_path, '*.cu')) # CUDA include directories include_dirs += nvcc_settings['include_dirs'] # CUDA header dependencies depends += glob.glob(os.path.join(source_path, '*.cuh')) # CUDA libraries library_dirs += nvcc_settings['library_dirs'] libraries += nvcc_settings['libraries'] # Flags nvcc_flags += ['-x', 'cu'] nvcc_flags += ['--compiler-options', '"-fPIC"'] # --gpu-architecture=sm_xy flags nvcc_flags += cuda_architecture_flags(device_info) # Ideally this would be set in define_macros, but # this must be set differently for gcc and nvcc nvcc_flags += ['-DGOOGLE_CUDA=%d' % int(use_cuda)] return Extension(tensorflow_extension_name, sources=sources, include_dirs=include_dirs, depends=depends, library_dirs=library_dirs, libraries=libraries, define_macros=define_macros, # this syntax is specific to this build system # we're only going to use certain compiler args with nvcc and not with gcc # the implementation of this trick is in customize_compiler_for_nvcc() above extra_compile_args={ 'gcc': gcc_flags, 'nvcc': nvcc_flags }, extra_link_args=extra_link_args, )
[ "def", "create_tensorflow_extension", "(", "nvcc_settings", ",", "device_info", ")", ":", "import", "tensorflow", "as", "tf", "import", "glob", "use_cuda", "=", "(", "bool", "(", "nvcc_settings", "[", "'cuda_available'", "]", ")", "and", "tf", ".", "test", ".", "is_built_with_cuda", "(", ")", ")", "# Source and includes", "source_path", "=", "os", ".", "path", ".", "join", "(", "'montblanc'", ",", "'impl'", ",", "'rime'", ",", "'tensorflow'", ",", "'rime_ops'", ")", "sources", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "source_path", ",", "'*.cpp'", ")", ")", "# Header dependencies", "depends", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "source_path", ",", "'*.h'", ")", ")", "# Include directories", "tf_inc", "=", "tf", ".", "sysconfig", ".", "get_include", "(", ")", "include_dirs", "=", "[", "os", ".", "path", ".", "join", "(", "'montblanc'", ",", "'include'", ")", ",", "source_path", "]", "include_dirs", "+=", "[", "tf_inc", ",", "os", ".", "path", ".", "join", "(", "tf_inc", ",", "\"external\"", ",", "\"nsync\"", ",", "\"public\"", ")", "]", "# Libraries", "library_dirs", "=", "[", "tf", ".", "sysconfig", ".", "get_lib", "(", ")", "]", "libraries", "=", "[", "'tensorflow_framework'", "]", "extra_link_args", "=", "[", "'-fPIC'", ",", "'-fopenmp'", ",", "'-g0'", "]", "# Macros", "define_macros", "=", "[", "(", "'_MWAITXINTRIN_H_INCLUDED'", ",", "None", ")", ",", "(", "'_FORCE_INLINES'", ",", "None", ")", ",", "(", "'_GLIBCXX_USE_CXX11_ABI'", ",", "0", ")", "]", "# Common flags", "flags", "=", "[", "'-std=c++11'", "]", "gcc_flags", "=", "flags", "+", "[", "'-g0'", ",", "'-fPIC'", ",", "'-fopenmp'", ",", "'-O2'", "]", "gcc_flags", "+=", "[", "'-march=native'", ",", "'-mtune=native'", "]", "nvcc_flags", "=", "flags", "+", "[", "]", "# Add cuda specific build information, if it is available", "if", "use_cuda", ":", "# CUDA source files", "sources", "+=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "source_path", ",", "'*.cu'", ")", ")", "# CUDA include directories", "include_dirs", "+=", "nvcc_settings", "[", "'include_dirs'", "]", "# CUDA header dependencies", "depends", "+=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "source_path", ",", "'*.cuh'", ")", ")", "# CUDA libraries", "library_dirs", "+=", "nvcc_settings", "[", "'library_dirs'", "]", "libraries", "+=", "nvcc_settings", "[", "'libraries'", "]", "# Flags", "nvcc_flags", "+=", "[", "'-x'", ",", "'cu'", "]", "nvcc_flags", "+=", "[", "'--compiler-options'", ",", "'\"-fPIC\"'", "]", "# --gpu-architecture=sm_xy flags", "nvcc_flags", "+=", "cuda_architecture_flags", "(", "device_info", ")", "# Ideally this would be set in define_macros, but", "# this must be set differently for gcc and nvcc", "nvcc_flags", "+=", "[", "'-DGOOGLE_CUDA=%d'", "%", "int", "(", "use_cuda", ")", "]", "return", "Extension", "(", "tensorflow_extension_name", ",", "sources", "=", "sources", ",", "include_dirs", "=", "include_dirs", ",", "depends", "=", "depends", ",", "library_dirs", "=", "library_dirs", ",", "libraries", "=", "libraries", ",", "define_macros", "=", "define_macros", ",", "# this syntax is specific to this build system", "# we're only going to use certain compiler args with nvcc and not with gcc", "# the implementation of this trick is in customize_compiler_for_nvcc() above", "extra_compile_args", "=", "{", "'gcc'", ":", "gcc_flags", ",", "'nvcc'", ":", "nvcc_flags", "}", ",", "extra_link_args", "=", "extra_link_args", ",", ")" ]
Create an extension that builds the custom tensorflow ops
[ "Create", "an", "extension", "that", "builds", "the", "custom", "tensorflow", "ops" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/install/tensorflow_ops_ext.py#L82-L152
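The 'gcc'/'nvcc' dict passed as extra_compile_args above is not understood by stock distutils; it relies on a patched _compile method that picks flags per source file. A minimal sketch of that dispatch, assuming the standard UnixCCompiler._compile signature — the real customize_compiler_for_nvcc() in this file may differ in detail:

def customize_compiler_for_nvcc(compiler, nvcc_path='nvcc'):
    # Allow .cu files to be handed to the compiler at all
    compiler.src_extensions.append('.cu')
    default_compiler_so = compiler.compiler_so
    super_compile = compiler._compile

    def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
        if src.endswith('.cu'):
            # CUDA sources go through nvcc with the 'nvcc' flag set
            compiler.set_executable('compiler_so', nvcc_path)
            postargs = extra_postargs['nvcc']
        else:
            postargs = extra_postargs['gcc']
        super_compile(obj, src, ext, cc_args, postargs, pp_opts)
        # Restore the default compiler for the next source file
        compiler.compiler_so = default_compiler_so

    compiler._compile = _compile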
ska-sa/montblanc
montblanc/examples/standalone.py
CustomSourceProvider.updated_dimensions
def updated_dimensions(self): """ Inform montblanc about dimension sizes """ return [("ntime", args.ntime), # Timesteps ("nchan", args.nchan), # Channels ("na", args.na), # Antenna ("npsrc", len(lm_coords))]
python
def updated_dimensions(self): """ Inform montblanc about dimension sizes """ return [("ntime", args.ntime), # Timesteps ("nchan", args.nchan), # Channels ("na", args.na), # Antenna ("npsrc", len(lm_coords))]
[ "def", "updated_dimensions", "(", "self", ")", ":", "return", "[", "(", "\"ntime\"", ",", "args", ".", "ntime", ")", ",", "# Timesteps", "(", "\"nchan\"", ",", "args", ".", "nchan", ")", ",", "# Channels", "(", "\"na\"", ",", "args", ".", "na", ")", ",", "# Antenna", "(", "\"npsrc\"", ",", "len", "(", "lm_coords", ")", ")", "]" ]
Inform montblanc about dimension sizes
[ "Inform", "montblanc", "about", "dimension", "sizes" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/examples/standalone.py#L45-L50
ska-sa/montblanc
montblanc/examples/standalone.py
CustomSourceProvider.point_lm
def point_lm(self, context): """ Supply point source lm coordinates to montblanc """ # Shape (npsrc, 2) (ls, us), _ = context.array_extents(context.name) return np.asarray(lm_coords[ls:us], dtype=context.dtype)
python
def point_lm(self, context): """ Supply point source lm coordinates to montblanc """ # Shape (npsrc, 2) (ls, us), _ = context.array_extents(context.name) return np.asarray(lm_coords[ls:us], dtype=context.dtype)
[ "def", "point_lm", "(", "self", ",", "context", ")", ":", "# Shape (npsrc, 2)", "(", "ls", ",", "us", ")", ",", "_", "=", "context", ".", "array_extents", "(", "context", ".", "name", ")", "return", "np", ".", "asarray", "(", "lm_coords", "[", "ls", ":", "us", "]", ",", "dtype", "=", "context", ".", "dtype", ")" ]
Supply point source lm coordinates to montblanc
[ "Supply", "point", "source", "lm", "coordinates", "to", "montblanc" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/examples/standalone.py#L52-L57
ska-sa/montblanc
montblanc/examples/standalone.py
CustomSourceProvider.point_stokes
def point_stokes(self, context): """ Supply point source stokes parameters to montblanc """ # Shape (npsrc, ntime, 4) (ls, us), (lt, ut), (l, u) = context.array_extents(context.name) data = np.empty(context.shape, context.dtype) data[ls:us,:,l:u] = np.asarray(lm_stokes)[ls:us,None,:] return data
python
def point_stokes(self, context): """ Supply point source stokes parameters to montblanc """ # Shape (npsrc, ntime, 4) (ls, us), (lt, ut), (l, u) = context.array_extents(context.name) data = np.empty(context.shape, context.dtype) data[ls:us,:,l:u] = np.asarray(lm_stokes)[ls:us,None,:] return data
[ "def", "point_stokes", "(", "self", ",", "context", ")", ":", "# Shape (npsrc, ntime, 4)", "(", "ls", ",", "us", ")", ",", "(", "lt", ",", "ut", ")", ",", "(", "l", ",", "u", ")", "=", "context", ".", "array_extents", "(", "context", ".", "name", ")", "data", "=", "np", ".", "empty", "(", "context", ".", "shape", ",", "context", ".", "dtype", ")", "data", "[", "ls", ":", "us", ",", ":", ",", "l", ":", "u", "]", "=", "np", ".", "asarray", "(", "lm_stokes", ")", "[", "ls", ":", "us", ",", "None", ",", ":", "]", "return", "data" ]
Supply point source stokes parameters to montblanc
[ "Supply", "point", "source", "stokes", "parameters", "to", "montblanc" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/examples/standalone.py#L59-L67
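The None index in the assignment above inserts a unit time axis, so each source's Stokes vector broadcasts across every timestep. A standalone illustration with made-up values:

import numpy as np

lm_stokes = [[1.0, 0.0, 0.0, 0.0], [2.0, 0.0, 0.0, 0.0]]   # (npsrc, 4)
data = np.empty((2, 3, 4))                                 # (npsrc, ntime, 4)
# (2, 4) -> (2, 1, 4), which broadcasts over the ntime axis
data[0:2, :, 0:4] = np.asarray(lm_stokes)[0:2, None, :]
assert (data[:, 0, :] == data[:, 2, :]).all()   # identical at every timestep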
ska-sa/montblanc
montblanc/examples/standalone.py
CustomSourceProvider.uvw
def uvw(self, context): """ Supply UVW antenna coordinates to montblanc """ # Shape (ntime, na, 3) (lt, ut), (la, ua), (l, u) = context.array_extents(context.name) # Create empty UVW coordinates data = np.empty(context.shape, context.dtype) data[:,:,0] = np.arange(la+1, ua+1) # U = antenna index data[:,:,1] = 0 # V = 0 data[:,:,2] = 0 # W = 0 return data
python
def uvw(self, context): """ Supply UVW antenna coordinates to montblanc """ # Shape (ntime, na, 3) (lt, ut), (la, ua), (l, u) = context.array_extents(context.name) # Create empty UVW coordinates data = np.empty(context.shape, context.dtype) data[:,:,0] = np.arange(la+1, ua+1) # U = antenna index data[:,:,1] = 0 # V = 0 data[:,:,2] = 0 # W = 0 return data
[ "def", "uvw", "(", "self", ",", "context", ")", ":", "# Shape (ntime, na, 3)", "(", "lt", ",", "ut", ")", ",", "(", "la", ",", "ua", ")", ",", "(", "l", ",", "u", ")", "=", "context", ".", "array_extents", "(", "context", ".", "name", ")", "# Create empty UVW coordinates", "data", "=", "np", ".", "empty", "(", "context", ".", "shape", ",", "context", ".", "dtype", ")", "data", "[", ":", ",", ":", ",", "0", "]", "=", "np", ".", "arange", "(", "la", "+", "1", ",", "ua", "+", "1", ")", "# U = antenna index", "data", "[", ":", ",", ":", ",", "1", "]", "=", "0", "# V = 0", "data", "[", ":", ",", ":", ",", "2", "]", "=", "0", "# W = 0", "return", "data" ]
Supply UVW antenna coordinates to montblanc
[ "Supply", "UVW", "antenna", "coordinates", "to", "montblanc" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/examples/standalone.py#L69-L81
ska-sa/montblanc
setup.py
reinitialize_command
def reinitialize_command(self, command, reinit_subcommands): """ Monkeypatch distutils.Distribution.reinitialize_command() to match behavior of Distribution.get_command_obj() This fixes a problem where 'pip install -e' does not reinitialise options using the setup(options={...}) variable for the build_ext command. This also affects other option sources such as setup.cfg. """ cmd_obj = _DISTUTILS_REINIT(self, command, reinit_subcommands) options = self.command_options.get(command) if options: self._set_command_options(cmd_obj, options) return cmd_obj
python
def reinitialize_command(self, command, reinit_subcommands): """ Monkeypatch distutils.Distribution.reinitialize_command() to match behavior of Distribution.get_command_obj() This fixes a problem where 'pip install -e' does not reinitialise options using the setup(options={...}) variable for the build_ext command. This also affects other option sources such as setup.cfg. """ cmd_obj = _DISTUTILS_REINIT(self, command, reinit_subcommands) options = self.command_options.get(command) if options: self._set_command_options(cmd_obj, options) return cmd_obj
[ "def", "reinitialize_command", "(", "self", ",", "command", ",", "reinit_subcommands", ")", ":", "cmd_obj", "=", "_DISTUTILS_REINIT", "(", "self", ",", "command", ",", "reinit_subcommands", ")", "options", "=", "self", ".", "command_options", ".", "get", "(", "command", ")", "if", "options", ":", "self", ".", "_set_command_options", "(", "cmd_obj", ",", "options", ")", "return", "cmd_obj" ]
Monkeypatch distutils.Distribution.reinitialize_command() to match behavior of Distribution.get_command_obj() This fixes a problem where 'pip install -e' does not reinitialise options using the setup(options={...}) variable for the build_ext command. This also affects other option sources such as setup.cfg.
[ "Monkeypatch", "distutils", ".", "Distribution", ".", "reinitialize_command", "()", "to", "match", "behavior", "of", "Distribution", ".", "get_command_obj", "()", "This", "fixes", "a", "problem", "where", "pip", "install", "-", "e", "does", "not", "reinitialise", "options", "using", "the", "setup", "(", "options", "=", "{", "...", "}", ")", "variable", "for", "the", "build_ext", "command", ".", "This", "also", "effects", "other", "option", "sourcs", "such", "as", "setup", ".", "cfg", "." ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/setup.py#L64-L79
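_DISTUTILS_REINIT is the saved original method; presumably the patch is installed at import time along these lines (a sketch, not the verbatim setup.py wiring):

from distutils.dist import Distribution

# Save the original implementation, then bind the patched version
_DISTUTILS_REINIT = Distribution.reinitialize_command
Distribution.reinitialize_command = reinitialize_command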
ska-sa/montblanc
montblanc/util/__init__.py
nr_of_baselines
def nr_of_baselines(na, auto_correlations=False): """ Compute the number of baselines for the given number of antenna. Can specify whether auto-correlations should be taken into account """ m = (na-1) if auto_correlations is False else (na+1) return (na*m)//2
python
def nr_of_baselines(na, auto_correlations=False): """ Compute the number of baselines for the given number of antenna. Can specify whether auto-correlations should be taken into account """ m = (na-1) if auto_correlations is False else (na+1) return (na*m)//2
[ "def", "nr_of_baselines", "(", "na", ",", "auto_correlations", "=", "False", ")", ":", "m", "=", "(", "na", "-", "1", ")", "if", "auto_correlations", "is", "False", "else", "(", "na", "+", "1", ")", "return", "(", "na", "*", "m", ")", "//", "2" ]
Compute the number of baselines for the given number of antenna. Can specify whether auto-correlations should be taken into account
[ "Compute", "the", "number", "of", "baselines", "for", "the", "given", "number", "of", "antenna", ".", "Can", "specify", "whether", "auto", "-", "correlations", "should", "be", "taken", "into", "account" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/util/__init__.py#L43-L51
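Without auto-correlations this is the pair count na*(na-1)/2; including them, each antenna also correlates with itself, giving na*(na+1)/2. For example:

>>> nr_of_baselines(7)                          # 7*6//2
21
>>> nr_of_baselines(7, auto_correlations=True)  # 7*8//2
28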
ska-sa/montblanc
montblanc/util/__init__.py
nr_of_antenna
def nr_of_antenna(nbl, auto_correlations=False): """ Compute the number of antenna for the given number of baselines. Can specify whether auto-correlations should be taken into account """ t = 1 if auto_correlations is False else -1 return int(t + math.sqrt(1 + 8*nbl)) // 2
python
def nr_of_antenna(nbl, auto_correlations=False): """ Compute the number of antenna for the given number of baselines. Can specify whether auto-correlations should be taken into account """ t = 1 if auto_correlations is False else -1 return int(t + math.sqrt(1 + 8*nbl)) // 2
[ "def", "nr_of_antenna", "(", "nbl", ",", "auto_correlations", "=", "False", ")", ":", "t", "=", "1", "if", "auto_correlations", "is", "False", "else", "-", "1", "return", "int", "(", "t", "+", "math", ".", "sqrt", "(", "1", "+", "8", "*", "nbl", ")", ")", "//", "2" ]
Compute the number of antenna for the given number of baselines. Can specify whether auto-correlations should be taken into account
[ "Compute", "the", "number", "of", "antenna", "for", "the", "given", "number", "of", "baselines", ".", "Can", "specify", "whether", "auto", "-", "correlations", "should", "be", "taken", "into", "account" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/util/__init__.py#L53-L61
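This inverts nbl = na*(na-1)/2 via the quadratic formula, na = (1 + sqrt(1 + 8*nbl))/2, so it round-trips with nr_of_baselines:

>>> nr_of_antenna(21)                          # sqrt(169) = 13; (1+13)//2
7
>>> nr_of_antenna(28, auto_correlations=True)  # sqrt(225) = 15; (-1+15)//2
7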
ska-sa/montblanc
montblanc/util/__init__.py
array_bytes
def array_bytes(shape, dtype): """ Estimates the memory in bytes required for an array of the supplied shape and dtype """ return np.product(shape)*np.dtype(dtype).itemsize
python
def array_bytes(shape, dtype): """ Estimates the memory in bytes required for an array of the supplied shape and dtype """ return np.product(shape)*np.dtype(dtype).itemsize
[ "def", "array_bytes", "(", "shape", ",", "dtype", ")", ":", "return", "np", ".", "product", "(", "shape", ")", "*", "np", ".", "dtype", "(", "dtype", ")", ".", "itemsize" ]
Estimates the memory in bytes required for an array of the supplied shape and dtype
[ "Estimates", "the", "memory", "in", "bytes", "required", "for", "an", "array", "of", "the", "supplied", "shape", "and", "dtype" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/util/__init__.py#L79-L81
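The estimate is simply the element count times the dtype's itemsize, e.g.:

>>> array_bytes((100, 100), np.float64)    # 10000 elements * 8 bytes
80000
>>> array_bytes((10, 4), np.complex128)    # 40 elements * 16 bytes
640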
ska-sa/montblanc
montblanc/util/__init__.py
random_like
def random_like(ary=None, shape=None, dtype=None): """ Returns a random array of the same shape and type as the supplied array argument, or the supplied shape and dtype """ if ary is not None: shape, dtype = ary.shape, ary.dtype elif shape is None or dtype is None: raise ValueError(( 'random_like(ary, shape, dtype) must be supplied ' 'with either an array argument, or the shape and dtype ' 'of the desired random array.')) if np.issubdtype(dtype, np.complexfloating): return (np.random.random(size=shape) + \ np.random.random(size=shape)*1j).astype(dtype) else: return np.random.random(size=shape).astype(dtype)
python
def random_like(ary=None, shape=None, dtype=None): """ Returns a random array of the same shape and type as the supplied array argument, or the supplied shape and dtype """ if ary is not None: shape, dtype = ary.shape, ary.dtype elif shape is None or dtype is None: raise ValueError(( 'random_like(ary, shape, dtype) must be supplied ' 'with either an array argument, or the shape and dtype ' 'of the desired random array.')) if np.issubdtype(dtype, np.complexfloating): return (np.random.random(size=shape) + \ np.random.random(size=shape)*1j).astype(dtype) else: return np.random.random(size=shape).astype(dtype)
[ "def", "random_like", "(", "ary", "=", "None", ",", "shape", "=", "None", ",", "dtype", "=", "None", ")", ":", "if", "ary", "is", "not", "None", ":", "shape", ",", "dtype", "=", "ary", ".", "shape", ",", "ary", ".", "dtype", "elif", "shape", "is", "None", "or", "dtype", "is", "None", ":", "raise", "ValueError", "(", "(", "'random_like(ary, shape, dtype) must be supplied '", "'with either an array argument, or the shape and dtype '", "'of the desired random array.'", ")", ")", "if", "np", ".", "issubdtype", "(", "dtype", ",", "np", ".", "complexfloating", ")", ":", "return", "(", "np", ".", "random", ".", "random", "(", "size", "=", "shape", ")", "+", "np", ".", "random", ".", "random", "(", "size", "=", "shape", ")", "*", "1j", ")", ".", "astype", "(", "dtype", ")", "else", ":", "return", "np", ".", "random", ".", "random", "(", "size", "=", "shape", ")", ".", "astype", "(", "dtype", ")" ]
Returns a random array of the same shape and type as the supplied array argument, or the supplied shape and dtype
[ "Returns", "a", "random", "array", "of", "the", "same", "shape", "and", "type", "as", "the", "supplied", "array", "argument", "or", "the", "supplied", "shape", "and", "dtype" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/util/__init__.py#L96-L113
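Either an existing array or an explicit shape/dtype pair may be supplied; complex dtypes get independent random real and imaginary parts:

>>> a = random_like(shape=(3, 4), dtype=np.complex64)
>>> a.shape, a.dtype
((3, 4), dtype('complex64'))
>>> b = random_like(a)    # mirrors a's shape and dtype
>>> b.shape == a.shape and b.dtype == a.dtype
True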
ska-sa/montblanc
montblanc/util/__init__.py
flatten
def flatten(nested): """ Return a flattened version of the nested argument """ flat_return = list() def __inner_flat(nested,flat): for i in nested: __inner_flat(i, flat) if isinstance(i, list) else flat.append(i) return flat __inner_flat(nested,flat_return) return flat_return
python
def flatten(nested): """ Return a flattened version of the nested argument """ flat_return = list() def __inner_flat(nested,flat): for i in nested: __inner_flat(i, flat) if isinstance(i, list) else flat.append(i) return flat __inner_flat(nested,flat_return) return flat_return
[ "def", "flatten", "(", "nested", ")", ":", "flat_return", "=", "list", "(", ")", "def", "__inner_flat", "(", "nested", ",", "flat", ")", ":", "for", "i", "in", "nested", ":", "__inner_flat", "(", "i", ",", "flat", ")", "if", "isinstance", "(", "i", ",", "list", ")", "else", "flat", ".", "append", "(", "i", ")", "return", "flat", "__inner_flat", "(", "nested", ",", "flat_return", ")", "return", "flat_return" ]
Return a flattened version of the nested argument
[ "Return", "a", "flatten", "version", "of", "the", "nested", "argument" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/util/__init__.py#L115-L126
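Only list instances are recursed into; other iterables such as tuples are appended whole:

>>> flatten([1, [2, [3, 4]], 5])
[1, 2, 3, 4, 5]
>>> flatten([1, (2, 3)])
[1, (2, 3)]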
ska-sa/montblanc
montblanc/util/__init__.py
dict_array_bytes
def dict_array_bytes(ary, template): """ Return the number of bytes required by an array Arguments --------------- ary : dict Dictionary representation of an array template : dict A dictionary of key-values, used to replace any string values in the array with concrete integral values Returns ----------- The number of bytes required to represent the array. """ shape = shape_from_str_tuple(ary['shape'], template) dtype = dtype_from_str(ary['dtype'], template) return array_bytes(shape, dtype)
python
def dict_array_bytes(ary, template): """ Return the number of bytes required by an array Arguments --------------- ary : dict Dictionary representation of an array template : dict A dictionary of key-values, used to replace any string values in the array with concrete integral values Returns ----------- The number of bytes required to represent the array. """ shape = shape_from_str_tuple(ary['shape'], template) dtype = dtype_from_str(ary['dtype'], template) return array_bytes(shape, dtype)
[ "def", "dict_array_bytes", "(", "ary", ",", "template", ")", ":", "shape", "=", "shape_from_str_tuple", "(", "ary", "[", "'shape'", "]", ",", "template", ")", "dtype", "=", "dtype_from_str", "(", "ary", "[", "'dtype'", "]", ",", "template", ")", "return", "array_bytes", "(", "shape", ",", "dtype", ")" ]
Return the number of bytes required by an array Arguments --------------- ary : dict Dictionary representation of an array template : dict A dictionary of key-values, used to replace any string values in the array with concrete integral values Returns ----------- The number of bytes required to represent the array.
[ "Return", "the", "number", "of", "bytes", "required", "by", "an", "array" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/util/__init__.py#L128-L149
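A hypothetical example, assuming the template resolves both dimension names (via shape_from_str_tuple) and type aliases (via dtype_from_str) — the 'ft' alias here is invented for illustration:

ary = {'shape': ('ntime', 'na', 3), 'dtype': 'ft'}
template = {'ntime': 10, 'na': 7, 'ft': np.float64}
dict_array_bytes(ary, template)   # 10*7*3 elements * 8 bytes = 1680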
ska-sa/montblanc
montblanc/util/__init__.py
dict_array_bytes_required
def dict_array_bytes_required(arrays, template): """ Return the number of bytes required by a dictionary of arrays. Arguments --------------- arrays : list A list of dictionaries defining the arrays template : dict A dictionary of key-values, used to replace any string values in the arrays with concrete integral values Returns ----------- The number of bytes required to represent all the arrays. """ return np.sum([dict_array_bytes(ary, template) for ary in arrays])
python
def dict_array_bytes_required(arrays, template): """ Return the number of bytes required by a dictionary of arrays. Arguments --------------- arrays : list A list of dictionaries defining the arrays template : dict A dictionary of key-values, used to replace any string values in the arrays with concrete integral values Returns ----------- The number of bytes required to represent all the arrays. """ return np.sum([dict_array_bytes(ary, template) for ary in arrays])
[ "def", "dict_array_bytes_required", "(", "arrays", ",", "template", ")", ":", "return", "np", ".", "sum", "(", "[", "dict_array_bytes", "(", "ary", ",", "template", ")", "for", "ary", "in", "arrays", "]", ")" ]
Return the number of bytes required by a dictionary of arrays. Arguments --------------- arrays : list A list of dictionaries defining the arrays template : dict A dictionary of key-values, used to replace any string values in the arrays with concrete integral values Returns ----------- The number of bytes required to represent all the arrays.
[ "Return", "the", "number", "of", "bytes", "required", "by", "a", "dictionary", "of", "arrays", "." ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/util/__init__.py#L151-L171
ska-sa/montblanc
montblanc/util/__init__.py
viable_dim_config
def viable_dim_config(bytes_available, arrays, template, dim_ord, nsolvers=1): """ Returns the number of timesteps possible, given the registered arrays and a memory budget defined by bytes_available Arguments ---------------- bytes_available : int The memory budget, or available number of bytes for solving the problem. arrays : list List of dictionaries describing the arrays template : dict Dictionary containing key-values that will be used to replace any string representations of dimensions and types. slvr.template_dict() will return something suitable. dim_ord : list list of dimension string names that the problem should be subdivided by. e.g. ['ntime', 'nbl', 'nchan']. Multiple dimensions can be reduced simultaneously using the following syntax 'nbl&na'. This is mostly useful for the baseline-antenna equivalence. nsolvers : int Number of solvers to budget for. Defaults to one. Returns ---------- A tuple (boolean, dict). The boolean is True if the problem can fit within the supplied budget, False otherwise. The dictionary contains the reduced dimensions as key and the reduced size as value. e.g. (True, { 'time' : 1, 'nbl' : 1 }) For a dim_ord = ['ntime', 'nbl', 'nchan'], this method will try to fit an ntime x nbl x nchan problem into the available number of bytes. If this is not possible, it will first set ntime=1, and then try to fit a 1 x nbl x nchan problem into the budget, then a 1 x 1 x nchan problem. One can specify reductions for specific dimensions. For example, ['ntime=20', 'nbl=1&na=2', 'nchan=50%'] will reduce ntime to 20, but no lower. nbl=1&na=2 sets nbl and na to 1 and 2 respectively in the same operation. nchan=50% will continuously halve the nchan dimension until it reaches a value of 1. """ if not isinstance(dim_ord, list): raise TypeError('dim_ord should be a list') # Don't accept negative memory budgets if bytes_available < 0: bytes_available = 0 modified_dims = {} T = template.copy() bytes_used = dict_array_bytes_required(arrays, T)*nsolvers # While more bytes are used than are available, set # dimensions to one in the order specified by the # dim_ord argument. while bytes_used > bytes_available: try: dims = dim_ord.pop(0) montblanc.log.debug('Applying reduction {s}. ' 'Bytes available: {a} used: {u}'.format( s=dims, a=fmt_bytes(bytes_available), u=fmt_bytes(bytes_used))) dims = dims.strip().split('&') except IndexError: # No more dimensions available for reducing # the problem size. Unable to fit the problem # within the specified memory budget return False, modified_dims # Can't fit everything into memory, # Lower dimensions and re-evaluate for dim in dims: match = re.match(__DIM_REDUCTION_RE, dim) if not match: raise ValueError( "{d} is an invalid dimension reduction string " "Valid strings are for e.g. " "'ntime', 'ntime=20' or 'ntime=20%'" .format(d=dim)) dim_name = match.group('name') dim_value = match.group('value') dim_percent = match.group('percent') dim_value = 1 if dim_value is None else int(dim_value) # Attempt reduction by a percentage if dim_percent == '%': dim_value = int(T[dim_name] * int(dim_value) / 100.0) if dim_value < 1: # This can't be reduced any further dim_value = 1 else: # Allows another attempt at reduction # by percentage on this dimension dim_ord.insert(0, dim) # Apply the dimension reduction if T[dim_name] > dim_value: modified_dims[dim_name] = dim_value T[dim_name] = dim_value else: montblanc.log.info(('Ignored reduction of {d} ' 'of size {s} to {v}. ').format( d=dim_name, s=T[dim_name], v=dim_value)) bytes_used = dict_array_bytes_required(arrays, T)*nsolvers return True, modified_dims
python
def viable_dim_config(bytes_available, arrays, template, dim_ord, nsolvers=1): """ Returns the number of timesteps possible, given the registered arrays and a memory budget defined by bytes_available Arguments ---------------- bytes_available : int The memory budget, or available number of bytes for solving the problem. arrays : list List of dictionaries describing the arrays template : dict Dictionary containing key-values that will be used to replace any string representations of dimensions and types. slvr.template_dict() will return something suitable. dim_ord : list list of dimension string names that the problem should be subdivided by. e.g. ['ntime', 'nbl', 'nchan']. Multiple dimensions can be reduced simultaneously using the following syntax 'nbl&na'. This is mostly useful for the baseline-antenna equivalence. nsolvers : int Number of solvers to budget for. Defaults to one. Returns ---------- A tuple (boolean, dict). The boolean is True if the problem can fit within the supplied budget, False otherwise. The dictionary contains the reduced dimensions as key and the reduced size as value. e.g. (True, { 'time' : 1, 'nbl' : 1 }) For a dim_ord = ['ntime', 'nbl', 'nchan'], this method will try to fit an ntime x nbl x nchan problem into the available number of bytes. If this is not possible, it will first set ntime=1, and then try to fit a 1 x nbl x nchan problem into the budget, then a 1 x 1 x nchan problem. One can specify reductions for specific dimensions. For example, ['ntime=20', 'nbl=1&na=2', 'nchan=50%'] will reduce ntime to 20, but no lower. nbl=1&na=2 sets nbl and na to 1 and 2 respectively in the same operation. nchan=50% will continuously halve the nchan dimension until it reaches a value of 1. """ if not isinstance(dim_ord, list): raise TypeError('dim_ord should be a list') # Don't accept negative memory budgets if bytes_available < 0: bytes_available = 0 modified_dims = {} T = template.copy() bytes_used = dict_array_bytes_required(arrays, T)*nsolvers # While more bytes are used than are available, set # dimensions to one in the order specified by the # dim_ord argument. while bytes_used > bytes_available: try: dims = dim_ord.pop(0) montblanc.log.debug('Applying reduction {s}. ' 'Bytes available: {a} used: {u}'.format( s=dims, a=fmt_bytes(bytes_available), u=fmt_bytes(bytes_used))) dims = dims.strip().split('&') except IndexError: # No more dimensions available for reducing # the problem size. Unable to fit the problem # within the specified memory budget return False, modified_dims # Can't fit everything into memory, # Lower dimensions and re-evaluate for dim in dims: match = re.match(__DIM_REDUCTION_RE, dim) if not match: raise ValueError( "{d} is an invalid dimension reduction string " "Valid strings are for e.g. " "'ntime', 'ntime=20' or 'ntime=20%'" .format(d=dim)) dim_name = match.group('name') dim_value = match.group('value') dim_percent = match.group('percent') dim_value = 1 if dim_value is None else int(dim_value) # Attempt reduction by a percentage if dim_percent == '%': dim_value = int(T[dim_name] * int(dim_value) / 100.0) if dim_value < 1: # This can't be reduced any further dim_value = 1 else: # Allows another attempt at reduction # by percentage on this dimension dim_ord.insert(0, dim) # Apply the dimension reduction if T[dim_name] > dim_value: modified_dims[dim_name] = dim_value T[dim_name] = dim_value else: montblanc.log.info(('Ignored reduction of {d} ' 'of size {s} to {v}. ').format( d=dim_name, s=T[dim_name], v=dim_value)) bytes_used = dict_array_bytes_required(arrays, T)*nsolvers return True, modified_dims
[ "def", "viable_dim_config", "(", "bytes_available", ",", "arrays", ",", "template", ",", "dim_ord", ",", "nsolvers", "=", "1", ")", ":", "if", "not", "isinstance", "(", "dim_ord", ",", "list", ")", ":", "raise", "TypeError", "(", "'dim_ord should be a list'", ")", "# Don't accept non-negative memory budgets", "if", "bytes_available", "<", "0", ":", "bytes_available", "=", "0", "modified_dims", "=", "{", "}", "T", "=", "template", ".", "copy", "(", ")", "bytes_used", "=", "dict_array_bytes_required", "(", "arrays", ",", "T", ")", "*", "nsolvers", "# While more bytes are used than are available, set", "# dimensions to one in the order specified by the", "# dim_ord argument.", "while", "bytes_used", ">", "bytes_available", ":", "try", ":", "dims", "=", "dim_ord", ".", "pop", "(", "0", ")", "montblanc", ".", "log", ".", "debug", "(", "'Applying reduction {s}. '", "'Bytes available: {a} used: {u}'", ".", "format", "(", "s", "=", "dims", ",", "a", "=", "fmt_bytes", "(", "bytes_available", ")", ",", "u", "=", "fmt_bytes", "(", "bytes_used", ")", ")", ")", "dims", "=", "dims", ".", "strip", "(", ")", ".", "split", "(", "'&'", ")", "except", "IndexError", ":", "# No more dimensions available for reducing", "# the problem size. Unable to fit the problem", "# within the specified memory budget", "return", "False", ",", "modified_dims", "# Can't fit everything into memory,", "# Lower dimensions and re-evaluate", "for", "dim", "in", "dims", ":", "match", "=", "re", ".", "match", "(", "__DIM_REDUCTION_RE", ",", "dim", ")", "if", "not", "match", ":", "raise", "ValueError", "(", "\"{d} is an invalid dimension reduction string \"", "\"Valid strings are for e.g. \"", "\"'ntime', 'ntime=20' or 'ntime=20%'\"", ".", "format", "(", "d", "=", "dim", ")", ")", "dim_name", "=", "match", ".", "group", "(", "'name'", ")", "dim_value", "=", "match", ".", "group", "(", "'value'", ")", "dim_percent", "=", "match", ".", "group", "(", "'percent'", ")", "dim_value", "=", "1", "if", "dim_value", "is", "None", "else", "int", "(", "dim_value", ")", "# Attempt reduction by a percentage", "if", "dim_percent", "==", "'%'", ":", "dim_value", "=", "int", "(", "T", "[", "dim_name", "]", "*", "int", "(", "dim_value", ")", "/", "100.0", ")", "if", "dim_value", "<", "1", ":", "# This can't be reduced any further", "dim_value", "=", "1", "else", ":", "# Allows another attempt at reduction", "# by percentage on this dimension", "dim_ord", ".", "insert", "(", "0", ",", "dim", ")", "# Apply the dimension reduction", "if", "T", "[", "dim_name", "]", ">", "dim_value", ":", "modified_dims", "[", "dim_name", "]", "=", "dim_value", "T", "[", "dim_name", "]", "=", "dim_value", "else", ":", "montblanc", ".", "log", ".", "info", "(", "(", "'Ignored reduction of {d} '", "'of size {s} to {v}. '", ")", ".", "format", "(", "d", "=", "dim_name", ",", "s", "=", "T", "[", "dim_name", "]", ",", "v", "=", "dim_value", ")", ")", "bytes_used", "=", "dict_array_bytes_required", "(", "arrays", ",", "T", ")", "*", "nsolvers", "return", "True", ",", "modified_dims" ]
Returns the number of timesteps possible, given the registered arrays and a memory budget defined by bytes_available Arguments ---------------- bytes_available : int The memory budget, or available number of bytes for solving the problem. arrays : list List of dictionaries describing the arrays template : dict Dictionary containing key-values that will be used to replace any string representations of dimensions and types. slvr.template_dict() will return something suitable. dim_ord : list list of dimension string names that the problem should be subdivided by. e.g. ['ntime', 'nbl', 'nchan']. Multiple dimensions can be reduced simultaneously using the following syntax 'nbl&na'. This is mostly useful for the baseline-antenna equivalence. nsolvers : int Number of solvers to budget for. Defaults to one. Returns ---------- A tuple (boolean, dict). The boolean is True if the problem can fit within the supplied budget, False otherwise. The dictionary contains the reduced dimensions as key and the reduced size as value. e.g. (True, { 'time' : 1, 'nbl' : 1 }) For a dim_ord = ['ntime', 'nbl', 'nchan'], this method will try to fit an ntime x nbl x nchan problem into the available number of bytes. If this is not possible, it will first set ntime=1, and then try to fit a 1 x nbl x nchan problem into the budget, then a 1 x 1 x nchan problem. One can specify reductions for specific dimensions. For example, ['ntime=20', 'nbl=1&na=2', 'nchan=50%'] will reduce ntime to 20, but no lower. nbl=1&na=2 sets nbl and na to 1 and 2 respectively in the same operation. nchan=50% will continuously halve the nchan dimension until it reaches a value of 1.
[ "Returns", "the", "number", "of", "timesteps", "possible", "given", "the", "registered", "arrays", "and", "a", "memory", "budget", "defined", "by", "bytes_available" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/util/__init__.py#L180-L299
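A hypothetical budgeting call, with dimension sizes and the 'ct' type alias invented for illustration: visibilities of shape (ntime, nbl, nchan, 4) in complex128 need about 7.7 GiB at full size, so fitting into 1 GiB forces the ntime=20 reduction:

arrays = [{'shape': ('ntime', 'nbl', 'nchan', 4), 'dtype': 'ct'}]
template = {'ntime': 1000, 'nbl': 2016, 'nchan': 64, 'ct': np.complex128}
fits, mods = viable_dim_config(1024**3, arrays, template, ['ntime=20', 'nchan=50%'])
# Expected: fits is True and mods == {'ntime': 20}, since cutting ntime
# to 20 already brings usage down to roughly 158 MiB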
ska-sa/montblanc
montblanc/util/__init__.py
shape_from_str_tuple
def shape_from_str_tuple(sshape, variables, ignore=None): """ Substitutes string values in the supplied shape parameter with integer variables stored in a dictionary Parameters ---------- sshape : tuple/list composed of integers and strings. The strings should relate to integral properties registered with this Solver object variables : dictionary Keys with associated integer values. Used to replace string values within the tuple ignore : list A list of tuple strings to ignore >>> print self.shape_from_str_tuple((4,'na','ntime'),ignore=['ntime']) (4, 3) """ if ignore is None: ignore = [] if not isinstance(sshape, tuple) and not isinstance(sshape, list): raise TypeError, 'sshape argument must be a tuple or list' if not isinstance(ignore, list): raise TypeError, 'ignore argument must be a list' return tuple([int(eval_expr(v,variables)) if isinstance(v,str) else int(v) for v in sshape if v not in ignore])
python
def shape_from_str_tuple(sshape, variables, ignore=None): """ Substitutes string values in the supplied shape parameter with integer variables stored in a dictionary Parameters ---------- sshape : tuple/list composed of integers and strings. The strings should relate to integral properties registered with this Solver object variables : dictionary Keys with associated integer values. Used to replace string values within the tuple ignore : list A list of tuple strings to ignore >>> print self.shape_from_str_tuple((4,'na','ntime'),ignore=['ntime']) (4, 3) """ if ignore is None: ignore = [] if not isinstance(sshape, tuple) and not isinstance(sshape, list): raise TypeError, 'sshape argument must be a tuple or list' if not isinstance(ignore, list): raise TypeError, 'ignore argument must be a list' return tuple([int(eval_expr(v,variables)) if isinstance(v,str) else int(v) for v in sshape if v not in ignore])
[ "def", "shape_from_str_tuple", "(", "sshape", ",", "variables", ",", "ignore", "=", "None", ")", ":", "if", "ignore", "is", "None", ":", "ignore", "=", "[", "]", "if", "not", "isinstance", "(", "sshape", ",", "tuple", ")", "and", "not", "isinstance", "(", "sshape", ",", "list", ")", ":", "raise", "TypeError", ",", "'sshape argument must be a tuple or list'", "if", "not", "isinstance", "(", "ignore", ",", "list", ")", ":", "raise", "TypeError", ",", "'ignore argument must be a list'", "return", "tuple", "(", "[", "int", "(", "eval_expr", "(", "v", ",", "variables", ")", ")", "if", "isinstance", "(", "v", ",", "str", ")", "else", "int", "(", "v", ")", "for", "v", "in", "sshape", "if", "v", "not", "in", "ignore", "]", ")" ]
Substitutes string values in the supplied shape parameter with integer variables stored in a dictionary Parameters ---------- sshape : tuple/list composed of integers and strings. The strings should relate to integral properties registered with this Solver object variables : dictionary Keys with associated integer values. Used to replace string values within the tuple ignore : list A list of tuple strings to ignore >>> print self.shape_from_str_tuple((4,'na','ntime'),ignore=['ntime']) (4, 3)
[ "Substitutes", "string", "values", "in", "the", "supplied", "shape", "parameter", "with", "integer", "variables", "stored", "in", "a", "dictionary" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/util/__init__.py#L324-L352
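Beyond the ignore example in the docstring, string entries are resolved against the variables dict (through eval_expr), e.g.:

>>> shape_from_str_tuple((4, 'na', 'ntime'), {'na': 3, 'ntime': 10})
(4, 3, 10)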
ska-sa/montblanc
montblanc/util/__init__.py
shape_list
def shape_list(l,shape,dtype): """ Shape a list of lists into the appropriate shape and data type """ return np.array(l, dtype=dtype).reshape(shape)
python
def shape_list(l,shape,dtype): """ Shape a list of lists into the appropriate shape and data type """ return np.array(l, dtype=dtype).reshape(shape)
[ "def", "shape_list", "(", "l", ",", "shape", ",", "dtype", ")", ":", "return", "np", ".", "array", "(", "l", ",", "dtype", "=", "dtype", ")", ".", "reshape", "(", "shape", ")" ]
Shape a list of lists into the appropriate shape and data type
[ "Shape", "a", "list", "of", "lists", "into", "the", "appropriate", "shape", "and", "data", "type" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/util/__init__.py#L354-L356
ska-sa/montblanc
montblanc/util/__init__.py
array_convert_function
def array_convert_function(sshape_one, sshape_two, variables): """ Return a function defining the conversion process between two NumPy arrays of different shapes """ if not isinstance(sshape_one, tuple): sshape_one = (sshape_one,) if not isinstance(sshape_two, tuple): sshape_two = (sshape_two,) s_one = flatten([eval_expr_names_and_nrs(d) if isinstance(d,str) else d for d in sshape_one]) s_two = flatten([eval_expr_names_and_nrs(d) if isinstance(d,str) else d for d in sshape_two]) if len(s_one) != len(s_two): raise ValueError, ('Flattened shapes %s and %s '\ 'do not have the same length. ' 'Original shapes were %s and %s') % \ (s_one, s_two, sshape_one, sshape_two) # Reason about the transpose t_idx = tuple([s_one.index(v) for v in s_two]) # Figure out the actual numeric shape values to use n_one = shape_from_str_tuple(s_one, variables) n_two = [eval_expr(d,variables) if isinstance(d,str) else d for d in sshape_two] def f(ary): return np.reshape(ary, n_one).transpose(t_idx).reshape(n_two) return f
python
def array_convert_function(sshape_one, sshape_two, variables): """ Return a function defining the conversion process between two NumPy arrays of different shapes """ if not isinstance(sshape_one, tuple): sshape_one = (sshape_one,) if not isinstance(sshape_two, tuple): sshape_two = (sshape_two,) s_one = flatten([eval_expr_names_and_nrs(d) if isinstance(d,str) else d for d in sshape_one]) s_two = flatten([eval_expr_names_and_nrs(d) if isinstance(d,str) else d for d in sshape_two]) if len(s_one) != len(s_two): raise ValueError, ('Flattened shapes %s and %s '\ 'do not have the same length. ' 'Original shapes were %s and %s') % \ (s_one, s_two, sshape_one, sshape_two) # Reason about the transpose t_idx = tuple([s_one.index(v) for v in s_two]) # Figure out the actual numeric shape values to use n_one = shape_from_str_tuple(s_one, variables) n_two = [eval_expr(d,variables) if isinstance(d,str) else d for d in sshape_two] def f(ary): return np.reshape(ary, n_one).transpose(t_idx).reshape(n_two) return f
[ "def", "array_convert_function", "(", "sshape_one", ",", "sshape_two", ",", "variables", ")", ":", "if", "not", "isinstance", "(", "sshape_one", ",", "tuple", ")", ":", "sshape_one", "=", "(", "sshape_one", ",", ")", "if", "not", "isinstance", "(", "sshape_two", ",", "tuple", ")", ":", "sshape_two", "=", "(", "sshape_two", ",", ")", "s_one", "=", "flatten", "(", "[", "eval_expr_names_and_nrs", "(", "d", ")", "if", "isinstance", "(", "d", ",", "str", ")", "else", "d", "for", "d", "in", "sshape_one", "]", ")", "s_two", "=", "flatten", "(", "[", "eval_expr_names_and_nrs", "(", "d", ")", "if", "isinstance", "(", "d", ",", "str", ")", "else", "d", "for", "d", "in", "sshape_two", "]", ")", "if", "len", "(", "s_one", ")", "!=", "len", "(", "s_two", ")", ":", "raise", "ValueError", ",", "(", "'Flattened shapes %s and %s '", "'do not have the same length. '", "'Original shapes were %s and %s'", ")", "%", "(", "s_one", ",", "s_two", ",", "sshape_one", ",", "sshape_two", ")", "# Reason about the transpose", "t_idx", "=", "tuple", "(", "[", "s_one", ".", "index", "(", "v", ")", "for", "v", "in", "s_two", "]", ")", "# Figure out the actual numeric shape values to use", "n_one", "=", "shape_from_str_tuple", "(", "s_one", ",", "variables", ")", "n_two", "=", "[", "eval_expr", "(", "d", ",", "variables", ")", "if", "isinstance", "(", "d", ",", "str", ")", "else", "d", "for", "d", "in", "sshape_two", "]", "def", "f", "(", "ary", ")", ":", "return", "np", ".", "reshape", "(", "ary", ",", "n_one", ")", ".", "transpose", "(", "t_idx", ")", ".", "reshape", "(", "n_two", ")", "return", "f" ]
Return a function defining the conversion process between two NumPy arrays of different shapes
[ "Return", "a", "function", "defining", "the", "conversion", "process", "between", "two", "NumPy", "arrays", "of", "different", "shapes" ]
train
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/util/__init__.py#L358-L385
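A sketch of converting between a flattened and an expanded layout, assuming eval_expr_names_and_nrs('ntime*na') decomposes into ['ntime', 'na'] (an assumption based on the function name):

cvt = array_convert_function(('ntime*na', 3), (3, 'ntime', 'na'),
                             {'ntime': 10, 'na': 7})
ary = np.arange(10*7*3).reshape(70, 3)
cvt(ary).shape   # (3, 10, 7): reshape to (10, 7, 3), transpose (2, 0, 1), reshape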