Dataset schema (ranges as summarized by the dataset viewer):

column            type      values
id                int32     0 - 252k
repo              string    7 - 55 chars
path              string    4 - 127 chars
func_name         string    1 - 88 chars
original_string   string    75 - 19.8k chars
language          string    1 class
code              string    75 - 19.8k chars
code_tokens       list
docstring         string    3 - 17.3k chars
docstring_tokens  list
sha               string    40 chars
url               string    87 - 242 chars
id: 233800
repo: SFDO-Tooling/CumulusCI
path: cumulusci/core/flowrunner.py
func_name: FlowCoordinator.resolve_return_value_options
language: python
code:

    def resolve_return_value_options(self, options):
        """Handle dynamic option value lookups in the format ^^task_name.attr"""
        for key, value in options.items():
            if isinstance(value, str) and value.startswith(RETURN_VALUE_OPTION_PREFIX):
                path, name = value[len(RETURN_VALUE_OPTION_PREFIX) :].rsplit(".", 1)
                result = self._find_result_by_path(path)
                options[key] = result.return_values.get(name)

docstring: Handle dynamic option value lookups in the format ^^task_name.attr
sha: e19047921ca771a297e045f22f0bb201651bb6f7
url: https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/flowrunner.py#L544-L550
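A minimal sketch of the parsing step above, assuming RETURN_VALUE_OPTION_PREFIX is the literal "^^" (consistent with the docstring's ^^task_name.attr format); the option value below is invented for illustration:

    # Sketch only: the prefix is assumed to be "^^", and the value is made up.
    RETURN_VALUE_OPTION_PREFIX = "^^"

    value = "^^deploy.packages_installed"
    path, name = value[len(RETURN_VALUE_OPTION_PREFIX):].rsplit(".", 1)
    assert (path, name) == ("deploy", "packages_installed")

Note that rsplit(".", 1) splits on the last dot, so a dotted task path such as ^^flow.task.attr still yields "attr" as the attribute name.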
id: 233801
repo: SFDO-Tooling/CumulusCI
path: cumulusci/cli/logger.py
func_name: init_logger
language: python
code:

    def init_logger(log_requests=False):
        """ Initialize the logger """

        logger = logging.getLogger(__name__.split(".")[0])
        for handler in logger.handlers:  # pragma: nocover
            logger.removeHandler(handler)

        formatter = coloredlogs.ColoredFormatter(fmt="%(asctime)s: %(message)s")
        handler = logging.StreamHandler()
        handler.setLevel(logging.DEBUG)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)
        logger.propagate = False

        if log_requests:
            requests.packages.urllib3.add_stderr_logger()

docstring: Initialize the logger
sha: e19047921ca771a297e045f22f0bb201651bb6f7
url: https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/cli/logger.py#L10-L26
id: 233802
repo: johnbywater/eventsourcing
path: eventsourcing/contrib/suffixtrees/domain/model/suffixtree.py
func_name: register_new_node
language: python
code:

    def register_new_node(suffix_node_id=None):
        """Factory method, registers new node.
        """
        node_id = uuid4()
        event = Node.Created(originator_id=node_id, suffix_node_id=suffix_node_id)
        entity = Node.mutate(event=event)
        publish(event)
        return entity

docstring: Factory method, registers new node.
sha: de2c22c653fdccf2f5ee96faea74453ff1847e42
url: https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/contrib/suffixtrees/domain/model/suffixtree.py#L318-L325
id: 233803
repo: johnbywater/eventsourcing
path: eventsourcing/contrib/suffixtrees/domain/model/suffixtree.py
func_name: register_new_edge
language: python
code:

    def register_new_edge(edge_id, first_char_index, last_char_index, source_node_id, dest_node_id):
        """Factory method, registers new edge.
        """
        event = Edge.Created(
            originator_id=edge_id,
            first_char_index=first_char_index,
            last_char_index=last_char_index,
            source_node_id=source_node_id,
            dest_node_id=dest_node_id,
        )
        entity = Edge.mutate(event=event)
        publish(event)
        return entity

docstring: Factory method, registers new edge.
sha: de2c22c653fdccf2f5ee96faea74453ff1847e42
url: https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/contrib/suffixtrees/domain/model/suffixtree.py#L334-L346
id: 233804
repo: johnbywater/eventsourcing
path: eventsourcing/contrib/suffixtrees/domain/model/suffixtree.py
func_name: register_new_suffix_tree
language: python
code:

    def register_new_suffix_tree(case_insensitive=False):
        """Factory method, returns new suffix tree object.
        """
        assert isinstance(case_insensitive, bool)
        root_node = register_new_node()
        suffix_tree_id = uuid4()
        event = SuffixTree.Created(
            originator_id=suffix_tree_id,
            root_node_id=root_node.id,
            case_insensitive=case_insensitive,
        )
        entity = SuffixTree.mutate(event=event)
        assert isinstance(entity, SuffixTree)
        entity.nodes[root_node.id] = root_node
        publish(event)
        return entity

docstring: Factory method, returns new suffix tree object.
sha: de2c22c653fdccf2f5ee96faea74453ff1847e42
url: https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/contrib/suffixtrees/domain/model/suffixtree.py#L349-L369
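The three factories above share one shape: build a Created event, mutate it into entity state, publish the event, return the entity. A self-contained analogue of that pattern, using stand-in classes that are not the library's API:

    # Minimal sketch of the create-mutate-publish factory pattern; Node, publish
    # and the subscriber list here are illustrative stand-ins, not eventsourcing's.
    from uuid import uuid4

    _subscribers = []

    def publish(event):
        for handler in _subscribers:
            handler(event)

    class Node:
        class Created:
            def __init__(self, originator_id, suffix_node_id=None):
                self.originator_id = originator_id
                self.suffix_node_id = suffix_node_id

        @classmethod
        def mutate(cls, event):
            # Construct entity state from the Created event, bypassing __init__.
            node = cls.__new__(cls)
            node.id = event.originator_id
            node.suffix_node_id = event.suffix_node_id
            return node

    def register_new_node(suffix_node_id=None):
        event = Node.Created(originator_id=uuid4(), suffix_node_id=suffix_node_id)
        entity = Node.mutate(event=event)
        publish(event)
        return entity

Because the event, not the constructor, is the source of truth, subscribers see exactly the state change that produced the entity.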
id: 233805
repo: johnbywater/eventsourcing
path: eventsourcing/contrib/suffixtrees/domain/model/suffixtree.py
func_name: find_substring
language: python
code:

    def find_substring(substring, suffix_tree, edge_repo):
        """Returns the index if substring in tree, otherwise -1.
        """
        assert isinstance(substring, str)
        assert isinstance(suffix_tree, SuffixTree)
        assert isinstance(edge_repo, EventSourcedRepository)
        if not substring:
            return -1
        if suffix_tree.case_insensitive:
            substring = substring.lower()
        curr_node_id = suffix_tree.root_node_id
        i = 0
        while i < len(substring):
            edge_id = make_edge_id(curr_node_id, substring[i])
            try:
                edge = edge_repo[edge_id]
            except RepositoryKeyError:
                return -1
            ln = min(edge.length + 1, len(substring) - i)
            if substring[i:i + ln] != suffix_tree.string[edge.first_char_index:edge.first_char_index + ln]:
                return -1
            i += edge.length + 1
            curr_node_id = edge.dest_node_id
        return edge.first_char_index - len(substring) + ln

docstring: Returns the index if substring in tree, otherwise -1.
sha: de2c22c653fdccf2f5ee96faea74453ff1847e42
url: https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/contrib/suffixtrees/domain/model/suffixtree.py#L374-L397
id: 233806
repo: johnbywater/eventsourcing
path: eventsourcing/contrib/suffixtrees/domain/model/suffixtree.py
func_name: SuffixTree._add_prefix
language: python
code:

    def _add_prefix(self, last_char_index):
        """The core construction method.
        """
        last_parent_node_id = None
        while True:
            parent_node_id = self.active.source_node_id
            if self.active.explicit():
                edge_id = make_edge_id(self.active.source_node_id, self.string[last_char_index])
                if edge_id in self.edges:
                    # prefix is already in tree
                    break
            else:
                edge_id = make_edge_id(self.active.source_node_id, self.string[self.active.first_char_index])
                e = self.edges[edge_id]
                if self.string[e.first_char_index + self.active.length + 1] == self.string[last_char_index]:
                    # prefix is already in tree
                    break
                parent_node_id = self._split_edge(e, self.active)
            node = register_new_node()
            self.nodes[node.id] = node
            edge_id = make_edge_id(parent_node_id, self.string[last_char_index])
            e = register_new_edge(
                edge_id=edge_id,
                first_char_index=last_char_index,
                last_char_index=self.N,
                source_node_id=parent_node_id,
                dest_node_id=node.id,
            )
            self._insert_edge(e)
            if last_parent_node_id is not None:
                self.nodes[last_parent_node_id].suffix_node_id = parent_node_id
            last_parent_node_id = parent_node_id
            if self.active.source_node_id == self.root_node_id:
                self.active.first_char_index += 1
            else:
                self.active.source_node_id = self.nodes[self.active.source_node_id].suffix_node_id
            self._canonize_suffix(self.active)
        if last_parent_node_id is not None:
            self.nodes[last_parent_node_id].suffix_node_id = parent_node_id
        self.active.last_char_index += 1
        self._canonize_suffix(self.active)

docstring: The core construction method.
sha: de2c22c653fdccf2f5ee96faea74453ff1847e42
url: https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/contrib/suffixtrees/domain/model/suffixtree.py#L86-L129
id: 233807
repo: johnbywater/eventsourcing
path: eventsourcing/contrib/suffixtrees/domain/model/suffixtree.py
func_name: SuffixTree._canonize_suffix
language: python
code:

    def _canonize_suffix(self, suffix):
        """This canonizes the suffix, walking along its suffix string
        until it is explicit or there are no more matched nodes.
        """
        if not suffix.explicit():
            edge_id = make_edge_id(suffix.source_node_id, self.string[suffix.first_char_index])
            e = self.edges[edge_id]
            if e.length <= suffix.length:
                suffix.first_char_index += e.length + 1
                suffix.source_node_id = e.dest_node_id
                self._canonize_suffix(suffix)

docstring: This canonizes the suffix, walking along its suffix string until it is explicit or there are no more matched nodes.
sha: de2c22c653fdccf2f5ee96faea74453ff1847e42
url: https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/contrib/suffixtrees/domain/model/suffixtree.py#L170-L180
id: 233808
repo: johnbywater/eventsourcing
path: eventsourcing/infrastructure/snapshotting.py
func_name: entity_from_snapshot
language: python
code:

    def entity_from_snapshot(snapshot):
        """
        Reconstructs domain entity from given snapshot.
        """
        assert isinstance(snapshot, AbstractSnapshop), type(snapshot)
        if snapshot.state is not None:
            entity_class = resolve_topic(snapshot.topic)
            return reconstruct_object(entity_class, snapshot.state)

docstring: Reconstructs domain entity from given snapshot.
sha: de2c22c653fdccf2f5ee96faea74453ff1847e42
url: https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/snapshotting.py#L69-L76
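resolve_topic and reconstruct_object are the library's own helpers. Given that take_snapshot (record 233810 below) stores deepcopy(entity.__dict__) as the snapshot state, one plausible sketch of the reconstruction step, not the library's actual implementation, is:

    # Sketch only: restore an entity from a __dict__ snapshot without calling
    # __init__, since the saved state already contains the constructed attributes.
    def reconstruct_object_sketch(entity_class, state):
        obj = object.__new__(entity_class)  # bypass __init__
        obj.__dict__.update(state)
        return obj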
id: 233809
repo: johnbywater/eventsourcing
path: eventsourcing/infrastructure/snapshotting.py
func_name: EventSourcedSnapshotStrategy.get_snapshot
language: python
code:

    def get_snapshot(self, entity_id, lt=None, lte=None):
        """
        Gets the last snapshot for entity, optionally until a particular version number.

        :rtype: Snapshot
        """
        snapshots = self.snapshot_store.get_domain_events(entity_id, lt=lt, lte=lte, limit=1, is_ascending=False)
        if len(snapshots) == 1:
            return snapshots[0]

docstring: Gets the last snapshot for entity, optionally until a particular version number. :rtype: Snapshot
sha: de2c22c653fdccf2f5ee96faea74453ff1847e42
url: https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/snapshotting.py#L36-L44
id: 233810
repo: johnbywater/eventsourcing
path: eventsourcing/infrastructure/snapshotting.py
func_name: EventSourcedSnapshotStrategy.take_snapshot
language: python
code:

    def take_snapshot(self, entity_id, entity, last_event_version):
        """
        Creates a Snapshot from the given state, and appends it to the snapshot store.

        :rtype: Snapshot
        """
        # Create the snapshot.
        snapshot = Snapshot(
            originator_id=entity_id,
            originator_version=last_event_version,
            topic=get_topic(entity.__class__),
            state=None if entity is None else deepcopy(entity.__dict__)
        )
        self.snapshot_store.store(snapshot)

        # Return the snapshot.
        return snapshot

docstring: Creates a Snapshot from the given state, and appends it to the snapshot store. :rtype: Snapshot
sha: de2c22c653fdccf2f5ee96faea74453ff1847e42
url: https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/snapshotting.py#L47-L66
id: 233811
repo: johnbywater/eventsourcing
path: eventsourcing/infrastructure/eventsourcedrepository.py
func_name: EventSourcedRepository.get_entity
language: python
code:

    def get_entity(self, entity_id, at=None):
        """
        Returns entity with given ID, optionally until position.
        """
        # Get a snapshot (None if none exist).
        if self._snapshot_strategy is not None:
            snapshot = self._snapshot_strategy.get_snapshot(entity_id, lte=at)
        else:
            snapshot = None

        # Decide the initial state of the entity, and the
        # version of the last item applied to the entity.
        if snapshot is None:
            initial_state = None
            gt = None
        else:
            initial_state = entity_from_snapshot(snapshot)
            gt = snapshot.originator_version

        # Obtain and return current state.
        return self.get_and_project_events(entity_id, gt=gt, lte=at, initial_state=initial_state)

docstring: Returns entity with given ID, optionally until position.
sha: de2c22c653fdccf2f5ee96faea74453ff1847e42
url: https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/eventsourcedrepository.py#L37-L58
id: 233812
repo: johnbywater/eventsourcing
path: eventsourcing/infrastructure/eventsourcedrepository.py
func_name: EventSourcedRepository.get_and_project_events
language: python
code:

    def get_and_project_events(self, entity_id, gt=None, gte=None, lt=None, lte=None, limit=None,
                               initial_state=None, query_descending=False):
        """
        Reconstitutes requested domain entity from domain events found in event store.
        """
        # Decide if query is in ascending order.
        #  - A "speed up" for when events are stored in descending order (e.g.
        #    in Cassandra) and it is faster to get them in that order.
        #  - This isn't useful when 'until' or 'after' or 'limit' are set,
        #    because the inclusiveness or exclusiveness of until and after
        #    and the end of the stream that is truncated by limit both depend on
        #    the direction of the query. Also paging backwards isn't useful, because
        #    all the events are needed eventually, so it would probably slow things
        #    down. Paging is intended to support replaying longer event streams, and
        #    only makes sense to work in ascending order.
        if gt is None and gte is None and lt is None and lte is None and self.__page_size__ is None:
            is_ascending = False
        else:
            is_ascending = not query_descending

        # Get entity's domain events from the event store.
        domain_events = self.event_store.get_domain_events(
            originator_id=entity_id, gt=gt, gte=gte, lt=lt, lte=lte, limit=limit,
            is_ascending=is_ascending, page_size=self.__page_size__
        )

        # The events will be replayed in ascending order.
        if not is_ascending:
            domain_events = list(reversed(list(domain_events)))

        # Project the domain events onto the initial state.
        return self.project_events(initial_state, domain_events)

docstring: Reconstitutes requested domain entity from domain events found in event store.
sha: de2c22c653fdccf2f5ee96faea74453ff1847e42
url: https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/eventsourcedrepository.py#L60-L97
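project_events folds the ascending event sequence over the initial state; the mutator docstring (record 233817 below) says the library does this replay with reduce(). A minimal sketch of that fold, with an invented dict-based event format standing in for real domain events:

    from functools import reduce

    # Hypothetical mutator for illustration: each "event" is a dict of attribute
    # changes applied to the accumulated state (the real library dispatches on
    # event type; see the mutator decorator in record 233817).
    def apply_event(state, event):
        new_state = dict(state or {})
        new_state.update(event)
        return new_state

    events = [{"foo": "bar"}, {"foo": "baz", "n": 1}]
    assert reduce(apply_event, events, None) == {"foo": "baz", "n": 1}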
id: 233813
repo: johnbywater/eventsourcing
path: eventsourcing/infrastructure/eventsourcedrepository.py
func_name: EventSourcedRepository.take_snapshot
language: python
code:

    def take_snapshot(self, entity_id, lt=None, lte=None):
        """
        Takes a snapshot of the entity as it existed after the most recent
        event, optionally less than, or less than or equal to, a particular position.
        """
        snapshot = None
        if self._snapshot_strategy:
            # Get the latest event (optionally until a particular position).
            latest_event = self.event_store.get_most_recent_event(entity_id, lt=lt, lte=lte)

            # If there is something to snapshot, then look for a snapshot
            # taken before or at the entity version of the latest event. Please
            # note, the snapshot might have a smaller version number than
            # the latest event if events occurred since the latest snapshot was taken.
            if latest_event is not None:
                latest_snapshot = self._snapshot_strategy.get_snapshot(
                    entity_id, lt=lt, lte=lte
                )
                latest_version = latest_event.originator_version

                if latest_snapshot and latest_snapshot.originator_version == latest_version:
                    # If up-to-date snapshot exists, there's nothing to do.
                    snapshot = latest_snapshot
                else:
                    # Otherwise recover entity state from latest snapshot.
                    if latest_snapshot:
                        initial_state = entity_from_snapshot(latest_snapshot)
                        gt = latest_snapshot.originator_version
                    else:
                        initial_state = None
                        gt = None

                    # Fast-forward entity state to latest version.
                    entity = self.get_and_project_events(
                        entity_id=entity_id,
                        gt=gt,
                        lte=latest_version,
                        initial_state=initial_state,
                    )

                    # Take snapshot from entity.
                    snapshot = self._snapshot_strategy.take_snapshot(entity_id, entity, latest_version)

        return snapshot

docstring: Takes a snapshot of the entity as it existed after the most recent event, optionally less than, or less than or equal to, a particular position.
sha: de2c22c653fdccf2f5ee96faea74453ff1847e42
url: https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/eventsourcedrepository.py#L100-L143
id: 233814
repo: johnbywater/eventsourcing
path: eventsourcing/example/application.py
func_name: ExampleApplication.create_new_example
language: python
code:

    def create_new_example(self, foo='', a='', b=''):
        """Entity object factory."""
        return create_new_example(foo=foo, a=a, b=b)

docstring: Entity object factory.
sha: de2c22c653fdccf2f5ee96faea74453ff1847e42
url: https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/example/application.py#L170-L172
id: 233815
repo: johnbywater/eventsourcing
path: eventsourcing/utils/times.py
func_name: timestamp_long_from_uuid
language: python
code:

    def timestamp_long_from_uuid(uuid_arg):
        """
        Returns an integer value representing a unix timestamp in tenths of microseconds.

        :param uuid_arg:
        :return: Unix timestamp integer in tenths of microseconds.
        :rtype: int
        """
        if isinstance(uuid_arg, str):
            uuid_arg = UUID(uuid_arg)
        assert isinstance(uuid_arg, UUID), uuid_arg
        uuid_time = uuid_arg.time
        return uuid_time - 0x01B21DD213814000

docstring: Returns an integer value representing a unix timestamp in tenths of microseconds. :param uuid_arg: :return: Unix timestamp integer in tenths of microseconds. :rtype: int
sha: de2c22c653fdccf2f5ee96faea74453ff1847e42
url: https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/utils/times.py#L20-L32
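The constant 0x01B21DD213814000 is the number of 100-nanosecond intervals (tenths of microseconds) between the version-1 UUID epoch, 1582-10-15, and the Unix epoch, 1970-01-01, so subtracting it rebases the UUID time field onto Unix time. A quick standard-library check:

    import time
    from uuid import uuid1

    # uuid1().time counts tenths of microseconds since 1582-10-15.
    t = uuid1().time - 0x01B21DD213814000  # tenths of microseconds since 1970-01-01
    seconds = t / 1e7                      # Unix timestamp in seconds
    assert abs(seconds - time.time()) < 5  # same clock, so these agree closely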
id: 233816
repo: johnbywater/eventsourcing
path: eventsourcing/domain/model/decorators.py
func_name: subscribe_to
language: python
code:

    def subscribe_to(*event_classes):
        """
        Decorator for making a custom event handler function subscribe to
        a certain class of event.

        The decorated function will be called once for each matching event
        that is published, and will be given one argument, the event, when
        it is called. If events are published in lists, for example the
        AggregateRoot publishes a list of pending events when its __save__()
        method is called, then the decorated function will be called once
        for each event that is an instance of the given event_class.

        Please note, this decorator isn't suitable for use with object class
        methods. The decorator receives in Python 3 an unbound function, and
        defines a handler which it subscribes that calls the decorated
        function for each matching event. However the method isn't called on
        the object, so the object instance is never available in the
        decorator, so the decorator can't call a normal object method because
        it doesn't have a value for 'self'.

        event_class: type used to match published events, an event matches if
        it is an instance of this type

        The following example shows a custom handler that reacts to Todo.Created
        event and saves a projection of a Todo model object.

        .. code::

            @subscribe_to(Todo.Created)
            def new_todo_projection(event):
                todo = TodoProjection(id=event.originator_id, title=event.title)
                todo.save()
        """
        event_classes = list(event_classes)

        def wrap(func):
            def handler(event):
                if isinstance(event, (list, tuple)):
                    for e in event:
                        handler(e)
                elif not event_classes or isinstance(event, tuple(event_classes)):
                    func(event)

            subscribe(handler=handler, predicate=lambda _: True)
            return func

        if len(event_classes) == 1 and isfunction(event_classes[0]):
            func = event_classes.pop()
            return wrap(func)
        else:
            return wrap

docstring: Decorator for making a custom event handler function subscribe to a certain class of event. The decorated function will be called once for each matching event that is published, and will be given one argument, the event, when it is called. If events are published in lists, for example the AggregateRoot publishes a list of pending events when its __save__() method is called, then the decorated function will be called once for each event that is an instance of the given event_class. Please note, this decorator isn't suitable for use with object class methods. The decorator receives in Python 3 an unbound function, and defines a handler which it subscribes that calls the decorated function for each matching event. However the method isn't called on the object, so the object instance is never available in the decorator, so the decorator can't call a normal object method because it doesn't have a value for 'self'. The docstring includes a usage example reacting to Todo.Created events.
sha: de2c22c653fdccf2f5ee96faea74453ff1847e42
url: https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/domain/model/decorators.py#L10-L54
id: 233817
repo: johnbywater/eventsourcing
path: eventsourcing/domain/model/decorators.py
func_name: mutator
language: python
code:

    def mutator(arg=None):
        """Structures mutator functions by allowing handlers to be registered
        for different types of event. When the decorated function is called
        with an initial value and an event, it will call the handler that has
        been registered for that type of event.

        It works like singledispatch, which it uses. The difference is that
        when the decorated function is called, this decorator dispatches
        according to the type of last call arg, which fits better with
        reduce(). The builtin Python function reduce() is used by the library
        to replay a sequence of events against an initial state. If a mutator
        function is given to reduce(), along with a list of events and an
        initializer, reduce() will call the mutator function once for each
        event in the list, but the initializer will be the first value, and
        the event will be the last argument, and we want to dispatch according
        to the type of the event. It happens that singledispatch is coded to
        switch on the type of the first argument, which makes it unsuitable
        for structuring a mutator function without the modifications
        introduced here.

        The other aspect introduced by this decorator function is the option
        to set the type of the handled entity in the decorator. When an entity
        is replayed from scratch, in other words when all its events are
        replayed, the initial state is None. The handler which handles the
        first event in the sequence will probably construct an object
        instance. It is possible to write the type into the handler, but that
        makes the entity more difficult to subclass because you will also need
        to write a handler for it. If the decorator is invoked with the type,
        when the initial value passed as a call arg to the mutator function is
        None, the handler will instead receive the type of the entity, which
        it can use to construct the entity object.

        .. code::

            class Entity(object):
                class Created(object):
                    pass

            @mutator(Entity)
            def mutate(initial, event):
                raise NotImplementedError(type(event))

            @mutate.register(Entity.Created)
            def _(initial, event):
                return initial(**event.__dict__)

            entity = mutate(None, Entity.Created())
        """
        domain_class = None

        def _mutator(func):
            wrapped = singledispatch(func)

            @wraps(wrapped)
            def wrapper(initial, event):
                initial = initial or domain_class
                return wrapped.dispatch(type(event))(initial, event)

            wrapper.register = wrapped.register

            return wrapper

        if isfunction(arg):
            return _mutator(arg)
        else:
            domain_class = arg
            return _mutator

docstring: Structures mutator functions by allowing handlers to be registered for different types of event. When the decorated function is called with an initial value and an event, it will call the handler that has been registered for that type of event. It works like singledispatch, which it uses, except that it dispatches according to the type of the last call arg, which fits better with reduce(): the library uses reduce() to replay a sequence of events against an initial state, where the initializer is the first argument and the event the last, while singledispatch switches on the type of the first argument. The decorator also optionally takes the type of the handled entity: when the initial value is None (a replay from scratch), the handler receives the entity type instead, which it can use to construct the entity object. The docstring includes a usage example with an Entity.Created handler.
sha: de2c22c653fdccf2f5ee96faea74453ff1847e42
url: https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/domain/model/decorators.py#L57-L128
233,818
johnbywater/eventsourcing
eventsourcing/utils/cipher/aes.py
AESCipher.encrypt
def encrypt(self, plaintext): """Return ciphertext for given plaintext.""" # String to bytes. plainbytes = plaintext.encode('utf8') # Compress plaintext bytes. compressed = zlib.compress(plainbytes) # Construct AES-GCM cipher, with 96-bit nonce. cipher = AES.new(self.cipher_key, AES.MODE_GCM, nonce=random_bytes(12)) # Encrypt and digest. encrypted, tag = cipher.encrypt_and_digest(compressed) # Combine with nonce. combined = cipher.nonce + tag + encrypted # Encode as Base64. cipherbytes = base64.b64encode(combined) # Bytes to string. ciphertext = cipherbytes.decode('utf8') # Return ciphertext. return ciphertext
python
def encrypt(self, plaintext): """Return ciphertext for given plaintext.""" # String to bytes. plainbytes = plaintext.encode('utf8') # Compress plaintext bytes. compressed = zlib.compress(plainbytes) # Construct AES-GCM cipher, with 96-bit nonce. cipher = AES.new(self.cipher_key, AES.MODE_GCM, nonce=random_bytes(12)) # Encrypt and digest. encrypted, tag = cipher.encrypt_and_digest(compressed) # Combine with nonce. combined = cipher.nonce + tag + encrypted # Encode as Base64. cipherbytes = base64.b64encode(combined) # Bytes to string. ciphertext = cipherbytes.decode('utf8') # Return ciphertext. return ciphertext
[ "def", "encrypt", "(", "self", ",", "plaintext", ")", ":", "# String to bytes.", "plainbytes", "=", "plaintext", ".", "encode", "(", "'utf8'", ")", "# Compress plaintext bytes.", "compressed", "=", "zlib", ".", "compress", "(", "plainbytes", ")", "# Construct AES-GCM cipher, with 96-bit nonce.", "cipher", "=", "AES", ".", "new", "(", "self", ".", "cipher_key", ",", "AES", ".", "MODE_GCM", ",", "nonce", "=", "random_bytes", "(", "12", ")", ")", "# Encrypt and digest.", "encrypted", ",", "tag", "=", "cipher", ".", "encrypt_and_digest", "(", "compressed", ")", "# Combine with nonce.", "combined", "=", "cipher", ".", "nonce", "+", "tag", "+", "encrypted", "# Encode as Base64.", "cipherbytes", "=", "base64", ".", "b64encode", "(", "combined", ")", "# Bytes to string.", "ciphertext", "=", "cipherbytes", ".", "decode", "(", "'utf8'", ")", "# Return ciphertext.", "return", "ciphertext" ]
Return ciphertext for given plaintext.
[ "Return", "ciphertext", "for", "given", "plaintext", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/utils/cipher/aes.py#L24-L49
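A round-trip usage sketch for the layout above, assuming the pycryptodome package (which provides Crypto.Cipher.AES and Crypto.Random.get_random_bytes) and a 16-byte key; the random_bytes helper used by the method is presumably a thin wrapper over the same primitive.

.. code::

    import base64
    import zlib

    from Crypto.Cipher import AES
    from Crypto.Random import get_random_bytes

    key = get_random_bytes(16)  # AES-128; 24- or 32-byte keys also work

    # Encrypt, mirroring the method above: compress, AES-GCM, nonce||tag||ciphertext.
    compressed = zlib.compress('secret text'.encode('utf8'))
    cipher = AES.new(key, AES.MODE_GCM, nonce=get_random_bytes(12))
    encrypted, tag = cipher.encrypt_and_digest(compressed)
    token = base64.b64encode(cipher.nonce + tag + encrypted).decode('utf8')

    # Round-trip: split the fixed-width fields back out and verify the tag.
    combined = base64.b64decode(token.encode('utf8'))
    nonce, tag, encrypted = combined[:12], combined[12:28], combined[28:]
    cipher = AES.new(key, AES.MODE_GCM, nonce)
    assert zlib.decompress(cipher.decrypt_and_verify(encrypted, tag)) == b'secret text'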
233,819
johnbywater/eventsourcing
eventsourcing/utils/cipher/aes.py
AESCipher.decrypt
def decrypt(self, ciphertext): """Return plaintext for given ciphertext.""" # String to bytes. cipherbytes = ciphertext.encode('utf8') # Decode from Base64. try: combined = base64.b64decode(cipherbytes) except (base64.binascii.Error, TypeError) as e: # base64.binascii.Error for Python 3. # TypeError for Python 2. raise DataIntegrityError("Cipher text is damaged: {}".format(e)) # Split out the nonce, tag, and encrypted data. nonce = combined[:12] if len(nonce) != 12: raise DataIntegrityError("Cipher text is damaged: invalid nonce length") tag = combined[12:28] if len(tag) != 16: raise DataIntegrityError("Cipher text is damaged: invalid tag length") encrypted = combined[28:] # Construct AES cipher, with old nonce. cipher = AES.new(self.cipher_key, AES.MODE_GCM, nonce) # Decrypt and verify. try: compressed = cipher.decrypt_and_verify(encrypted, tag) except ValueError as e: raise DataIntegrityError("Cipher text is damaged: {}".format(e)) # Decompress plaintext bytes. plainbytes = zlib.decompress(compressed) # Bytes to string. plaintext = plainbytes.decode('utf8') # Return plaintext. return plaintext
python
def decrypt(self, ciphertext): """Return plaintext for given ciphertext.""" # String to bytes. cipherbytes = ciphertext.encode('utf8') # Decode from Base64. try: combined = base64.b64decode(cipherbytes) except (base64.binascii.Error, TypeError) as e: # base64.binascii.Error for Python 3. # TypeError for Python 2. raise DataIntegrityError("Cipher text is damaged: {}".format(e)) # Split out the nonce, tag, and encrypted data. nonce = combined[:12] if len(nonce) != 12: raise DataIntegrityError("Cipher text is damaged: invalid nonce length") tag = combined[12:28] if len(tag) != 16: raise DataIntegrityError("Cipher text is damaged: invalid tag length") encrypted = combined[28:] # Construct AES cipher, with old nonce. cipher = AES.new(self.cipher_key, AES.MODE_GCM, nonce) # Decrypt and verify. try: compressed = cipher.decrypt_and_verify(encrypted, tag) except ValueError as e: raise DataIntegrityError("Cipher text is damaged: {}".format(e)) # Decompress plaintext bytes. plainbytes = zlib.decompress(compressed) # Bytes to string. plaintext = plainbytes.decode('utf8') # Return plaintext. return plaintext
[ "def", "decrypt", "(", "self", ",", "ciphertext", ")", ":", "# String to bytes.", "cipherbytes", "=", "ciphertext", ".", "encode", "(", "'utf8'", ")", "# Decode from Base64.", "try", ":", "combined", "=", "base64", ".", "b64decode", "(", "cipherbytes", ")", "except", "(", "base64", ".", "binascii", ".", "Error", ",", "TypeError", ")", "as", "e", ":", "# base64.binascii.Error for Python 3.", "# TypeError for Python 2.", "raise", "DataIntegrityError", "(", "\"Cipher text is damaged: {}\"", ".", "format", "(", "e", ")", ")", "# Split out the nonce, tag, and encrypted data.", "nonce", "=", "combined", "[", ":", "12", "]", "if", "len", "(", "nonce", ")", "!=", "12", ":", "raise", "DataIntegrityError", "(", "\"Cipher text is damaged: invalid nonce length\"", ")", "tag", "=", "combined", "[", "12", ":", "28", "]", "if", "len", "(", "tag", ")", "!=", "16", ":", "raise", "DataIntegrityError", "(", "\"Cipher text is damaged: invalid tag length\"", ")", "encrypted", "=", "combined", "[", "28", ":", "]", "# Construct AES cipher, with old nonce.", "cipher", "=", "AES", ".", "new", "(", "self", ".", "cipher_key", ",", "AES", ".", "MODE_GCM", ",", "nonce", ")", "# Decrypt and verify.", "try", ":", "compressed", "=", "cipher", ".", "decrypt_and_verify", "(", "encrypted", ",", "tag", ")", "except", "ValueError", "as", "e", ":", "raise", "DataIntegrityError", "(", "\"Cipher text is damaged: {}\"", ".", "format", "(", "e", ")", ")", "# Decompress plaintext bytes.", "plainbytes", "=", "zlib", ".", "decompress", "(", "compressed", ")", "# Bytes to string.", "plaintext", "=", "plainbytes", ".", "decode", "(", "'utf8'", ")", "# Return plaintext.", "return", "plaintext" ]
Return plaintext for given ciphertext.
[ "Return", "plaintext", "for", "given", "ciphertext", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/utils/cipher/aes.py#L51-L92
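The tag check in decrypt_and_verify() is what turns silent corruption into the DataIntegrityError raised above. A small sketch (again assuming pycryptodome) showing that flipping a single bit of the stored bytes makes verification fail:

.. code::

    import zlib

    from Crypto.Cipher import AES
    from Crypto.Random import get_random_bytes

    key = get_random_bytes(16)
    cipher = AES.new(key, AES.MODE_GCM, nonce=get_random_bytes(12))
    encrypted, tag = cipher.encrypt_and_digest(zlib.compress(b'payload'))
    combined = bytearray(cipher.nonce + tag + encrypted)

    # Flip one bit of the ciphertext body; GCM verification must now fail.
    combined[-1] ^= 0x01
    nonce, tag, encrypted = bytes(combined[:12]), bytes(combined[12:28]), bytes(combined[28:])
    cipher = AES.new(key, AES.MODE_GCM, nonce)
    try:
        cipher.decrypt_and_verify(encrypted, tag)
    except ValueError as e:
        print('damage detected:', e)  # the method above re-raises this as DataIntegrityError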
233,820
johnbywater/eventsourcing
eventsourcing/infrastructure/eventstore.py
EventStore.store
def store(self, domain_event_or_events): """ Appends given domain event, or list of domain events, to their sequence. :param domain_event_or_events: domain event, or list of domain events """ # Convert to sequenced item. sequenced_item_or_items = self.item_from_event(domain_event_or_events) # Append the sequenced item(s) to the sequence. try: self.record_manager.record_sequenced_items(sequenced_item_or_items) except RecordConflictError as e: raise ConcurrencyError(e)
python
def store(self, domain_event_or_events): """ Appends given domain event, or list of domain events, to their sequence. :param domain_event_or_events: domain event, or list of domain events """ # Convert to sequenced item. sequenced_item_or_items = self.item_from_event(domain_event_or_events) # Append the sequenced item(s) to the sequence. try: self.record_manager.record_sequenced_items(sequenced_item_or_items) except RecordConflictError as e: raise ConcurrencyError(e)
[ "def", "store", "(", "self", ",", "domain_event_or_events", ")", ":", "# Convert to sequenced item.", "sequenced_item_or_items", "=", "self", ".", "item_from_event", "(", "domain_event_or_events", ")", "# Append to the sequenced item(s) to the sequence.", "try", ":", "self", ".", "record_manager", ".", "record_sequenced_items", "(", "sequenced_item_or_items", ")", "except", "RecordConflictError", "as", "e", ":", "raise", "ConcurrencyError", "(", "e", ")" ]
Appends given domain event, or list of domain events, to their sequence. :param domain_event_or_events: domain event, or list of domain events
[ "Appends", "given", "domain", "event", "or", "list", "of", "domain", "events", "to", "their", "sequence", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/eventstore.py#L72-L86
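Since store() surfaces write conflicts as ConcurrencyError, a caller wanting optimistic-concurrency retries might wrap it as below. This is a hedged sketch with hypothetical names; the exception class is a local stand-in for the library's own.

.. code::

    import time

    class ConcurrencyError(Exception):
        pass

    def save_with_retries(store, events, max_attempts=3, backoff=0.05):
        """Retry on write conflicts, as signalled by EventStore.store() above."""
        for attempt in range(1, max_attempts + 1):
            try:
                return store(events)
            except ConcurrencyError:
                if attempt == max_attempts:
                    raise
                time.sleep(backoff * attempt)  # simple linear backoff

    # Fake store that conflicts once, then succeeds.
    calls = {'n': 0}
    def flaky_store(events):
        calls['n'] += 1
        if calls['n'] == 1:
            raise ConcurrencyError('unique constraint violated')

    save_with_retries(flaky_store, ['Created', 'AttributeChanged'])
    assert calls['n'] == 2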
233,821
johnbywater/eventsourcing
eventsourcing/infrastructure/eventstore.py
EventStore.item_from_event
def item_from_event(self, domain_event_or_events): """ Maps domain event to sequenced item namedtuple. :param domain_event_or_events: application-level object (or list) :return: namedtuple: sequence item namedtuple (or list) """ # Convert the domain event(s) to sequenced item(s). if isinstance(domain_event_or_events, (list, tuple)): return [self.item_from_event(e) for e in domain_event_or_events] else: return self.mapper.item_from_event(domain_event_or_events)
python
def item_from_event(self, domain_event_or_events): """ Maps domain event to sequenced item namedtuple. :param domain_event_or_events: application-level object (or list) :return: namedtuple: sequence item namedtuple (or list) """ # Convert the domain event(s) to sequenced item(s). if isinstance(domain_event_or_events, (list, tuple)): return [self.item_from_event(e) for e in domain_event_or_events] else: return self.mapper.item_from_event(domain_event_or_events)
[ "def", "item_from_event", "(", "self", ",", "domain_event_or_events", ")", ":", "# Convert the domain event(s) to sequenced item(s).", "if", "isinstance", "(", "domain_event_or_events", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "[", "self", ".", "item_from_event", "(", "e", ")", "for", "e", "in", "domain_event_or_events", "]", "else", ":", "return", "self", ".", "mapper", ".", "item_from_event", "(", "domain_event_or_events", ")" ]
Maps domain event to sequenced item namedtuple. :param domain_event_or_events: application-level object (or list) :return: namedtuple: sequence item namedtuple (or list)
[ "Maps", "domain", "event", "to", "sequenced", "item", "namedtuple", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/eventstore.py#L88-L99
233,822
johnbywater/eventsourcing
eventsourcing/infrastructure/eventstore.py
EventStore.get_domain_events
def get_domain_events(self, originator_id, gt=None, gte=None, lt=None, lte=None, limit=None, is_ascending=True, page_size=None): """ Gets domain events from the sequence identified by `originator_id`. :param originator_id: ID of a sequence of events :param gt: get items after this position :param gte: get items at or after this position :param lt: get items before this position :param lte: get items before or at this position :param limit: get limited number of items :param is_ascending: get items from lowest position :param page_size: restrict and repeat database query :return: list of domain events """ if page_size: sequenced_items = self.iterator_class( record_manager=self.record_manager, sequence_id=originator_id, page_size=page_size, gt=gt, gte=gte, lt=lt, lte=lte, limit=limit, is_ascending=is_ascending, ) else: sequenced_items = self.record_manager.get_items( sequence_id=originator_id, gt=gt, gte=gte, lt=lt, lte=lte, limit=limit, query_ascending=is_ascending, results_ascending=is_ascending, ) # Deserialize to domain events. domain_events = map(self.mapper.event_from_item, sequenced_items) return list(domain_events)
python
def get_domain_events(self, originator_id, gt=None, gte=None, lt=None, lte=None, limit=None, is_ascending=True, page_size=None): """ Gets domain events from the sequence identified by `originator_id`. :param originator_id: ID of a sequence of events :param gt: get items after this position :param gte: get items at or after this position :param lt: get items before this position :param lte: get items before or at this position :param limit: get limited number of items :param is_ascending: get items from lowest position :param page_size: restrict and repeat database query :return: list of domain events """ if page_size: sequenced_items = self.iterator_class( record_manager=self.record_manager, sequence_id=originator_id, page_size=page_size, gt=gt, gte=gte, lt=lt, lte=lte, limit=limit, is_ascending=is_ascending, ) else: sequenced_items = self.record_manager.get_items( sequence_id=originator_id, gt=gt, gte=gte, lt=lt, lte=lte, limit=limit, query_ascending=is_ascending, results_ascending=is_ascending, ) # Deserialize to domain events. domain_events = map(self.mapper.event_from_item, sequenced_items) return list(domain_events)
[ "def", "get_domain_events", "(", "self", ",", "originator_id", ",", "gt", "=", "None", ",", "gte", "=", "None", ",", "lt", "=", "None", ",", "lte", "=", "None", ",", "limit", "=", "None", ",", "is_ascending", "=", "True", ",", "page_size", "=", "None", ")", ":", "if", "page_size", ":", "sequenced_items", "=", "self", ".", "iterator_class", "(", "record_manager", "=", "self", ".", "record_manager", ",", "sequence_id", "=", "originator_id", ",", "page_size", "=", "page_size", ",", "gt", "=", "gt", ",", "gte", "=", "gte", ",", "lt", "=", "lt", ",", "lte", "=", "lte", ",", "limit", "=", "limit", ",", "is_ascending", "=", "is_ascending", ",", ")", "else", ":", "sequenced_items", "=", "self", ".", "record_manager", ".", "get_items", "(", "sequence_id", "=", "originator_id", ",", "gt", "=", "gt", ",", "gte", "=", "gte", ",", "lt", "=", "lt", ",", "lte", "=", "lte", ",", "limit", "=", "limit", ",", "query_ascending", "=", "is_ascending", ",", "results_ascending", "=", "is_ascending", ",", ")", "# Deserialize to domain events.", "domain_events", "=", "map", "(", "self", ".", "mapper", ".", "event_from_item", ",", "sequenced_items", ")", "return", "list", "(", "domain_events", ")" ]
Gets domain events from the sequence identified by `originator_id`. :param originator_id: ID of a sequence of events :param gt: get items after this position :param gte: get items at or after this position :param lt: get items before this position :param lte: get items before or at this position :param limit: get limited number of items :param is_ascending: get items from lowest position :param page_size: restrict and repeat database query :return: list of domain events
[ "Gets", "domain", "events", "from", "the", "sequence", "identified", "by", "originator_id", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/eventstore.py#L101-L142
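The gt/gte/lt/lte/limit/is_ascending parameters compose as position filters followed by ordering and truncation. An in-memory illustration of the same semantics (a hypothetical helper, not library code):

.. code::

    def select_positions(positions, gt=None, gte=None, lt=None, lte=None,
                         limit=None, is_ascending=True):
        """In-memory illustration of the gt/gte/lt/lte/limit parameters above."""
        selected = [p for p in positions
                    if (gt is None or p > gt)
                    and (gte is None or p >= gte)
                    and (lt is None or p < lt)
                    and (lte is None or p <= lte)]
        selected.sort(reverse=not is_ascending)
        return selected if limit is None else selected[:limit]

    positions = [0, 1, 2, 3, 4, 5]
    assert select_positions(positions, gt=1, lte=4) == [2, 3, 4]
    # The limit applies after ordering, so descending + limit gives the latest items:
    assert select_positions(positions, limit=2, is_ascending=False) == [5, 4]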
233,823
johnbywater/eventsourcing
eventsourcing/infrastructure/eventstore.py
EventStore.get_domain_event
def get_domain_event(self, originator_id, position): """ Gets a domain event from the sequence identified by `originator_id` at the given position. :param originator_id: ID of a sequence of events :param position: get item at this position :return: domain event """ sequenced_item = self.record_manager.get_item( sequence_id=originator_id, position=position, ) return self.mapper.event_from_item(sequenced_item)
python
def get_domain_event(self, originator_id, position): """ Gets a domain event from the sequence identified by `originator_id` at the given position. :param originator_id: ID of a sequence of events :param position: get item at this position :return: domain event """ sequenced_item = self.record_manager.get_item( sequence_id=originator_id, position=position, ) return self.mapper.event_from_item(sequenced_item)
[ "def", "get_domain_event", "(", "self", ",", "originator_id", ",", "position", ")", ":", "sequenced_item", "=", "self", ".", "record_manager", ".", "get_item", "(", "sequence_id", "=", "originator_id", ",", "position", "=", "position", ",", ")", "return", "self", ".", "mapper", ".", "event_from_item", "(", "sequenced_item", ")" ]
Gets a domain event from the sequence identified by `originator_id` at the given position. :param originator_id: ID of a sequence of events :param position: get item at this position :return: domain event
[ "Gets", "a", "domain", "event", "from", "the", "sequence", "identified", "by", "originator_id", "at", "position", "eq", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/eventstore.py#L144-L158
233,824
johnbywater/eventsourcing
eventsourcing/infrastructure/eventstore.py
EventStore.get_most_recent_event
def get_most_recent_event(self, originator_id, lt=None, lte=None): """ Gets a domain event from the sequence identified by `originator_id` at the highest position. :param originator_id: ID of a sequence of events :param lt: get highest before this position :param lte: get highest at or before this position :return: domain event """ events = self.get_domain_events(originator_id=originator_id, lt=lt, lte=lte, limit=1, is_ascending=False) events = list(events) try: return events[0] except IndexError: pass
python
def get_most_recent_event(self, originator_id, lt=None, lte=None): """ Gets a domain event from the sequence identified by `originator_id` at the highest position. :param originator_id: ID of a sequence of events :param lt: get highest before this position :param lte: get highest at or before this position :return: domain event """ events = self.get_domain_events(originator_id=originator_id, lt=lt, lte=lte, limit=1, is_ascending=False) events = list(events) try: return events[0] except IndexError: pass
[ "def", "get_most_recent_event", "(", "self", ",", "originator_id", ",", "lt", "=", "None", ",", "lte", "=", "None", ")", ":", "events", "=", "self", ".", "get_domain_events", "(", "originator_id", "=", "originator_id", ",", "lt", "=", "lt", ",", "lte", "=", "lte", ",", "limit", "=", "1", ",", "is_ascending", "=", "False", ")", "events", "=", "list", "(", "events", ")", "try", ":", "return", "events", "[", "0", "]", "except", "IndexError", ":", "pass" ]
Gets a domain event from the sequence identified by `originator_id` at the highest position. :param originator_id: ID of a sequence of events :param lt: get highest before this position :param lte: get highest at or before this position :return: domain event
[ "Gets", "a", "domain", "event", "from", "the", "sequence", "identified", "by", "originator_id", "at", "the", "highest", "position", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/eventstore.py#L160-L175
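Note that the method above returns None implicitly when the sequence is empty (the except clause just passes). The descending query with limit=1 is equivalent to taking the last item of the ascending sequence, as this small stand-alone mirror shows:

.. code::

    def most_recent(events):
        """Last item or None, mirroring the try/except IndexError idiom above."""
        try:
            return events[-1]
        except IndexError:
            return None

    assert most_recent([]) is None
    assert most_recent(['created', 'updated']) == 'updated'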
233,825
johnbywater/eventsourcing
eventsourcing/infrastructure/eventstore.py
EventStore.all_domain_events
def all_domain_events(self): """ Yields all domain events in the event store. """ for originator_id in self.record_manager.all_sequence_ids(): for domain_event in self.get_domain_events(originator_id=originator_id, page_size=100): yield domain_event
python
def all_domain_events(self): """ Yields all domain events in the event store. """ for originator_id in self.record_manager.all_sequence_ids(): for domain_event in self.get_domain_events(originator_id=originator_id, page_size=100): yield domain_event
[ "def", "all_domain_events", "(", "self", ")", ":", "for", "originator_id", "in", "self", ".", "record_manager", ".", "all_sequence_ids", "(", ")", ":", "for", "domain_event", "in", "self", ".", "get_domain_events", "(", "originator_id", "=", "originator_id", ",", "page_size", "=", "100", ")", ":", "yield", "domain_event" ]
Yields all domain events in the event store.
[ "Yields", "all", "domain", "events", "in", "the", "event", "store", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/eventstore.py#L177-L183
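The generator above iterates sequence IDs and streams each sequence a page at a time, so no more than one page of events per sequence needs deserializing at once. A self-contained sketch of the same shape over plain dicts and lists:

.. code::

    def all_items(sequences, page_size=2):
        """Same shape as all_domain_events(): iterate sequence IDs, then
        yield each sequence's items, reading a page at a time."""
        for sequence_id, items in sequences.items():  # insertion-ordered dict (Python 3.7+)
            for start in range(0, len(items), page_size):
                for item in items[start:start + page_size]:
                    yield sequence_id, item

    sequences = {'order-1': ['created', 'paid'], 'order-2': ['created']}
    assert list(all_items(sequences)) == [
        ('order-1', 'created'), ('order-1', 'paid'), ('order-2', 'created'),
    ]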
233,826
johnbywater/eventsourcing
eventsourcing/application/process.py
ProcessApplication.publish_prompt
def publish_prompt(self, event=None): """ Publishes prompt for a given event. Used to prompt a downstream process application when an event is published by this application's model, which can happen when application command methods, rather than the process policy, are called. Wraps exceptions with PromptFailed, to avoid application policy exceptions being seen directly in other applications when running synchronously in a single thread. """ prompt = Prompt(self.name, self.pipeline_id) try: publish(prompt) except PromptFailed: raise except Exception as e: raise PromptFailed("{}: {}".format(type(e), str(e)))
python
def publish_prompt(self, event=None): """ Publishes prompt for a given event. Used to prompt a downstream process application when an event is published by this application's model, which can happen when application command methods, rather than the process policy, are called. Wraps exceptions with PromptFailed, to avoid application policy exceptions being seen directly in other applications when running synchronously in a single thread. """ prompt = Prompt(self.name, self.pipeline_id) try: publish(prompt) except PromptFailed: raise except Exception as e: raise PromptFailed("{}: {}".format(type(e), str(e)))
[ "def", "publish_prompt", "(", "self", ",", "event", "=", "None", ")", ":", "prompt", "=", "Prompt", "(", "self", ".", "name", ",", "self", ".", "pipeline_id", ")", "try", ":", "publish", "(", "prompt", ")", "except", "PromptFailed", ":", "raise", "except", "Exception", "as", "e", ":", "raise", "PromptFailed", "(", "\"{}: {}\"", ".", "format", "(", "type", "(", "e", ")", ",", "str", "(", "e", ")", ")", ")" ]
Publishes prompt for a given event. Used to prompt a downstream process application when an event is published by this application's model, which can happen when application command methods, rather than the process policy, are called. Wraps exceptions with PromptFailed, to avoid application policy exceptions being seen directly in other applications when running synchronously in a single thread.
[ "Publishes", "prompt", "for", "a", "given", "event", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/application/process.py#L52-L71
233,827
johnbywater/eventsourcing
eventsourcing/infrastructure/django/manager.py
DjangoRecordManager._prepare_insert
def _prepare_insert(self, tmpl, record_class, field_names, placeholder_for_id=False): """ With transaction isolation level of "read committed" this should generate records with a contiguous sequence of integer IDs, using an indexed ID column, the database-side SQL max function, the insert-select-from form, and optimistic concurrency control. """ field_names = list(field_names) if hasattr(record_class, 'application_name') and 'application_name' not in field_names: field_names.append('application_name') if hasattr(record_class, 'pipeline_id') and 'pipeline_id' not in field_names: field_names.append('pipeline_id') if hasattr(record_class, 'causal_dependencies') and 'causal_dependencies' not in field_names: field_names.append('causal_dependencies') if placeholder_for_id: if self.notification_id_name: if self.notification_id_name not in field_names: field_names.append('id') statement = tmpl.format( tablename=self.get_record_table_name(record_class), columns=", ".join(field_names), placeholders=", ".join(['%s' for _ in field_names]), notification_id=self.notification_id_name ) return statement
python
def _prepare_insert(self, tmpl, record_class, field_names, placeholder_for_id=False): """ With transaction isolation level of "read committed" this should generate records with a contiguous sequence of integer IDs, using an indexed ID column, the database-side SQL max function, the insert-select-from form, and optimistic concurrency control. """ field_names = list(field_names) if hasattr(record_class, 'application_name') and 'application_name' not in field_names: field_names.append('application_name') if hasattr(record_class, 'pipeline_id') and 'pipeline_id' not in field_names: field_names.append('pipeline_id') if hasattr(record_class, 'causal_dependencies') and 'causal_dependencies' not in field_names: field_names.append('causal_dependencies') if placeholder_for_id: if self.notification_id_name: if self.notification_id_name not in field_names: field_names.append('id') statement = tmpl.format( tablename=self.get_record_table_name(record_class), columns=", ".join(field_names), placeholders=", ".join(['%s' for _ in field_names]), notification_id=self.notification_id_name ) return statement
[ "def", "_prepare_insert", "(", "self", ",", "tmpl", ",", "record_class", ",", "field_names", ",", "placeholder_for_id", "=", "False", ")", ":", "field_names", "=", "list", "(", "field_names", ")", "if", "hasattr", "(", "record_class", ",", "'application_name'", ")", "and", "'application_name'", "not", "in", "field_names", ":", "field_names", ".", "append", "(", "'application_name'", ")", "if", "hasattr", "(", "record_class", ",", "'pipeline_id'", ")", "and", "'pipeline_id'", "not", "in", "field_names", ":", "field_names", ".", "append", "(", "'pipeline_id'", ")", "if", "hasattr", "(", "record_class", ",", "'causal_dependencies'", ")", "and", "'causal_dependencies'", "not", "in", "field_names", ":", "field_names", ".", "append", "(", "'causal_dependencies'", ")", "if", "placeholder_for_id", ":", "if", "self", ".", "notification_id_name", ":", "if", "self", ".", "notification_id_name", "not", "in", "field_names", ":", "field_names", ".", "append", "(", "'id'", ")", "statement", "=", "tmpl", ".", "format", "(", "tablename", "=", "self", ".", "get_record_table_name", "(", "record_class", ")", ",", "columns", "=", "\", \"", ".", "join", "(", "field_names", ")", ",", "placeholders", "=", "\", \"", ".", "join", "(", "[", "'%s'", "for", "_", "in", "field_names", "]", ")", ",", "notification_id", "=", "self", ".", "notification_id_name", ")", "return", "statement" ]
With transaction isolation level of "read committed" this should generate records with a contiguous sequence of integer IDs, using an indexed ID column, the database-side SQL max function, the insert-select-from form, and optimistic concurrency control.
[ "With", "transaction", "isolation", "level", "of", "read", "committed", "this", "should", "generate", "records", "with", "a", "contiguous", "sequence", "of", "integer", "IDs", "using", "an", "indexed", "ID", "column", "the", "database", "-", "side", "SQL", "max", "function", "the", "insert", "-", "select", "-", "from", "form", "and", "optimistic", "concurrency", "control", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/django/manager.py#L68-L93
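The tmpl argument itself is not shown in this record; as an assumption, an insert-select-from template in the spirit of the docstring might look like the string below, with the new row's notification ID computed database-side as max(id) + 1 and a unique index on that column rejecting concurrent writers.

.. code::

    # Hypothetical template, not the library's actual SQL.
    tmpl = (
        "INSERT INTO {tablename} ({columns}, {notification_id}) "
        "SELECT {placeholders}, COALESCE(MAX({notification_id}), 0) + 1 "
        "FROM {tablename}"
    )

    statement = tmpl.format(
        tablename='stored_events',
        columns='originator_id, state',
        placeholders='%s, %s',
        notification_id='id',
    )
    print(statement)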
233,828
johnbywater/eventsourcing
eventsourcing/infrastructure/django/manager.py
DjangoRecordManager.get_notifications
def get_notifications(self, start=None, stop=None, *args, **kwargs): """ Returns records from the table, optionally filtered by start and stop. """ filter_kwargs = {} # Todo: Also support sequencing by 'position' if items are sequenced by timestamp? if start is not None: filter_kwargs['%s__gte' % self.notification_id_name] = start + 1 if stop is not None: filter_kwargs['%s__lt' % self.notification_id_name] = stop + 1 objects = self.record_class.objects.filter(**filter_kwargs) if hasattr(self.record_class, 'application_name'): objects = objects.filter(application_name=self.application_name) if hasattr(self.record_class, 'pipeline_id'): objects = objects.filter(pipeline_id=self.pipeline_id) objects = objects.order_by('%s' % self.notification_id_name) return objects.all()
python
def get_notifications(self, start=None, stop=None, *args, **kwargs): """ Returns records from the table, optionally filtered by start and stop. """ filter_kwargs = {} # Todo: Also support sequencing by 'position' if items are sequenced by timestamp? if start is not None: filter_kwargs['%s__gte' % self.notification_id_name] = start + 1 if stop is not None: filter_kwargs['%s__lt' % self.notification_id_name] = stop + 1 objects = self.record_class.objects.filter(**filter_kwargs) if hasattr(self.record_class, 'application_name'): objects = objects.filter(application_name=self.application_name) if hasattr(self.record_class, 'pipeline_id'): objects = objects.filter(pipeline_id=self.pipeline_id) objects = objects.order_by('%s' % self.notification_id_name) return objects.all()
[ "def", "get_notifications", "(", "self", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "filter_kwargs", "=", "{", "}", "# Todo: Also support sequencing by 'position' if items are sequenced by timestamp?", "if", "start", "is", "not", "None", ":", "filter_kwargs", "[", "'%s__gte'", "%", "self", ".", "notification_id_name", "]", "=", "start", "+", "1", "if", "stop", "is", "not", "None", ":", "filter_kwargs", "[", "'%s__lt'", "%", "self", ".", "notification_id_name", "]", "=", "stop", "+", "1", "objects", "=", "self", ".", "record_class", ".", "objects", ".", "filter", "(", "*", "*", "filter_kwargs", ")", "if", "hasattr", "(", "self", ".", "record_class", ",", "'application_name'", ")", ":", "objects", "=", "objects", ".", "filter", "(", "application_name", "=", "self", ".", "application_name", ")", "if", "hasattr", "(", "self", ".", "record_class", ",", "'pipeline_id'", ")", ":", "objects", "=", "objects", ".", "filter", "(", "pipeline_id", "=", "self", ".", "pipeline_id", ")", "objects", "=", "objects", ".", "order_by", "(", "'%s'", "%", "self", ".", "notification_id_name", ")", "return", "objects", ".", "all", "(", ")" ]
Returns records from the table, optionally filtered by start and stop.
[ "Returns", "all", "records", "in", "the", "table", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/django/manager.py#L151-L169
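The +1 shifts above translate zero-based, half-open [start, stop) section bounds into one-based notification-ID bounds. A tiny stand-alone mirror of that arithmetic (hypothetical helper, fixed 'id' column name):

.. code::

    def to_id_filters(start=None, stop=None):
        """Zero-based [start, stop) slice bounds become one-based ID bounds:
        id >= start + 1 and id < stop + 1."""
        filters = {}
        if start is not None:
            filters['id__gte'] = start + 1
        if stop is not None:
            filters['id__lt'] = stop + 1
        return filters

    # Asking for the slice [0, 5) selects notification IDs 1..5 inclusive.
    assert to_id_filters(0, 5) == {'id__gte': 1, 'id__lt': 6}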
233,829
johnbywater/eventsourcing
eventsourcing/application/actors.py
ActorModelRunner.start
def start(self): """ Starts all the actors to run a system of process applications. """ # Subscribe to broadcast prompts published by a process # application in the parent operating system process. subscribe(handler=self.forward_prompt, predicate=self.is_prompt) # Initialise the system actor. msg = SystemInitRequest( self.system.process_classes, self.infrastructure_class, self.system.followings, self.pipeline_ids ) response = self.actor_system.ask(self.system_actor, msg) # Keep the pipeline actor addresses, to send prompts directly. assert isinstance(response, SystemInitResponse), type(response) assert list(response.pipeline_actors.keys()) == self.pipeline_ids, ( "Configured pipeline IDs mismatch initialised system {} {}").format( list(response.pipeline_actors.keys()), self.pipeline_ids ) self.pipeline_actors = response.pipeline_actors
python
def start(self): """ Starts all the actors to run a system of process applications. """ # Subscribe to broadcast prompts published by a process # application in the parent operating system process. subscribe(handler=self.forward_prompt, predicate=self.is_prompt) # Initialise the system actor. msg = SystemInitRequest( self.system.process_classes, self.infrastructure_class, self.system.followings, self.pipeline_ids ) response = self.actor_system.ask(self.system_actor, msg) # Keep the pipeline actor addresses, to send prompts directly. assert isinstance(response, SystemInitResponse), type(response) assert list(response.pipeline_actors.keys()) == self.pipeline_ids, ( "Configured pipeline IDs mismatch initialised system {} {}").format( list(response.pipeline_actors.keys()), self.pipeline_ids ) self.pipeline_actors = response.pipeline_actors
[ "def", "start", "(", "self", ")", ":", "# Subscribe to broadcast prompts published by a process", "# application in the parent operating system process.", "subscribe", "(", "handler", "=", "self", ".", "forward_prompt", ",", "predicate", "=", "self", ".", "is_prompt", ")", "# Initialise the system actor.", "msg", "=", "SystemInitRequest", "(", "self", ".", "system", ".", "process_classes", ",", "self", ".", "infrastructure_class", ",", "self", ".", "system", ".", "followings", ",", "self", ".", "pipeline_ids", ")", "response", "=", "self", ".", "actor_system", ".", "ask", "(", "self", ".", "system_actor", ",", "msg", ")", "# Keep the pipeline actor addresses, to send prompts directly.", "assert", "isinstance", "(", "response", ",", "SystemInitResponse", ")", ",", "type", "(", "response", ")", "assert", "list", "(", "response", ".", "pipeline_actors", ".", "keys", "(", ")", ")", "==", "self", ".", "pipeline_ids", ",", "(", "\"Configured pipeline IDs mismatch initialised system {} {}\"", ")", ".", "format", "(", "list", "(", "self", ".", "pipeline_actors", ".", "keys", "(", ")", ")", ",", "self", ".", "pipeline_ids", ")", "self", ".", "pipeline_actors", "=", "response", ".", "pipeline_actors" ]
Starts all the actors to run a system of process applications.
[ "Starts", "all", "the", "actors", "to", "run", "a", "system", "of", "process", "applications", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/application/actors.py#L80-L105
233,830
johnbywater/eventsourcing
eventsourcing/application/actors.py
ActorModelRunner.close
def close(self): """Stops all the actors running a system of process applications.""" super(ActorModelRunner, self).close() unsubscribe(handler=self.forward_prompt, predicate=self.is_prompt) if self.shutdown_on_close: self.shutdown()
python
def close(self): """Stops all the actors running a system of process applications.""" super(ActorModelRunner, self).close() unsubscribe(handler=self.forward_prompt, predicate=self.is_prompt) if self.shutdown_on_close: self.shutdown()
[ "def", "close", "(", "self", ")", ":", "super", "(", "ActorModelRunner", ",", "self", ")", ".", "close", "(", ")", "unsubscribe", "(", "handler", "=", "self", ".", "forward_prompt", ",", "predicate", "=", "self", ".", "is_prompt", ")", "if", "self", ".", "shutdown_on_close", ":", "self", ".", "shutdown", "(", ")" ]
Stops all the actors running a system of process applications.
[ "Stops", "all", "the", "actors", "running", "a", "system", "of", "process", "applications", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/application/actors.py#L122-L127
233,831
johnbywater/eventsourcing
eventsourcing/contrib/suffixtrees/application.py
SuffixTreeApplication.register_new_suffix_tree
def register_new_suffix_tree(self, case_insensitive=False): """Returns a new suffix tree entity. """ suffix_tree = register_new_suffix_tree(case_insensitive=case_insensitive) suffix_tree._node_repo = self.node_repo suffix_tree._node_child_collection_repo = self.node_child_collection_repo suffix_tree._edge_repo = self.edge_repo suffix_tree._stringid_collection_repo = self.stringid_collection_repo return suffix_tree
python
def register_new_suffix_tree(self, case_insensitive=False): """Returns a new suffix tree entity. """ suffix_tree = register_new_suffix_tree(case_insensitive=case_insensitive) suffix_tree._node_repo = self.node_repo suffix_tree._node_child_collection_repo = self.node_child_collection_repo suffix_tree._edge_repo = self.edge_repo suffix_tree._stringid_collection_repo = self.stringid_collection_repo return suffix_tree
[ "def", "register_new_suffix_tree", "(", "self", ",", "case_insensitive", "=", "False", ")", ":", "suffix_tree", "=", "register_new_suffix_tree", "(", "case_insensitive", "=", "case_insensitive", ")", "suffix_tree", ".", "_node_repo", "=", "self", ".", "node_repo", "suffix_tree", ".", "_node_child_collection_repo", "=", "self", ".", "node_child_collection_repo", "suffix_tree", ".", "_edge_repo", "=", "self", ".", "edge_repo", "suffix_tree", ".", "_stringid_collection_repo", "=", "self", ".", "stringid_collection_repo", "return", "suffix_tree" ]
Returns a new suffix tree entity.
[ "Returns", "a", "new", "suffix", "tree", "entity", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/contrib/suffixtrees/application.py#L29-L37
233,832
johnbywater/eventsourcing
eventsourcing/contrib/suffixtrees/application.py
SuffixTreeApplication.find_string_ids
def find_string_ids(self, substring, suffix_tree_id, limit=None): """Returns a set of IDs for strings that contain the given substring. """ # Find an edge for the substring. edge, ln = self.find_substring_edge(substring=substring, suffix_tree_id=suffix_tree_id) # If there isn't an edge, return an empty set. if edge is None: return set() # Get all the string IDs beneath the edge's destination node. string_ids = get_string_ids( node_id=edge.dest_node_id, node_repo=self.node_repo, node_child_collection_repo=self.node_child_collection_repo, stringid_collection_repo=self.stringid_collection_repo, length_until_end=edge.length + 1 - ln, limit=limit ) # Return a set of string IDs. return set(string_ids)
python
def find_string_ids(self, substring, suffix_tree_id, limit=None): """Returns a set of IDs for strings that contain the given substring. """ # Find an edge for the substring. edge, ln = self.find_substring_edge(substring=substring, suffix_tree_id=suffix_tree_id) # If there isn't an edge, return an empty set. if edge is None: return set() # Get all the string IDs beneath the edge's destination node. string_ids = get_string_ids( node_id=edge.dest_node_id, node_repo=self.node_repo, node_child_collection_repo=self.node_child_collection_repo, stringid_collection_repo=self.stringid_collection_repo, length_until_end=edge.length + 1 - ln, limit=limit ) # Return a set of string IDs. return set(string_ids)
[ "def", "find_string_ids", "(", "self", ",", "substring", ",", "suffix_tree_id", ",", "limit", "=", "None", ")", ":", "# Find an edge for the substring.", "edge", ",", "ln", "=", "self", ".", "find_substring_edge", "(", "substring", "=", "substring", ",", "suffix_tree_id", "=", "suffix_tree_id", ")", "# If there isn't an edge, return an empty set.", "if", "edge", "is", "None", ":", "return", "set", "(", ")", "# Get all the string IDs beneath the edge's destination node.", "string_ids", "=", "get_string_ids", "(", "node_id", "=", "edge", ".", "dest_node_id", ",", "node_repo", "=", "self", ".", "node_repo", ",", "node_child_collection_repo", "=", "self", ".", "node_child_collection_repo", ",", "stringid_collection_repo", "=", "self", ".", "stringid_collection_repo", ",", "length_until_end", "=", "edge", ".", "length", "+", "1", "-", "ln", ",", "limit", "=", "limit", ")", "# Return a set of string IDs.", "return", "set", "(", "string_ids", ")" ]
Returns a set of IDs for strings that contain the given substring.
[ "Returns", "a", "set", "of", "IDs", "for", "strings", "that", "contain", "the", "given", "substring", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/contrib/suffixtrees/application.py#L50-L72
233,833
johnbywater/eventsourcing
eventsourcing/contrib/suffixtrees/application.py
SuffixTreeApplication.find_substring_edge
def find_substring_edge(self, substring, suffix_tree_id): """Returns an edge that matches the given substring. """ suffix_tree = self.suffix_tree_repo[suffix_tree_id] started = datetime.datetime.now() edge, ln = find_substring_edge(substring=substring, suffix_tree=suffix_tree, edge_repo=self.edge_repo) # if edge is not None: # print("Got edge for substring '{}': {}".format(substring, edge)) # else: # print("No edge for substring '{}'".format(substring)) print(" - searched for edge in {} for substring: '{}'".format(datetime.datetime.now() - started, substring)) return edge, ln
python
def find_substring_edge(self, substring, suffix_tree_id): """Returns an edge that matches the given substring. """ suffix_tree = self.suffix_tree_repo[suffix_tree_id] started = datetime.datetime.now() edge, ln = find_substring_edge(substring=substring, suffix_tree=suffix_tree, edge_repo=self.edge_repo) # if edge is not None: # print("Got edge for substring '{}': {}".format(substring, edge)) # else: # print("No edge for substring '{}'".format(substring)) print(" - searched for edge in {} for substring: '{}'".format(datetime.datetime.now() - started, substring)) return edge, ln
[ "def", "find_substring_edge", "(", "self", ",", "substring", ",", "suffix_tree_id", ")", ":", "suffix_tree", "=", "self", ".", "suffix_tree_repo", "[", "suffix_tree_id", "]", "started", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "edge", ",", "ln", "=", "find_substring_edge", "(", "substring", "=", "substring", ",", "suffix_tree", "=", "suffix_tree", ",", "edge_repo", "=", "self", ".", "edge_repo", ")", "# if edge is not None:", "# print(\"Got edge for substring '{}': {}\".format(substring, edge))", "# else:", "# print(\"No edge for substring '{}'\".format(substring))", "print", "(", "\" - searched for edge in {} for substring: '{}'\"", ".", "format", "(", "datetime", ".", "datetime", ".", "now", "(", ")", "-", "started", ",", "substring", ")", ")", "return", "edge", ",", "ln" ]
Returns an edge that matches the given substring.
[ "Returns", "an", "edge", "that", "matches", "the", "given", "substring", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/contrib/suffixtrees/application.py#L74-L85
233,834
johnbywater/eventsourcing
eventsourcing/application/system.py
SingleThreadedRunner.run_followers
def run_followers(self, prompt): """ First caller adds a prompt to the queue and runs followers until there are no more pending prompts. Subsequent callers just add a prompt to the queue, avoiding recursion. """ assert isinstance(prompt, Prompt) # Put the prompt on the queue. self.pending_prompts.put(prompt) if self.iteration_lock.acquire(False): start_time = time.time() i = 0 try: while True: try: prompt = self.pending_prompts.get(False) except Empty: break else: followers = self.system.followers[prompt.process_name] for follower_name in followers: follower = self.system.processes[follower_name] follower.run(prompt) i += 1 self.pending_prompts.task_done() finally: run_frequency = i / (time.time() - start_time) # print(f"Run frequency: {run_frequency}") self.iteration_lock.release()
python
def run_followers(self, prompt): """ First caller adds a prompt to the queue and runs followers until there are no more pending prompts. Subsequent callers just add a prompt to the queue, avoiding recursion. """ assert isinstance(prompt, Prompt) # Put the prompt on the queue. self.pending_prompts.put(prompt) if self.iteration_lock.acquire(False): start_time = time.time() i = 0 try: while True: try: prompt = self.pending_prompts.get(False) except Empty: break else: followers = self.system.followers[prompt.process_name] for follower_name in followers: follower = self.system.processes[follower_name] follower.run(prompt) i += 1 self.pending_prompts.task_done() finally: run_frequency = i / (time.time() - start_time) # print(f"Run frequency: {run_frequency}") self.iteration_lock.release()
[ "def", "run_followers", "(", "self", ",", "prompt", ")", ":", "assert", "isinstance", "(", "prompt", ",", "Prompt", ")", "# Put the prompt on the queue.", "self", ".", "pending_prompts", ".", "put", "(", "prompt", ")", "if", "self", ".", "iteration_lock", ".", "acquire", "(", "False", ")", ":", "start_time", "=", "time", ".", "time", "(", ")", "i", "=", "0", "try", ":", "while", "True", ":", "try", ":", "prompt", "=", "self", ".", "pending_prompts", ".", "get", "(", "False", ")", "except", "Empty", ":", "break", "else", ":", "followers", "=", "self", ".", "system", ".", "followers", "[", "prompt", ".", "process_name", "]", "for", "follower_name", "in", "followers", ":", "follower", "=", "self", ".", "system", ".", "processes", "[", "follower_name", "]", "follower", ".", "run", "(", "prompt", ")", "i", "+=", "1", "self", ".", "pending_prompts", ".", "task_done", "(", ")", "finally", ":", "run_frequency", "=", "i", "/", "(", "time", ".", "time", "(", ")", "-", "start_time", ")", "# print(f\"Run frequency: {run_frequency}\")", "self", ".", "iteration_lock", ".", "release", "(", ")" ]
First caller adds a prompt to the queue and runs followers until there are no more pending prompts. Subsequent callers just add a prompt to the queue, avoiding recursion.
[ "First", "caller", "adds", "a", "prompt", "to", "queue", "and", "runs", "followers", "until", "there", "are", "no", "more", "pending", "prompts", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/application/system.py#L223-L255
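The non-blocking acquire is the heart of the method above: the first caller drains the queue while re-entrant calls merely enqueue, so following one prompt with another never deepens the call stack. A stand-alone sketch of the same pattern:

.. code::

    import threading
    try:
        from queue import Queue, Empty  # Python 3
    except ImportError:
        from Queue import Queue, Empty  # Python 2

    pending = Queue()
    lock = threading.Lock()
    handled = []

    def handle(item):
        """First caller drains the queue; nested calls only enqueue, so the
        call stack never grows with the number of queued items."""
        pending.put(item)
        if lock.acquire(False):  # non-blocking: returns False if already held
            try:
                while True:
                    try:
                        current = pending.get(False)
                    except Empty:
                        break
                    handled.append(current)
                    if current == 'a':
                        handle('b')  # re-entrant call: enqueued, not recursed
                    pending.task_done()
            finally:
                lock.release()

    handle('a')
    assert handled == ['a', 'b']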
233,835
johnbywater/eventsourcing
eventsourcing/example/domainmodel.py
create_new_example
def create_new_example(foo='', a='', b=''): """ Factory method for example entities. :rtype: Example """ return Example.__create__(foo=foo, a=a, b=b)
python
def create_new_example(foo='', a='', b=''): """ Factory method for example entities. :rtype: Example """ return Example.__create__(foo=foo, a=a, b=b)
[ "def", "create_new_example", "(", "foo", "=", "''", ",", "a", "=", "''", ",", "b", "=", "''", ")", ":", "return", "Example", ".", "__create__", "(", "foo", "=", "foo", ",", "a", "=", "a", ",", "b", "=", "b", ")" ]
Factory method for example entities. :rtype: Example
[ "Factory", "method", "for", "example", "entities", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/example/domainmodel.py#L62-L68
233,836
johnbywater/eventsourcing
eventsourcing/application/decorators.py
applicationpolicy
def applicationpolicy(arg=None): """ Decorator for application policy method. Allows policy to be built up from methods registered for different event classes. """ def _mutator(func): wrapped = singledispatch(func) @wraps(wrapped) def wrapper(*args, **kwargs): event = kwargs.get('event') or args[-1] return wrapped.dispatch(type(event))(*args, **kwargs) wrapper.register = wrapped.register return wrapper assert isfunction(arg), arg return _mutator(arg)
python
def applicationpolicy(arg=None): """ Decorator for application policy method. Allows policy to be built up from methods registered for different event classes. """ def _mutator(func): wrapped = singledispatch(func) @wraps(wrapped) def wrapper(*args, **kwargs): event = kwargs.get('event') or args[-1] return wrapped.dispatch(type(event))(*args, **kwargs) wrapper.register = wrapped.register return wrapper assert isfunction(arg), arg return _mutator(arg)
[ "def", "applicationpolicy", "(", "arg", "=", "None", ")", ":", "def", "_mutator", "(", "func", ")", ":", "wrapped", "=", "singledispatch", "(", "func", ")", "@", "wraps", "(", "wrapped", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "event", "=", "kwargs", ".", "get", "(", "'event'", ")", "or", "args", "[", "-", "1", "]", "return", "wrapped", ".", "dispatch", "(", "type", "(", "event", ")", ")", "(", "*", "args", ",", "*", "*", "kwargs", ")", "wrapper", ".", "register", "=", "wrapped", ".", "register", "return", "wrapper", "assert", "isfunction", "(", "arg", ")", ",", "arg", "return", "_mutator", "(", "arg", ")" ]
Decorator for application policy method. Allows policy to be built up from methods registered for different event classes.
[ "Decorator", "for", "application", "policy", "method", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/application/decorators.py#L5-L26
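A usage sketch for the decorator above, assuming the eventsourcing package is importable; the Policy class and event types are hypothetical. Dispatch is on the type of the last positional argument, so handlers registered per event class are selected by the event passed in.

.. code::

    from eventsourcing.application.decorators import applicationpolicy

    class Created(object):
        pass

    class Discarded(object):
        pass

    class Policy(object):
        @applicationpolicy
        def policy(self, repository, event):
            """Default handler: ignore events with no registered handler."""
            return None

        @policy.register(Created)
        def _(self, repository, event):
            return 'handle created'

        @policy.register(Discarded)
        def _(self, repository, event):
            return 'handle discarded'

    p = Policy()
    # Dispatch is on the type of the last positional argument, the event.
    assert p.policy(None, Created()) == 'handle created'
    assert p.policy(None, Discarded()) == 'handle discarded'
    assert p.policy(None, object()) is None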
233,837
johnbywater/eventsourcing
eventsourcing/infrastructure/sqlalchemy/manager.py
SQLAlchemyRecordManager._prepare_insert
def _prepare_insert(self, tmpl, record_class, field_names, placeholder_for_id=False): """ With transaction isolation level of "read committed" this should generate records with a contiguous sequence of integer IDs, assumes an indexed ID column, the database-side SQL max function, the insert-select-from form, and optimistic concurrency control. """ field_names = list(field_names) if hasattr(record_class, 'application_name') and 'application_name' not in field_names: field_names.append('application_name') if hasattr(record_class, 'pipeline_id') and 'pipeline_id' not in field_names: field_names.append('pipeline_id') if hasattr(record_class, 'causal_dependencies') and 'causal_dependencies' not in field_names: field_names.append('causal_dependencies') if self.notification_id_name: if placeholder_for_id: if self.notification_id_name not in field_names: field_names.append(self.notification_id_name) statement = text(tmpl.format( tablename=self.get_record_table_name(record_class), columns=", ".join(field_names), placeholders=", ".join([":{}".format(f) for f in field_names]), notification_id=self.notification_id_name )) # Define bind parameters with explicit types taken from record column types. bindparams = [] for col_name in field_names: column_type = getattr(record_class, col_name).type bindparams.append(bindparam(col_name, type_=column_type)) # Redefine statement with explicitly typed bind parameters. statement = statement.bindparams(*bindparams) # Compile the statement with the session dialect. compiled = statement.compile(dialect=self.session.bind.dialect) return compiled
python
def _prepare_insert(self, tmpl, record_class, field_names, placeholder_for_id=False): """ With transaction isolation level of "read committed" this should generate records with a contiguous sequence of integer IDs, assumes an indexed ID column, the database-side SQL max function, the insert-select-from form, and optimistic concurrency control. """ field_names = list(field_names) if hasattr(record_class, 'application_name') and 'application_name' not in field_names: field_names.append('application_name') if hasattr(record_class, 'pipeline_id') and 'pipeline_id' not in field_names: field_names.append('pipeline_id') if hasattr(record_class, 'causal_dependencies') and 'causal_dependencies' not in field_names: field_names.append('causal_dependencies') if self.notification_id_name: if placeholder_for_id: if self.notification_id_name not in field_names: field_names.append(self.notification_id_name) statement = text(tmpl.format( tablename=self.get_record_table_name(record_class), columns=", ".join(field_names), placeholders=", ".join([":{}".format(f) for f in field_names]), notification_id=self.notification_id_name )) # Define bind parameters with explicit types taken from record column types. bindparams = [] for col_name in field_names: column_type = getattr(record_class, col_name).type bindparams.append(bindparam(col_name, type_=column_type)) # Redefine statement with explicitly typed bind parameters. statement = statement.bindparams(*bindparams) # Compile the statement with the session dialect. compiled = statement.compile(dialect=self.session.bind.dialect) return compiled
[ "def", "_prepare_insert", "(", "self", ",", "tmpl", ",", "record_class", ",", "field_names", ",", "placeholder_for_id", "=", "False", ")", ":", "field_names", "=", "list", "(", "field_names", ")", "if", "hasattr", "(", "record_class", ",", "'application_name'", ")", "and", "'application_name'", "not", "in", "field_names", ":", "field_names", ".", "append", "(", "'application_name'", ")", "if", "hasattr", "(", "record_class", ",", "'pipeline_id'", ")", "and", "'pipeline_id'", "not", "in", "field_names", ":", "field_names", ".", "append", "(", "'pipeline_id'", ")", "if", "hasattr", "(", "record_class", ",", "'causal_dependencies'", ")", "and", "'causal_dependencies'", "not", "in", "field_names", ":", "field_names", ".", "append", "(", "'causal_dependencies'", ")", "if", "self", ".", "notification_id_name", ":", "if", "placeholder_for_id", ":", "if", "self", ".", "notification_id_name", "not", "in", "field_names", ":", "field_names", ".", "append", "(", "self", ".", "notification_id_name", ")", "statement", "=", "text", "(", "tmpl", ".", "format", "(", "tablename", "=", "self", ".", "get_record_table_name", "(", "record_class", ")", ",", "columns", "=", "\", \"", ".", "join", "(", "field_names", ")", ",", "placeholders", "=", "\", \"", ".", "join", "(", "[", "\":{}\"", ".", "format", "(", "f", ")", "for", "f", "in", "field_names", "]", ")", ",", "notification_id", "=", "self", ".", "notification_id_name", ")", ")", "# Define bind parameters with explicit types taken from record column types.", "bindparams", "=", "[", "]", "for", "col_name", "in", "field_names", ":", "column_type", "=", "getattr", "(", "record_class", ",", "col_name", ")", ".", "type", "bindparams", ".", "append", "(", "bindparam", "(", "col_name", ",", "type_", "=", "column_type", ")", ")", "# Redefine statement with explicitly typed bind parameters.", "statement", "=", "statement", ".", "bindparams", "(", "*", "bindparams", ")", "# Compile the statement with the session dialect.", "compiled", "=", "statement", ".", "compile", "(", "dialect", "=", "self", ".", "session", ".", "bind", ".", "dialect", ")", "return", "compiled" ]
With transaction isolation level of "read committed" this should generate records with a contiguous sequence of integer IDs, assumes an indexed ID column, the database-side SQL max function, the insert-select-from form, and optimistic concurrency control.
[ "With", "transaction", "isolation", "level", "of", "read", "committed", "this", "should", "generate", "records", "with", "a", "contiguous", "sequence", "of", "integer", "IDs", "assumes", "an", "indexed", "ID", "column", "the", "database", "-", "side", "SQL", "max", "function", "the", "insert", "-", "select", "-", "from", "form", "and", "optimistic", "concurrency", "control", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/sqlalchemy/manager.py#L22-L60
233,838
johnbywater/eventsourcing
eventsourcing/infrastructure/sqlalchemy/manager.py
SQLAlchemyRecordManager.delete_record
def delete_record(self, record): """ Permanently removes record from table. """ try: self.session.delete(record) self.session.commit() except Exception as e: self.session.rollback() raise ProgrammingError(e) finally: self.session.close()
python
def delete_record(self, record): """ Permanently removes record from table. """ try: self.session.delete(record) self.session.commit() except Exception as e: self.session.rollback() raise ProgrammingError(e) finally: self.session.close()
[ "def", "delete_record", "(", "self", ",", "record", ")", ":", "try", ":", "self", ".", "session", ".", "delete", "(", "record", ")", "self", ".", "session", ".", "commit", "(", ")", "except", "Exception", "as", "e", ":", "self", ".", "session", ".", "rollback", "(", ")", "raise", "ProgrammingError", "(", "e", ")", "finally", ":", "self", ".", "session", ".", "close", "(", ")" ]
Permanently removes record from table.
[ "Permanently", "removes", "record", "from", "table", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/sqlalchemy/manager.py#L274-L285
233,839
johnbywater/eventsourcing
eventsourcing/domain/model/timebucketedlog.py
TimebucketedlogRepository.get_or_create
def get_or_create(self, log_name, bucket_size): """ Gets or creates a log. :rtype: Timebucketedlog """ try: return self[log_name] except RepositoryKeyError: return start_new_timebucketedlog(log_name, bucket_size=bucket_size)
python
def get_or_create(self, log_name, bucket_size): """ Gets or creates a log. :rtype: Timebucketedlog """ try: return self[log_name] except RepositoryKeyError: return start_new_timebucketedlog(log_name, bucket_size=bucket_size)
[ "def", "get_or_create", "(", "self", ",", "log_name", ",", "bucket_size", ")", ":", "try", ":", "return", "self", "[", "log_name", "]", "except", "RepositoryKeyError", ":", "return", "start_new_timebucketedlog", "(", "log_name", ",", "bucket_size", "=", "bucket_size", ")" ]
Gets or creates a log. :rtype: Timebucketedlog
[ "Gets", "or", "creates", "a", "log", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/domain/model/timebucketedlog.py#L70-L79
233,840
johnbywater/eventsourcing
eventsourcing/infrastructure/eventplayer.py
EventPlayer.project_events
def project_events(self, initial_state, domain_events): """ Evolves initial state using the sequence of domain events and a mutator function. """ return reduce(self._mutator_func or self.mutate, domain_events, initial_state)
python
def project_events(self, initial_state, domain_events): """ Evolves initial state using the sequence of domain events and a mutator function. """ return reduce(self._mutator_func or self.mutate, domain_events, initial_state)
[ "def", "project_events", "(", "self", ",", "initial_state", ",", "domain_events", ")", ":", "return", "reduce", "(", "self", ".", "_mutator_func", "or", "self", ".", "mutate", ",", "domain_events", ",", "initial_state", ")" ]
Evolves initial state using the sequence of domain events and a mutator function.
[ "Evolves", "initial", "state", "using", "the", "sequence", "of", "domain", "events", "and", "a", "mutator", "function", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/eventplayer.py#L32-L36
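project_events() is just functools.reduce with the entity's mutator: the initial state goes in first, each event is folded in, and the final state comes out. A minimal stand-alone demonstration with a hypothetical mutator:

.. code::

    from functools import reduce

    def mutate(state, event):
        """Hypothetical mutator: apply one event to the current state."""
        kind, value = event
        if kind == 'created':
            return {'total': value}
        if kind == 'added':
            state['total'] += value
            return state
        raise NotImplementedError(kind)

    events = [('created', 0), ('added', 3), ('added', 4)]
    # Exactly what project_events() does: reduce(mutator, events, initial_state).
    assert reduce(mutate, events, None) == {'total': 7}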
233,841
johnbywater/eventsourcing
eventsourcing/domain/model/array.py
BigArray.get_last_array
def get_last_array(self): """ Returns last array in compound. :rtype: (Array, int) """ # Get the root array (might not have been registered). root = self.repo[self.id] # Get length and last item in the root array. apex_id, apex_height = root.get_last_item_and_next_position() # Bail if there isn't anything yet. if apex_id is None: return None, None # Get the current apex array. apex = self.repo[apex_id] assert isinstance(apex, Array) # Descend until hitting the bottom. array = apex array_i = 0 height = apex_height while height > 1: height -= 1 array_id, width = array.get_last_item_and_next_position() assert width > 0 offset = width - 1 array_i += offset * self.repo.array_size ** height array = self.repo[array_id] return array, array_i
python
def get_last_array(self): """ Returns last array in compound. :rtype: CompoundSequenceReader """ # Get the root array (might not have been registered). root = self.repo[self.id] # Get length and last item in the root array. apex_id, apex_height = root.get_last_item_and_next_position() # Bail if there isn't anything yet. if apex_id is None: return None, None # Get the current apex array. apex = self.repo[apex_id] assert isinstance(apex, Array) # Descend until hitting the bottom. array = apex array_i = 0 height = apex_height while height > 1: height -= 1 array_id, width = array.get_last_item_and_next_position() assert width > 0 offset = width - 1 array_i += offset * self.repo.array_size ** height array = self.repo[array_id] return array, array_i
[ "def", "get_last_array", "(", "self", ")", ":", "# Get the root array (might not have been registered).", "root", "=", "self", ".", "repo", "[", "self", ".", "id", "]", "# Get length and last item in the root array.", "apex_id", ",", "apex_height", "=", "root", ".", "get_last_item_and_next_position", "(", ")", "# Bail if there isn't anything yet.", "if", "apex_id", "is", "None", ":", "return", "None", ",", "None", "# Get the current apex array.", "apex", "=", "self", ".", "repo", "[", "apex_id", "]", "assert", "isinstance", "(", "apex", ",", "Array", ")", "# Descend until hitting the bottom.", "array", "=", "apex", "array_i", "=", "0", "height", "=", "apex_height", "while", "height", ">", "1", ":", "height", "-=", "1", "array_id", ",", "width", "=", "array", ".", "get_last_item_and_next_position", "(", ")", "assert", "width", ">", "0", "offset", "=", "width", "-", "1", "array_i", "+=", "offset", "*", "self", ".", "repo", ".", "array_size", "**", "height", "array", "=", "self", ".", "repo", "[", "array_id", "]", "return", "array", ",", "array_i" ]
Returns last array in compound. :rtype: CompoundSequenceReader
[ "Returns", "last", "array", "in", "compound", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/domain/model/array.py#L213-L245
233,842
johnbywater/eventsourcing
eventsourcing/domain/model/array.py
BigArray.calc_parent
def calc_parent(self, i, j, h): """ Returns start and end of span of parent sequence that contains given child. """ N = self.repo.array_size c_i = i c_j = j c_h = h # Calculate the number of the sequence in its row (sequences # with same height), from left to right, starting from 0. c_n = c_i // (N ** c_h) p_n = c_n // N # Position of the child ID in the parent array. p_p = c_n % N # Parent height is child height plus one. p_h = c_h + 1 # Span of sequences in parent row is max size N, to the power of the height. span = N ** p_h # Calculate parent i and j. p_i = p_n * span p_j = p_i + span # Check the parent i,j bounds the child i,j, ie child span is contained by parent span. assert p_i <= c_i, 'i greater on parent than child: {}'.format(p_i, p_j) assert p_j >= c_j, 'j less on parent than child: {}'.format(p_i, p_j) # Return parent i, j, h, p. return p_i, p_j, p_h, p_p
python
def calc_parent(self, i, j, h): """ Returns start and end of span of parent sequence that contains given child. """ N = self.repo.array_size c_i = i c_j = j c_h = h # Calculate the number of the sequence in its row (sequences # with same height), from left to right, starting from 0. c_n = c_i // (N ** c_h) p_n = c_n // N # Position of the child ID in the parent array. p_p = c_n % N # Parent height is child height plus one. p_h = c_h + 1 # Span of sequences in parent row is max size N, to the power of the height. span = N ** p_h # Calculate parent i and j. p_i = p_n * span p_j = p_i + span # Check the parent i,j bounds the child i,j, ie child span is contained by parent span. assert p_i <= c_i, 'i greater on parent than child: {}'.format(p_i, p_j) assert p_j >= c_j, 'j less on parent than child: {}'.format(p_i, p_j) # Return parent i, j, h, p. return p_i, p_j, p_h, p_p
[ "def", "calc_parent", "(", "self", ",", "i", ",", "j", ",", "h", ")", ":", "N", "=", "self", ".", "repo", ".", "array_size", "c_i", "=", "i", "c_j", "=", "j", "c_h", "=", "h", "# Calculate the number of the sequence in its row (sequences", "# with same height), from left to right, starting from 0.", "c_n", "=", "c_i", "//", "(", "N", "**", "c_h", ")", "p_n", "=", "c_n", "//", "N", "# Position of the child ID in the parent array.", "p_p", "=", "c_n", "%", "N", "# Parent height is child height plus one.", "p_h", "=", "c_h", "+", "1", "# Span of sequences in parent row is max size N, to the power of the height.", "span", "=", "N", "**", "p_h", "# Calculate parent i and j.", "p_i", "=", "p_n", "*", "span", "p_j", "=", "p_i", "+", "span", "# Check the parent i,j bounds the child i,j, ie child span is contained by parent span.", "assert", "p_i", "<=", "c_i", ",", "'i greater on parent than child: {}'", ".", "format", "(", "p_i", ",", "p_j", ")", "assert", "p_j", ">=", "c_j", ",", "'j less on parent than child: {}'", ".", "format", "(", "p_i", ",", "p_j", ")", "# Return parent i, j, h, p.", "return", "p_i", ",", "p_j", ",", "p_h", ",", "p_p" ]
Returns start and end of span of parent sequence that contains given child.
[ "Returns", "get_big_array", "and", "end", "of", "span", "of", "parent", "sequence", "that", "contains", "given", "child", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/domain/model/array.py#L372-L397
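The parent-span arithmetic in calc_parent can be checked in isolation. A small sketch assuming an array size of N=4; the function below repeats the same calculation without the aggregate class around it:

def calc_parent_span(i, j, h, N=4):
    # Same arithmetic as BigArray.calc_parent, with the array size passed in.
    c_n = i // (N ** h)   # number of the child sequence in its row
    p_n = c_n // N        # number of the parent sequence in its row
    p_p = c_n % N         # position of the child ID within the parent
    p_h = h + 1           # parent sits one level higher
    span = N ** p_h       # width of one parent span
    p_i = p_n * span
    p_j = p_i + span
    assert p_i <= i and p_j >= j  # parent span must contain the child span
    return p_i, p_j, p_h, p_p

# With N=4, the child spanning 4..8 at height 1 lands in slot 1 of the
# parent spanning 0..16 at height 2.
assert calc_parent_span(4, 8, 1) == (0, 16, 2, 1)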
233,843
johnbywater/eventsourcing
eventsourcing/infrastructure/sequenceditemmapper.py
SequencedItemMapper.item_from_event
def item_from_event(self, domain_event): """ Constructs a sequenced item from a domain event. """ item_args = self.construct_item_args(domain_event) return self.construct_sequenced_item(item_args)
python
def item_from_event(self, domain_event): """ Constructs a sequenced item from a domain event. """ item_args = self.construct_item_args(domain_event) return self.construct_sequenced_item(item_args)
[ "def", "item_from_event", "(", "self", ",", "domain_event", ")", ":", "item_args", "=", "self", ".", "construct_item_args", "(", "domain_event", ")", "return", "self", ".", "construct_sequenced_item", "(", "item_args", ")" ]
Constructs a sequenced item from a domain event.
[ "Constructs", "a", "sequenced", "item", "from", "a", "domain", "event", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/sequenceditemmapper.py#L41-L46
233,844
johnbywater/eventsourcing
eventsourcing/infrastructure/sequenceditemmapper.py
SequencedItemMapper.construct_item_args
def construct_item_args(self, domain_event): """ Constructs attributes of a sequenced item from the given domain event. """ # Get the sequence ID. sequence_id = domain_event.__dict__[self.sequence_id_attr_name] # Get the position in the sequence. position = getattr(domain_event, self.position_attr_name, None) # Get topic and data. topic, state = self.get_item_topic_and_state( domain_event.__class__, domain_event.__dict__ ) # Get the 'other' args. # - these are meant to be derivative of the other attributes, # to populate database fields, and shouldn't affect the hash. other_args = tuple((getattr(domain_event, name) for name in self.other_attr_names)) return (sequence_id, position, topic, state) + other_args
python
def construct_item_args(self, domain_event): """ Constructs attributes of a sequenced item from the given domain event. """ # Get the sequence ID. sequence_id = domain_event.__dict__[self.sequence_id_attr_name] # Get the position in the sequence. position = getattr(domain_event, self.position_attr_name, None) # Get topic and data. topic, state = self.get_item_topic_and_state( domain_event.__class__, domain_event.__dict__ ) # Get the 'other' args. # - these are meant to be derivative of the other attributes, # to populate database fields, and shouldn't affect the hash. other_args = tuple((getattr(domain_event, name) for name in self.other_attr_names)) return (sequence_id, position, topic, state) + other_args
[ "def", "construct_item_args", "(", "self", ",", "domain_event", ")", ":", "# Get the sequence ID.", "sequence_id", "=", "domain_event", ".", "__dict__", "[", "self", ".", "sequence_id_attr_name", "]", "# Get the position in the sequence.", "position", "=", "getattr", "(", "domain_event", ",", "self", ".", "position_attr_name", ",", "None", ")", "# Get topic and data.", "topic", ",", "state", "=", "self", ".", "get_item_topic_and_state", "(", "domain_event", ".", "__class__", ",", "domain_event", ".", "__dict__", ")", "# Get the 'other' args.", "# - these are meant to be derivative of the other attributes,", "# to populate database fields, and shouldn't affect the hash.", "other_args", "=", "tuple", "(", "(", "getattr", "(", "domain_event", ",", "name", ")", "for", "name", "in", "self", ".", "other_attr_names", ")", ")", "return", "(", "sequence_id", ",", "position", ",", "topic", ",", "state", ")", "+", "other_args" ]
Constructs attributes of a sequenced item from the given domain event.
[ "Constructs", "attributes", "of", "a", "sequenced", "item", "from", "the", "given", "domain", "event", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/sequenceditemmapper.py#L48-L69
233,845
johnbywater/eventsourcing
eventsourcing/infrastructure/sequenceditemmapper.py
SequencedItemMapper.event_from_item
def event_from_item(self, sequenced_item): """ Reconstructs domain event from stored event topic and event attrs. Used in the event store when getting domain events. """ assert isinstance(sequenced_item, self.sequenced_item_class), ( self.sequenced_item_class, type(sequenced_item) ) # Get the topic and state. topic = getattr(sequenced_item, self.field_names.topic) state = getattr(sequenced_item, self.field_names.state) return self.event_from_topic_and_state(topic, state)
python
def event_from_item(self, sequenced_item): """ Reconstructs domain event from stored event topic and event attrs. Used in the event store when getting domain events. """ assert isinstance(sequenced_item, self.sequenced_item_class), ( self.sequenced_item_class, type(sequenced_item) ) # Get the topic and state. topic = getattr(sequenced_item, self.field_names.topic) state = getattr(sequenced_item, self.field_names.state) return self.event_from_topic_and_state(topic, state)
[ "def", "event_from_item", "(", "self", ",", "sequenced_item", ")", ":", "assert", "isinstance", "(", "sequenced_item", ",", "self", ".", "sequenced_item_class", ")", ",", "(", "self", ".", "sequenced_item_class", ",", "type", "(", "sequenced_item", ")", ")", "# Get the topic and state.", "topic", "=", "getattr", "(", "sequenced_item", ",", "self", ".", "field_names", ".", "topic", ")", "state", "=", "getattr", "(", "sequenced_item", ",", "self", ".", "field_names", ".", "state", ")", "return", "self", ".", "event_from_topic_and_state", "(", "topic", ",", "state", ")" ]
Reconstructs domain event from stored event topic and event attrs. Used in the event store when getting domain events.
[ "Reconstructs", "domain", "event", "from", "stored", "event", "topic", "and", "event", "attrs", ".", "Used", "in", "the", "event", "store", "when", "getting", "domain", "events", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/sequenceditemmapper.py#L87-L100
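Taken together, item_from_event, construct_item_args and event_from_item amount to a round trip between an event object and a (sequence_id, position, topic, state) tuple. A simplified sketch of that round trip; the ThingRenamed event, the JSON serialization, and the elided topic lookup are assumptions for illustration, not the mapper's exact behaviour:

import json
from collections import namedtuple

SequencedItem = namedtuple('SequencedItem', ['sequence_id', 'position', 'topic', 'state'])

class ThingRenamed:
    # Hypothetical domain event with the usual originator attributes.
    def __init__(self, originator_id, originator_version, name):
        self.originator_id = originator_id
        self.originator_version = originator_version
        self.name = name

def item_from_event(event):
    # Mirrors construct_item_args: sequence ID, position, topic, then state.
    topic = '{}#{}'.format(event.__class__.__module__, event.__class__.__qualname__)
    state = json.dumps(event.__dict__)
    return SequencedItem(event.originator_id, event.originator_version, topic, state)

def event_from_item(item):
    # Mirrors event_from_item: resolve the topic, then restore the attributes.
    event = ThingRenamed.__new__(ThingRenamed)  # topic resolution elided for brevity
    event.__dict__.update(json.loads(item.state))
    return event

item = item_from_event(ThingRenamed('a1', 3, 'new name'))
assert event_from_item(item).name == 'new name'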
233,846
johnbywater/eventsourcing
eventsourcing/infrastructure/base.py
AbstractSequencedItemRecordManager.get_item
def get_item(self, sequence_id, position): """ Gets sequenced item from the datastore. """ return self.from_record(self.get_record(sequence_id, position))
python
def get_item(self, sequence_id, position): """ Gets sequenced item from the datastore. """ return self.from_record(self.get_record(sequence_id, position))
[ "def", "get_item", "(", "self", ",", "sequence_id", ",", "position", ")", ":", "return", "self", ".", "from_record", "(", "self", ".", "get_record", "(", "sequence_id", ",", "position", ")", ")" ]
Gets sequenced item from the datastore.
[ "Gets", "sequenced", "item", "from", "the", "datastore", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/base.py#L55-L59
233,847
johnbywater/eventsourcing
eventsourcing/infrastructure/base.py
AbstractSequencedItemRecordManager.get_items
def get_items(self, sequence_id, gt=None, gte=None, lt=None, lte=None, limit=None, query_ascending=True, results_ascending=True): """ Returns sequenced item generator. """ records = self.get_records( sequence_id=sequence_id, gt=gt, gte=gte, lt=lt, lte=lte, limit=limit, query_ascending=query_ascending, results_ascending=results_ascending, ) for item in map(self.from_record, records): yield item
python
def get_items(self, sequence_id, gt=None, gte=None, lt=None, lte=None, limit=None, query_ascending=True, results_ascending=True): """ Returns sequenced item generator. """ records = self.get_records( sequence_id=sequence_id, gt=gt, gte=gte, lt=lt, lte=lte, limit=limit, query_ascending=query_ascending, results_ascending=results_ascending, ) for item in map(self.from_record, records): yield item
[ "def", "get_items", "(", "self", ",", "sequence_id", ",", "gt", "=", "None", ",", "gte", "=", "None", ",", "lt", "=", "None", ",", "lte", "=", "None", ",", "limit", "=", "None", ",", "query_ascending", "=", "True", ",", "results_ascending", "=", "True", ")", ":", "records", "=", "self", ".", "get_records", "(", "sequence_id", "=", "sequence_id", ",", "gt", "=", "gt", ",", "gte", "=", "gte", ",", "lt", "=", "lt", ",", "lte", "=", "lte", ",", "limit", "=", "limit", ",", "query_ascending", "=", "query_ascending", ",", "results_ascending", "=", "results_ascending", ",", ")", "for", "item", "in", "map", "(", "self", ".", "from_record", ",", "records", ")", ":", "yield", "item" ]
Returns sequenced item generator.
[ "Returns", "sequenced", "item", "generator", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/base.py#L67-L84
233,848
johnbywater/eventsourcing
eventsourcing/infrastructure/base.py
AbstractSequencedItemRecordManager.to_record
def to_record(self, sequenced_item): """ Constructs a record object from given sequenced item object. """ kwargs = self.get_field_kwargs(sequenced_item) # Supply application_name, if needed. if hasattr(self.record_class, 'application_name'): kwargs['application_name'] = self.application_name # Supply pipeline_id, if needed. if hasattr(self.record_class, 'pipeline_id'): kwargs['pipeline_id'] = self.pipeline_id return self.record_class(**kwargs)
python
def to_record(self, sequenced_item): """ Constructs a record object from given sequenced item object. """ kwargs = self.get_field_kwargs(sequenced_item) # Supply application_name, if needed. if hasattr(self.record_class, 'application_name'): kwargs['application_name'] = self.application_name # Supply pipeline_id, if needed. if hasattr(self.record_class, 'pipeline_id'): kwargs['pipeline_id'] = self.pipeline_id return self.record_class(**kwargs)
[ "def", "to_record", "(", "self", ",", "sequenced_item", ")", ":", "kwargs", "=", "self", ".", "get_field_kwargs", "(", "sequenced_item", ")", "# Supply application_name, if needed.", "if", "hasattr", "(", "self", ".", "record_class", ",", "'application_name'", ")", ":", "kwargs", "[", "'application_name'", "]", "=", "self", ".", "application_name", "# Supply pipeline_id, if needed.", "if", "hasattr", "(", "self", ".", "record_class", ",", "'pipeline_id'", ")", ":", "kwargs", "[", "'pipeline_id'", "]", "=", "self", ".", "pipeline_id", "return", "self", ".", "record_class", "(", "*", "*", "kwargs", ")" ]
Constructs a record object from given sequenced item object.
[ "Constructs", "a", "record", "object", "from", "given", "sequenced", "item", "object", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/base.py#L99-L110
233,849
johnbywater/eventsourcing
eventsourcing/infrastructure/base.py
AbstractSequencedItemRecordManager.from_record
def from_record(self, record): """ Constructs and returns a sequenced item object, from given ORM object. """ kwargs = self.get_field_kwargs(record) return self.sequenced_item_class(**kwargs)
python
def from_record(self, record): """ Constructs and returns a sequenced item object, from given ORM object. """ kwargs = self.get_field_kwargs(record) return self.sequenced_item_class(**kwargs)
[ "def", "from_record", "(", "self", ",", "record", ")", ":", "kwargs", "=", "self", ".", "get_field_kwargs", "(", "record", ")", "return", "self", ".", "sequenced_item_class", "(", "*", "*", "kwargs", ")" ]
Constructs and returns a sequenced item object, from given ORM object.
[ "Constructs", "and", "returns", "a", "sequenced", "item", "object", "from", "given", "ORM", "object", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/base.py#L112-L117
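to_record and from_record are symmetric: each reads the same set of field names off one kind of object and passes them as keyword arguments to the other's constructor. A minimal sketch of that idea with hypothetical classes (the real manager keeps the shared names in self.field_names):

from collections import namedtuple

StoredEvent = namedtuple('StoredEvent', ['sequence_id', 'position', 'topic', 'state'])

class EventRecord:
    # Hypothetical ORM-style record class sharing the same field names.
    def __init__(self, sequence_id, position, topic, state):
        self.sequence_id = sequence_id
        self.position = position
        self.topic = topic
        self.state = state

def get_field_kwargs(obj, field_names=StoredEvent._fields):
    # Read the shared fields off either kind of object.
    return {name: getattr(obj, name) for name in field_names}

item = StoredEvent('a1', 0, 'example#Topic', '{}')
record = EventRecord(**get_field_kwargs(item))          # to_record direction
assert StoredEvent(**get_field_kwargs(record)) == item  # from_record direction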
233,850
johnbywater/eventsourcing
eventsourcing/infrastructure/base.py
ACIDRecordManager.get_pipeline_and_notification_id
def get_pipeline_and_notification_id(self, sequence_id, position): """ Returns pipeline ID and notification ID for event at given position in given sequence. """ # Todo: Optimise query by selecting only two columns, pipeline_id and id (notification ID)? record = self.get_record(sequence_id, position) notification_id = getattr(record, self.notification_id_name) return record.pipeline_id, notification_id
python
def get_pipeline_and_notification_id(self, sequence_id, position): """ Returns pipeline ID and notification ID for event at given position in given sequence. """ # Todo: Optimise query by selecting only two columns, pipeline_id and id (notification ID)? record = self.get_record(sequence_id, position) notification_id = getattr(record, self.notification_id_name) return record.pipeline_id, notification_id
[ "def", "get_pipeline_and_notification_id", "(", "self", ",", "sequence_id", ",", "position", ")", ":", "# Todo: Optimise query by selecting only two columns, pipeline_id and id (notification ID)?", "record", "=", "self", ".", "get_record", "(", "sequence_id", ",", "position", ")", "notification_id", "=", "getattr", "(", "record", ",", "self", ".", "notification_id_name", ")", "return", "record", ".", "pipeline_id", ",", "notification_id" ]
Returns pipeline ID and notification ID for event at given position in given sequence.
[ "Returns", "pipeline", "ID", "and", "notification", "ID", "for", "event", "at", "given", "position", "in", "given", "sequence", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/base.py#L194-L202
233,851
johnbywater/eventsourcing
eventsourcing/infrastructure/base.py
SQLRecordManager.insert_select_max
def insert_select_max(self): """ SQL statement that inserts records with contiguous IDs, by selecting max ID from indexed table records. """ if self._insert_select_max is None: if hasattr(self.record_class, 'application_name'): # Todo: Maybe make it support application_name without pipeline_id? assert hasattr(self.record_class, 'pipeline_id'), self.record_class tmpl = self._insert_select_max_tmpl + self._where_application_name_tmpl else: tmpl = self._insert_select_max_tmpl self._insert_select_max = self._prepare_insert( tmpl=tmpl, record_class=self.record_class, field_names=list(self.field_names), ) return self._insert_select_max
python
def insert_select_max(self): """ SQL statement that inserts records with contiguous IDs, by selecting max ID from indexed table records. """ if self._insert_select_max is None: if hasattr(self.record_class, 'application_name'): # Todo: Maybe make it support application_name without pipeline_id? assert hasattr(self.record_class, 'pipeline_id'), self.record_class tmpl = self._insert_select_max_tmpl + self._where_application_name_tmpl else: tmpl = self._insert_select_max_tmpl self._insert_select_max = self._prepare_insert( tmpl=tmpl, record_class=self.record_class, field_names=list(self.field_names), ) return self._insert_select_max
[ "def", "insert_select_max", "(", "self", ")", ":", "if", "self", ".", "_insert_select_max", "is", "None", ":", "if", "hasattr", "(", "self", ".", "record_class", ",", "'application_name'", ")", ":", "# Todo: Maybe make it support application_name without pipeline_id?", "assert", "hasattr", "(", "self", ".", "record_class", ",", "'pipeline_id'", ")", ",", "self", ".", "record_class", "tmpl", "=", "self", ".", "_insert_select_max_tmpl", "+", "self", ".", "_where_application_name_tmpl", "else", ":", "tmpl", "=", "self", ".", "_insert_select_max_tmpl", "self", ".", "_insert_select_max", "=", "self", ".", "_prepare_insert", "(", "tmpl", "=", "tmpl", ",", "record_class", "=", "self", ".", "record_class", ",", "field_names", "=", "list", "(", "self", ".", "field_names", ")", ",", ")", "return", "self", ".", "_insert_select_max" ]
SQL statement that inserts records with contiguous IDs, by selecting max ID from indexed table records.
[ "SQL", "statement", "that", "inserts", "records", "with", "contiguous", "IDs", "by", "selecting", "max", "ID", "from", "indexed", "table", "records", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/base.py#L236-L253
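The point of the insert-select-max statement is that the next contiguous ID is computed inside the INSERT itself, against the indexed table. A sketch of the same idea against SQLite; the library's actual templates vary per dialect, so the SQL below is illustrative only:

import sqlite3

db = sqlite3.connect(':memory:')
db.execute('CREATE TABLE events (id INTEGER PRIMARY KEY, topic TEXT, state TEXT)')

# Assign the next contiguous ID by selecting MAX(id) in the same statement.
insert_select_max = (
    'INSERT INTO events (id, topic, state) '
    'SELECT COALESCE(MAX(id), 0) + 1, ?, ? FROM events'
)
db.execute(insert_select_max, ('example#Topic', '{}'))
db.execute(insert_select_max, ('example#Topic', '{}'))
assert [r[0] for r in db.execute('SELECT id FROM events ORDER BY id')] == [1, 2]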
233,852
johnbywater/eventsourcing
eventsourcing/infrastructure/base.py
SQLRecordManager.insert_values
def insert_values(self): """ SQL statement that inserts records without ID. """ if self._insert_values is None: self._insert_values = self._prepare_insert( tmpl=self._insert_values_tmpl, placeholder_for_id=True, record_class=self.record_class, field_names=self.field_names, ) return self._insert_values
python
def insert_values(self): """ SQL statement that inserts records without ID. """ if self._insert_values is None: self._insert_values = self._prepare_insert( tmpl=self._insert_values_tmpl, placeholder_for_id=True, record_class=self.record_class, field_names=self.field_names, ) return self._insert_values
[ "def", "insert_values", "(", "self", ")", ":", "if", "self", ".", "_insert_values", "is", "None", ":", "self", ".", "_insert_values", "=", "self", ".", "_prepare_insert", "(", "tmpl", "=", "self", ".", "_insert_values_tmpl", ",", "placeholder_for_id", "=", "True", ",", "record_class", "=", "self", ".", "record_class", ",", "field_names", "=", "self", ".", "field_names", ",", ")", "return", "self", ".", "_insert_values" ]
SQL statement that inserts records without ID.
[ "SQL", "statement", "that", "inserts", "records", "without", "ID", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/base.py#L270-L281
233,853
johnbywater/eventsourcing
eventsourcing/infrastructure/base.py
SQLRecordManager.insert_tracking_record
def insert_tracking_record(self): """ SQL statement that inserts tracking records. """ if self._insert_tracking_record is None: self._insert_tracking_record = self._prepare_insert( tmpl=self._insert_values_tmpl, placeholder_for_id=True, record_class=self.tracking_record_class, field_names=self.tracking_record_field_names, ) return self._insert_tracking_record
python
def insert_tracking_record(self): """ SQL statement that inserts tracking records. """ if self._insert_tracking_record is None: self._insert_tracking_record = self._prepare_insert( tmpl=self._insert_values_tmpl, placeholder_for_id=True, record_class=self.tracking_record_class, field_names=self.tracking_record_field_names, ) return self._insert_tracking_record
[ "def", "insert_tracking_record", "(", "self", ")", ":", "if", "self", ".", "_insert_tracking_record", "is", "None", ":", "self", ".", "_insert_tracking_record", "=", "self", ".", "_prepare_insert", "(", "tmpl", "=", "self", ".", "_insert_values_tmpl", ",", "placeholder_for_id", "=", "True", ",", "record_class", "=", "self", ".", "tracking_record_class", ",", "field_names", "=", "self", ".", "tracking_record_field_names", ",", ")", "return", "self", ".", "_insert_tracking_record" ]
SQL statement that inserts tracking records.
[ "SQL", "statement", "that", "inserts", "tracking", "records", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/infrastructure/base.py#L284-L295
233,854
johnbywater/eventsourcing
eventsourcing/contrib/paxos/application.py
PaxosAggregate.start
def start(cls, originator_id, quorum_size, network_uid): """ Factory method that returns a new Paxos aggregate. """ assert isinstance(quorum_size, int), "Not an integer: {}".format(quorum_size) return cls.__create__( event_class=cls.Started, originator_id=originator_id, quorum_size=quorum_size, network_uid=network_uid )
python
def start(cls, originator_id, quorum_size, network_uid): """ Factory method that returns a new Paxos aggregate. """ assert isinstance(quorum_size, int), "Not an integer: {}".format(quorum_size) return cls.__create__( event_class=cls.Started, originator_id=originator_id, quorum_size=quorum_size, network_uid=network_uid )
[ "def", "start", "(", "cls", ",", "originator_id", ",", "quorum_size", ",", "network_uid", ")", ":", "assert", "isinstance", "(", "quorum_size", ",", "int", ")", ",", "\"Not an integer: {}\"", ".", "format", "(", "quorum_size", ")", "return", "cls", ".", "__create__", "(", "event_class", "=", "cls", ".", "Started", ",", "originator_id", "=", "originator_id", ",", "quorum_size", "=", "quorum_size", ",", "network_uid", "=", "network_uid", ")" ]
Factory method that returns a new Paxos aggregate.
[ "Factory", "method", "that", "returns", "a", "new", "Paxos", "aggregate", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/contrib/paxos/application.py#L105-L115
233,855
johnbywater/eventsourcing
eventsourcing/contrib/paxos/application.py
PaxosAggregate.propose_value
def propose_value(self, value, assume_leader=False): """ Proposes a value to the network. """ if value is None: raise ValueError("Not allowed to propose value None") paxos = self.paxos_instance paxos.leader = assume_leader msg = paxos.propose_value(value) if msg is None: msg = paxos.prepare() self.setattrs_from_paxos(paxos) self.announce(msg) return msg
python
def propose_value(self, value, assume_leader=False): """ Proposes a value to the network. """ if value is None: raise ValueError("Not allowed to propose value None") paxos = self.paxos_instance paxos.leader = assume_leader msg = paxos.propose_value(value) if msg is None: msg = paxos.prepare() self.setattrs_from_paxos(paxos) self.announce(msg) return msg
[ "def", "propose_value", "(", "self", ",", "value", ",", "assume_leader", "=", "False", ")", ":", "if", "value", "is", "None", ":", "raise", "ValueError", "(", "\"Not allowed to propose value None\"", ")", "paxos", "=", "self", ".", "paxos_instance", "paxos", ".", "leader", "=", "assume_leader", "msg", "=", "paxos", ".", "propose_value", "(", "value", ")", "if", "msg", "is", "None", ":", "msg", "=", "paxos", ".", "prepare", "(", ")", "self", ".", "setattrs_from_paxos", "(", "paxos", ")", "self", ".", "announce", "(", "msg", ")", "return", "msg" ]
Proposes a value to the network.
[ "Proposes", "a", "value", "to", "the", "network", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/contrib/paxos/application.py#L117-L130
233,856
johnbywater/eventsourcing
eventsourcing/contrib/paxos/application.py
PaxosAggregate.receive_message
def receive_message(self, msg): """ Responds to messages from other participants. """ if isinstance(msg, Resolution): return paxos = self.paxos_instance while msg: if isinstance(msg, Resolution): self.print_if_verbose("{} resolved value {}".format(self.network_uid, msg.value)) break else: self.print_if_verbose("{} <- {} <- {}".format(self.network_uid, msg.__class__.__name__, msg.from_uid)) msg = paxos.receive(msg) # Todo: Make it optional not to announce resolution (without which it's hard to see final value). do_announce_resolution = True if msg and (do_announce_resolution or not isinstance(msg, Resolution)): self.announce(msg) self.setattrs_from_paxos(paxos)
python
def receive_message(self, msg): """ Responds to messages from other participants. """ if isinstance(msg, Resolution): return paxos = self.paxos_instance while msg: if isinstance(msg, Resolution): self.print_if_verbose("{} resolved value {}".format(self.network_uid, msg.value)) break else: self.print_if_verbose("{} <- {} <- {}".format(self.network_uid, msg.__class__.__name__, msg.from_uid)) msg = paxos.receive(msg) # Todo: Make it optional not to announce resolution (without which it's hard to see final value). do_announce_resolution = True if msg and (do_announce_resolution or not isinstance(msg, Resolution)): self.announce(msg) self.setattrs_from_paxos(paxos)
[ "def", "receive_message", "(", "self", ",", "msg", ")", ":", "if", "isinstance", "(", "msg", ",", "Resolution", ")", ":", "return", "paxos", "=", "self", ".", "paxos_instance", "while", "msg", ":", "if", "isinstance", "(", "msg", ",", "Resolution", ")", ":", "self", ".", "print_if_verbose", "(", "\"{} resolved value {}\"", ".", "format", "(", "self", ".", "network_uid", ",", "msg", ".", "value", ")", ")", "break", "else", ":", "self", ".", "print_if_verbose", "(", "\"{} <- {} <- {}\"", ".", "format", "(", "self", ".", "network_uid", ",", "msg", ".", "__class__", ".", "__name__", ",", "msg", ".", "from_uid", ")", ")", "msg", "=", "paxos", ".", "receive", "(", "msg", ")", "# Todo: Make it optional not to announce resolution (without which it's hard to see final value).", "do_announce_resolution", "=", "True", "if", "msg", "and", "(", "do_announce_resolution", "or", "not", "isinstance", "(", "msg", ",", "Resolution", ")", ")", ":", "self", ".", "announce", "(", "msg", ")", "self", ".", "setattrs_from_paxos", "(", "paxos", ")" ]
Responds to messages from other participants.
[ "Responds", "to", "messages", "from", "other", "participants", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/contrib/paxos/application.py#L132-L151
233,857
johnbywater/eventsourcing
eventsourcing/contrib/paxos/application.py
PaxosAggregate.announce
def announce(self, msg): """ Announces a Paxos message. """ self.print_if_verbose("{} -> {}".format(self.network_uid, msg.__class__.__name__)) self.__trigger_event__( event_class=self.MessageAnnounced, msg=msg, )
python
def announce(self, msg): """ Announces a Paxos message. """ self.print_if_verbose("{} -> {}".format(self.network_uid, msg.__class__.__name__)) self.__trigger_event__( event_class=self.MessageAnnounced, msg=msg, )
[ "def", "announce", "(", "self", ",", "msg", ")", ":", "self", ".", "print_if_verbose", "(", "\"{} -> {}\"", ".", "format", "(", "self", ".", "network_uid", ",", "msg", ".", "__class__", ".", "__name__", ")", ")", "self", ".", "__trigger_event__", "(", "event_class", "=", "self", ".", "MessageAnnounced", ",", "msg", "=", "msg", ",", ")" ]
Announces a Paxos message.
[ "Announces", "a", "Paxos", "message", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/contrib/paxos/application.py#L153-L161
233,858
johnbywater/eventsourcing
eventsourcing/contrib/paxos/application.py
PaxosAggregate.setattrs_from_paxos
def setattrs_from_paxos(self, paxos): """ Registers changes of attribute value on Paxos instance. """ changes = {} for name in self.paxos_variables: paxos_value = getattr(paxos, name) if paxos_value != getattr(self, name, None): self.print_if_verbose("{} {}: {}".format(self.network_uid, name, paxos_value)) changes[name] = paxos_value setattr(self, name, paxos_value) if changes: self.__trigger_event__( event_class=self.AttributesChanged, changes=changes )
python
def setattrs_from_paxos(self, paxos): """ Registers changes of attribute value on Paxos instance. """ changes = {} for name in self.paxos_variables: paxos_value = getattr(paxos, name) if paxos_value != getattr(self, name, None): self.print_if_verbose("{} {}: {}".format(self.network_uid, name, paxos_value)) changes[name] = paxos_value setattr(self, name, paxos_value) if changes: self.__trigger_event__( event_class=self.AttributesChanged, changes=changes )
[ "def", "setattrs_from_paxos", "(", "self", ",", "paxos", ")", ":", "changes", "=", "{", "}", "for", "name", "in", "self", ".", "paxos_variables", ":", "paxos_value", "=", "getattr", "(", "paxos", ",", "name", ")", "if", "paxos_value", "!=", "getattr", "(", "self", ",", "name", ",", "None", ")", ":", "self", ".", "print_if_verbose", "(", "\"{} {}: {}\"", ".", "format", "(", "self", ".", "network_uid", ",", "name", ",", "paxos_value", ")", ")", "changes", "[", "name", "]", "=", "paxos_value", "setattr", "(", "self", ",", "name", ",", "paxos_value", ")", "if", "changes", ":", "self", ".", "__trigger_event__", "(", "event_class", "=", "self", ".", "AttributesChanged", ",", "changes", "=", "changes", ")" ]
Registers changes of attribute value on Paxos instance.
[ "Registers", "changes", "of", "attribute", "value", "on", "Paxos", "instance", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/contrib/paxos/application.py#L163-L178
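The change-detection idiom here, comparing each tracked attribute against getattr(..., None) and collecting only real differences, works on any pair of objects. A standalone sketch with a hypothetical state class:

class PaxosState:
    # Hypothetical stand-in for a paxos_instance with tracked attributes.
    def __init__(self, promised_id=None, accepted_id=None):
        self.promised_id = promised_id
        self.accepted_id = accepted_id

def sync_attrs(target, source, names):
    # Collect only genuine changes, as setattrs_from_paxos does.
    changes = {}
    for name in names:
        value = getattr(source, name)
        if value != getattr(target, name, None):
            changes[name] = value
            setattr(target, name, value)
    return changes

aggregate = PaxosState(promised_id=5)
paxos = PaxosState(promised_id=7)
assert sync_attrs(aggregate, paxos, ('promised_id', 'accepted_id')) == {'promised_id': 7}
assert aggregate.promised_id == 7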
233,859
johnbywater/eventsourcing
eventsourcing/contrib/paxos/application.py
PaxosProcess.propose_value
def propose_value(self, key, value, assume_leader=False): """ Starts new Paxos aggregate and proposes a value for a key. Decorated with retry in case of notification log conflict or operational error. """ assert isinstance(key, UUID) paxos_aggregate = PaxosAggregate.start( originator_id=key, quorum_size=self.quorum_size, network_uid=self.name ) msg = paxos_aggregate.propose_value(value, assume_leader=assume_leader) while msg: msg = paxos_aggregate.receive_message(msg) new_events = paxos_aggregate.__batch_pending_events__() self.record_process_event(ProcessEvent(new_events)) self.repository.take_snapshot(paxos_aggregate.id) self.publish_prompt() return paxos_aggregate
python
def propose_value(self, key, value, assume_leader=False): """ Starts new Paxos aggregate and proposes a value for a key. Decorated with retry in case of notification log conflict or operational error. """ assert isinstance(key, UUID) paxos_aggregate = PaxosAggregate.start( originator_id=key, quorum_size=self.quorum_size, network_uid=self.name ) msg = paxos_aggregate.propose_value(value, assume_leader=assume_leader) while msg: msg = paxos_aggregate.receive_message(msg) new_events = paxos_aggregate.__batch_pending_events__() self.record_process_event(ProcessEvent(new_events)) self.repository.take_snapshot(paxos_aggregate.id) self.publish_prompt() return paxos_aggregate
[ "def", "propose_value", "(", "self", ",", "key", ",", "value", ",", "assume_leader", "=", "False", ")", ":", "assert", "isinstance", "(", "key", ",", "UUID", ")", "paxos_aggregate", "=", "PaxosAggregate", ".", "start", "(", "originator_id", "=", "key", ",", "quorum_size", "=", "self", ".", "quorum_size", ",", "network_uid", "=", "self", ".", "name", ")", "msg", "=", "paxos_aggregate", ".", "propose_value", "(", "value", ",", "assume_leader", "=", "assume_leader", ")", "while", "msg", ":", "msg", "=", "paxos_aggregate", ".", "receive_message", "(", "msg", ")", "new_events", "=", "paxos_aggregate", ".", "__batch_pending_events__", "(", ")", "self", ".", "record_process_event", "(", "ProcessEvent", "(", "new_events", ")", ")", "self", ".", "repository", ".", "take_snapshot", "(", "paxos_aggregate", ".", "id", ")", "self", ".", "publish_prompt", "(", ")", "return", "paxos_aggregate" ]
Starts new Paxos aggregate and proposes a value for a key. Decorated with retry in case of notification log conflict or operational error.
[ "Starts", "new", "Paxos", "aggregate", "and", "proposes", "a", "value", "for", "a", "key", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/contrib/paxos/application.py#L203-L223
233,860
johnbywater/eventsourcing
eventsourcing/contrib/paxos/composable.py
MessageHandler.receive
def receive(self, msg): ''' Message dispatching function. This function accepts any PaxosMessage subclass and calls the appropriate handler function ''' handler = getattr(self, 'receive_' + msg.__class__.__name__.lower(), None) if handler is None: raise InvalidMessageError('Receiving class does not support messages of type: ' + msg.__class__.__name__) return handler(msg)
python
def receive(self, msg): ''' Message dispatching function. This function accepts any PaxosMessage subclass and calls the appropriate handler function ''' handler = getattr(self, 'receive_' + msg.__class__.__name__.lower(), None) if handler is None: raise InvalidMessageError('Receiving class does not support messages of type: ' + msg.__class__.__name__) return handler(msg)
[ "def", "receive", "(", "self", ",", "msg", ")", ":", "handler", "=", "getattr", "(", "self", ",", "'receive_'", "+", "msg", ".", "__class__", ".", "__name__", ".", "lower", "(", ")", ",", "None", ")", "if", "handler", "is", "None", ":", "raise", "InvalidMessageError", "(", "'Receiving class does not support messages of type: '", "+", "msg", ".", "__class__", ".", "__name__", ")", "return", "handler", "(", "msg", ")" ]
Message dispatching function. This function accepts any PaxosMessage subclass and calls the appropriate handler function
[ "Message", "dispatching", "function", ".", "This", "function", "accepts", "any", "PaxosMessage", "subclass", "and", "calls", "the", "appropriate", "handler", "function" ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/contrib/paxos/composable.py#L176-L184
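The dispatch convention is simply that a handler method named receive_<lower-cased message class name> must exist on the receiver. A self-contained sketch of the same getattr-based dispatch; the Handler and Prepare classes are stand-ins:

class InvalidMessageError(Exception):
    pass

class Prepare:
    # Minimal stand-in for a PaxosMessage subclass.
    pass

class Handler:
    def receive(self, msg):
        # Dispatch on the lower-cased class name, as MessageHandler.receive does.
        handler = getattr(self, 'receive_' + msg.__class__.__name__.lower(), None)
        if handler is None:
            raise InvalidMessageError(msg.__class__.__name__)
        return handler(msg)

    def receive_prepare(self, msg):
        return 'handled Prepare'

assert Handler().receive(Prepare()) == 'handled Prepare'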
233,861
johnbywater/eventsourcing
eventsourcing/contrib/paxos/composable.py
Proposer.propose_value
def propose_value(self, value): ''' Sets the proposal value for this node iff this node is not already aware of a previous proposal value. If the node additionally believes itself to be the current leader, an Accept message will be returned ''' if self.proposed_value is None: self.proposed_value = value if self.leader: self.current_accept_msg = Accept(self.network_uid, self.proposal_id, value) return self.current_accept_msg
python
def propose_value(self, value): ''' Sets the proposal value for this node iff this node is not already aware of a previous proposal value. If the node additionally believes itself to be the current leader, an Accept message will be returned ''' if self.proposed_value is None: self.proposed_value = value if self.leader: self.current_accept_msg = Accept(self.network_uid, self.proposal_id, value) return self.current_accept_msg
[ "def", "propose_value", "(", "self", ",", "value", ")", ":", "if", "self", ".", "proposed_value", "is", "None", ":", "self", ".", "proposed_value", "=", "value", "if", "self", ".", "leader", ":", "self", ".", "current_accept_msg", "=", "Accept", "(", "self", ".", "network_uid", ",", "self", ".", "proposal_id", ",", "value", ")", "return", "self", ".", "current_accept_msg" ]
Sets the proposal value for this node iff this node is not already aware of a previous proposal value. If the node additionally believes itself to be the current leader, an Accept message will be returned
[ "Sets", "the", "proposal", "value", "for", "this", "node", "iff", "this", "node", "is", "not", "already", "aware", "of", "a", "previous", "proposal", "value", ".", "If", "the", "node", "additionally", "believes", "itself", "to", "be", "the", "current", "leader", "an", "Accept", "message", "will", "be", "returned" ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/contrib/paxos/composable.py#L210-L221
233,862
johnbywater/eventsourcing
eventsourcing/contrib/paxos/composable.py
Proposer.prepare
def prepare(self): ''' Returns a new Prepare message with a proposal id higher than that of any observed proposals. A side effect of this method is to clear the leader flag if it is currently set. ''' self.leader = False self.promises_received = set() self.nacks_received = set() self.proposal_id = ProposalID(self.highest_proposal_id.number + 1, self.network_uid) self.highest_proposal_id = self.proposal_id self.current_prepare_msg = Prepare(self.network_uid, self.proposal_id) return self.current_prepare_msg
python
def prepare(self): ''' Returns a new Prepare message with a proposal id higher than that of any observed proposals. A side effect of this method is to clear the leader flag if it is currently set. ''' self.leader = False self.promises_received = set() self.nacks_received = set() self.proposal_id = ProposalID(self.highest_proposal_id.number + 1, self.network_uid) self.highest_proposal_id = self.proposal_id self.current_prepare_msg = Prepare(self.network_uid, self.proposal_id) return self.current_prepare_msg
[ "def", "prepare", "(", "self", ")", ":", "self", ".", "leader", "=", "False", "self", ".", "promises_received", "=", "set", "(", ")", "self", ".", "nacks_received", "=", "set", "(", ")", "self", ".", "proposal_id", "=", "ProposalID", "(", "self", ".", "highest_proposal_id", ".", "number", "+", "1", ",", "self", ".", "network_uid", ")", "self", ".", "highest_proposal_id", "=", "self", ".", "proposal_id", "self", ".", "current_prepare_msg", "=", "Prepare", "(", "self", ".", "network_uid", ",", "self", ".", "proposal_id", ")", "return", "self", ".", "current_prepare_msg" ]
Returns a new Prepare message with a proposal id higher than that of any observed proposals. A side effect of this method is to clear the leader flag if it is currently set.
[ "Returns", "a", "new", "Prepare", "message", "with", "a", "proposal", "id", "higher", "than", "that", "of", "any", "observed", "proposals", ".", "A", "side", "effect", "of", "this", "method", "is", "to", "clear", "the", "leader", "flag", "if", "it", "is", "currently", "set", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/contrib/paxos/composable.py#L223-L237
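prepare() relies on proposal IDs being totally ordered across the whole network: the counter orders rounds, and the network UID breaks ties between nodes that pick the same counter. A sketch assuming ProposalID compares the way a (number, uid) tuple does, which a namedtuple gives for free:

from collections import namedtuple

# Assumed shape: the real ProposalID is comparable like this tuple.
ProposalID = namedtuple('ProposalID', ['number', 'uid'])

a = ProposalID(1, 'node-a')
b = ProposalID(1, 'node-b')
assert a < b  # equal numbers break ties on UID, so nodes never tie exactly
# Bumping the counter beats every proposal at the old number, as prepare() does.
assert ProposalID(b.number + 1, 'node-a') > b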
233,863
johnbywater/eventsourcing
eventsourcing/contrib/paxos/composable.py
Proposer.receive_nack
def receive_nack(self, msg): ''' Returns a new Prepare message if the number of Nacks received reaches a quorum. ''' self.observe_proposal(msg.promised_proposal_id) if msg.proposal_id == self.proposal_id and self.nacks_received is not None: self.nacks_received.add(msg.from_uid) if len(self.nacks_received) == self.quorum_size: return self.prepare()
python
def receive_nack(self, msg): ''' Returns a new Prepare message if the number of Nacks received reaches a quorum. ''' self.observe_proposal(msg.promised_proposal_id) if msg.proposal_id == self.proposal_id and self.nacks_received is not None: self.nacks_received.add(msg.from_uid) if len(self.nacks_received) == self.quorum_size: return self.prepare()
[ "def", "receive_nack", "(", "self", ",", "msg", ")", ":", "self", ".", "observe_proposal", "(", "msg", ".", "promised_proposal_id", ")", "if", "msg", ".", "proposal_id", "==", "self", ".", "proposal_id", "and", "self", ".", "nacks_received", "is", "not", "None", ":", "self", ".", "nacks_received", ".", "add", "(", "msg", ".", "from_uid", ")", "if", "len", "(", "self", ".", "nacks_received", ")", "==", "self", ".", "quorum_size", ":", "return", "self", ".", "prepare", "(", ")" ]
Returns a new Prepare message if the number of Nacks received reaches a quorum.
[ "Returns", "a", "new", "Prepare", "message", "if", "the", "number", "of", "Nacks", "received", "reaches", "a", "quorum", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/contrib/paxos/composable.py#L251-L262
233,864
johnbywater/eventsourcing
eventsourcing/contrib/paxos/composable.py
Proposer.receive_promise
def receive_promise(self, msg): ''' Returns an Accept message if a quorum of Promise messages is achieved ''' self.observe_proposal(msg.proposal_id) if not self.leader and msg.proposal_id == self.proposal_id and msg.from_uid not in self.promises_received: self.promises_received.add(msg.from_uid) if self.highest_accepted_id is None or msg.last_accepted_id > self.highest_accepted_id: self.highest_accepted_id = msg.last_accepted_id if msg.last_accepted_value is not None: self.proposed_value = msg.last_accepted_value if len(self.promises_received) == self.quorum_size: self.leader = True if self.proposed_value is not None: self.current_accept_msg = Accept(self.network_uid, self.proposal_id, self.proposed_value) return self.current_accept_msg
python
def receive_promise(self, msg): ''' Returns an Accept message if a quorum of Promise messages is achieved ''' self.observe_proposal(msg.proposal_id) if not self.leader and msg.proposal_id == self.proposal_id and msg.from_uid not in self.promises_received: self.promises_received.add(msg.from_uid) if self.highest_accepted_id is None or msg.last_accepted_id > self.highest_accepted_id: self.highest_accepted_id = msg.last_accepted_id if msg.last_accepted_value is not None: self.proposed_value = msg.last_accepted_value if len(self.promises_received) == self.quorum_size: self.leader = True if self.proposed_value is not None: self.current_accept_msg = Accept(self.network_uid, self.proposal_id, self.proposed_value) return self.current_accept_msg
[ "def", "receive_promise", "(", "self", ",", "msg", ")", ":", "self", ".", "observe_proposal", "(", "msg", ".", "proposal_id", ")", "if", "not", "self", ".", "leader", "and", "msg", ".", "proposal_id", "==", "self", ".", "proposal_id", "and", "msg", ".", "from_uid", "not", "in", "self", ".", "promises_received", ":", "self", ".", "promises_received", ".", "add", "(", "msg", ".", "from_uid", ")", "if", "self", ".", "highest_accepted_id", "is", "None", "or", "msg", ".", "last_accepted_id", ">", "self", ".", "highest_accepted_id", ":", "self", ".", "highest_accepted_id", "=", "msg", ".", "last_accepted_id", "if", "msg", ".", "last_accepted_value", "is", "not", "None", ":", "self", ".", "proposed_value", "=", "msg", ".", "last_accepted_value", "if", "len", "(", "self", ".", "promises_received", ")", "==", "self", ".", "quorum_size", ":", "self", ".", "leader", "=", "True", "if", "self", ".", "proposed_value", "is", "not", "None", ":", "self", ".", "current_accept_msg", "=", "Accept", "(", "self", ".", "network_uid", ",", "self", ".", "proposal_id", ",", "self", ".", "proposed_value", ")", "return", "self", ".", "current_accept_msg" ]
Returns an Accept message if a quorum of Promise messages is achieved
[ "Returns", "an", "Accept", "messages", "if", "a", "quorum", "of", "Promise", "messages", "is", "achieved" ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/contrib/paxos/composable.py#L264-L284
233,865
johnbywater/eventsourcing
eventsourcing/contrib/paxos/composable.py
Acceptor.receive_prepare
def receive_prepare(self, msg): ''' Returns either a Promise or a Nack in response. The Acceptor's state must be persisted to disk prior to transmitting the Promise message. ''' if self.promised_id is None or msg.proposal_id >= self.promised_id: self.promised_id = msg.proposal_id return Promise(self.network_uid, msg.from_uid, self.promised_id, self.accepted_id, self.accepted_value) else: return Nack(self.network_uid, msg.from_uid, msg.proposal_id, self.promised_id)
python
def receive_prepare(self, msg): ''' Returns either a Promise or a Nack in response. The Acceptor's state must be persisted to disk prior to transmitting the Promise message. ''' if self.promised_id is None or msg.proposal_id >= self.promised_id: self.promised_id = msg.proposal_id return Promise(self.network_uid, msg.from_uid, self.promised_id, self.accepted_id, self.accepted_value) else: return Nack(self.network_uid, msg.from_uid, msg.proposal_id, self.promised_id)
[ "def", "receive_prepare", "(", "self", ",", "msg", ")", ":", "if", "self", ".", "promised_id", "is", "None", "or", "msg", ".", "proposal_id", ">=", "self", ".", "promised_id", ":", "self", ".", "promised_id", "=", "msg", ".", "proposal_id", "return", "Promise", "(", "self", ".", "network_uid", ",", "msg", ".", "from_uid", ",", "self", ".", "promised_id", ",", "self", ".", "accepted_id", ",", "self", ".", "accepted_value", ")", "else", ":", "return", "Nack", "(", "self", ".", "network_uid", ",", "msg", ".", "from_uid", ",", "msg", ".", "proposal_id", ",", "self", ".", "promised_id", ")" ]
Returns either a Promise or a Nack in response. The Acceptor's state must be persisted to disk prior to transmitting the Promise message.
[ "Returns", "either", "a", "Promise", "or", "a", "Nack", "in", "response", ".", "The", "Acceptor", "s", "state", "must", "be", "persisted", "to", "disk", "prior", "to", "transmitting", "the", "Promise", "message", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/contrib/paxos/composable.py#L310-L319
233,866
johnbywater/eventsourcing
eventsourcing/contrib/paxos/composable.py
Acceptor.receive_accept
def receive_accept(self, msg): ''' Returns either an Accepted or Nack message in response. The Acceptor's state must be persisted to disk prior to transmitting the Accepted message. ''' if self.promised_id is None or msg.proposal_id >= self.promised_id: self.promised_id = msg.proposal_id self.accepted_id = msg.proposal_id self.accepted_value = msg.proposal_value return Accepted(self.network_uid, msg.proposal_id, msg.proposal_value) else: return Nack(self.network_uid, msg.from_uid, msg.proposal_id, self.promised_id)
python
def receive_accept(self, msg): ''' Returns either an Accepted or Nack message in response. The Acceptor's state must be persisted to disk prior to transmitting the Accepted message. ''' if self.promised_id is None or msg.proposal_id >= self.promised_id: self.promised_id = msg.proposal_id self.accepted_id = msg.proposal_id self.accepted_value = msg.proposal_value return Accepted(self.network_uid, msg.proposal_id, msg.proposal_value) else: return Nack(self.network_uid, msg.from_uid, msg.proposal_id, self.promised_id)
[ "def", "receive_accept", "(", "self", ",", "msg", ")", ":", "if", "self", ".", "promised_id", "is", "None", "or", "msg", ".", "proposal_id", ">=", "self", ".", "promised_id", ":", "self", ".", "promised_id", "=", "msg", ".", "proposal_id", "self", ".", "accepted_id", "=", "msg", ".", "proposal_id", "self", ".", "accepted_value", "=", "msg", ".", "proposal_value", "return", "Accepted", "(", "self", ".", "network_uid", ",", "msg", ".", "proposal_id", ",", "msg", ".", "proposal_value", ")", "else", ":", "return", "Nack", "(", "self", ".", "network_uid", ",", "msg", ".", "from_uid", ",", "msg", ".", "proposal_id", ",", "self", ".", "promised_id", ")" ]
Returns either an Accepted or Nack message in response. The Acceptor's state must be persisted to disk prior to transmitting the Accepted message.
[ "Returns", "either", "an", "Accepted", "or", "Nack", "message", "in", "response", ".", "The", "Acceptor", "s", "state", "must", "be", "persisted", "to", "disk", "prior", "to", "transmitting", "the", "Accepted", "message", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/contrib/paxos/composable.py#L321-L332
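Both acceptor handlers share one guard: act only if the incoming proposal ID is at least the currently promised one, otherwise answer with a Nack. The guard in isolation, using plain tuples as proposal IDs for illustration:

def acceptor_should_act(promised_id, proposal_id):
    # The shared guard from receive_prepare and receive_accept.
    return promised_id is None or proposal_id >= promised_id

assert acceptor_should_act(None, (1, 'a'))          # nothing promised yet
assert acceptor_should_act((1, 'a'), (1, 'a'))      # re-delivery of the same round is fine
assert not acceptor_should_act((2, 'b'), (1, 'a'))  # an older round would get a Nack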
233,867
johnbywater/eventsourcing
eventsourcing/contrib/paxos/composable.py
Learner.receive_accepted
def receive_accepted(self, msg): ''' Called when an Accepted message is received from an acceptor. Once the final value is determined, the return value of this method will be a Resolution message containing the consensual value. Subsequent calls after the resolution is chosen will continue to add new Acceptors to the final_acceptors set and return Resolution messages. ''' if self.final_value is not None: if msg.proposal_id >= self.final_proposal_id and msg.proposal_value == self.final_value: self.final_acceptors.add(msg.from_uid) return Resolution(self.network_uid, self.final_value) last_pn = self.acceptors.get(msg.from_uid) if last_pn is not None and msg.proposal_id <= last_pn: return # Old message self.acceptors[msg.from_uid] = msg.proposal_id if last_pn is not None: # String proposal_key, need string keys for JSON. proposal_key = str(last_pn) ps = self.proposals[proposal_key] ps.retain_count -= 1 ps.acceptors.remove(msg.from_uid) if ps.retain_count == 0: del self.proposals[proposal_key] # String proposal_key, need string keys for JSON. proposal_key = str(msg.proposal_id) if not proposal_key in self.proposals: self.proposals[proposal_key] = ProposalStatus(msg.proposal_value) ps = self.proposals[proposal_key] assert msg.proposal_value == ps.value, 'Value mismatch for single proposal!' ps.accept_count += 1 ps.retain_count += 1 ps.acceptors.add(msg.from_uid) if ps.accept_count == self.quorum_size: self.final_proposal_id = msg.proposal_id self.final_value = msg.proposal_value self.final_acceptors = ps.acceptors self.proposals = None self.acceptors = None return Resolution(self.network_uid, self.final_value)
python
def receive_accepted(self, msg): ''' Called when an Accepted message is received from an acceptor. Once the final value is determined, the return value of this method will be a Resolution message containing the consensual value. Subsequent calls after the resolution is chosen will continue to add new Acceptors to the final_acceptors set and return Resolution messages. ''' if self.final_value is not None: if msg.proposal_id >= self.final_proposal_id and msg.proposal_value == self.final_value: self.final_acceptors.add(msg.from_uid) return Resolution(self.network_uid, self.final_value) last_pn = self.acceptors.get(msg.from_uid) if last_pn is not None and msg.proposal_id <= last_pn: return # Old message self.acceptors[msg.from_uid] = msg.proposal_id if last_pn is not None: # String proposal_key, need string keys for JSON. proposal_key = str(last_pn) ps = self.proposals[proposal_key] ps.retain_count -= 1 ps.acceptors.remove(msg.from_uid) if ps.retain_count == 0: del self.proposals[proposal_key] # String proposal_key, need string keys for JSON. proposal_key = str(msg.proposal_id) if not proposal_key in self.proposals: self.proposals[proposal_key] = ProposalStatus(msg.proposal_value) ps = self.proposals[proposal_key] assert msg.proposal_value == ps.value, 'Value mismatch for single proposal!' ps.accept_count += 1 ps.retain_count += 1 ps.acceptors.add(msg.from_uid) if ps.accept_count == self.quorum_size: self.final_proposal_id = msg.proposal_id self.final_value = msg.proposal_value self.final_acceptors = ps.acceptors self.proposals = None self.acceptors = None return Resolution(self.network_uid, self.final_value)
[ "def", "receive_accepted", "(", "self", ",", "msg", ")", ":", "if", "self", ".", "final_value", "is", "not", "None", ":", "if", "msg", ".", "proposal_id", ">=", "self", ".", "final_proposal_id", "and", "msg", ".", "proposal_value", "==", "self", ".", "final_value", ":", "self", ".", "final_acceptors", ".", "add", "(", "msg", ".", "from_uid", ")", "return", "Resolution", "(", "self", ".", "network_uid", ",", "self", ".", "final_value", ")", "last_pn", "=", "self", ".", "acceptors", ".", "get", "(", "msg", ".", "from_uid", ")", "if", "last_pn", "is", "not", "None", "and", "msg", ".", "proposal_id", "<=", "last_pn", ":", "return", "# Old message", "self", ".", "acceptors", "[", "msg", ".", "from_uid", "]", "=", "msg", ".", "proposal_id", "if", "last_pn", "is", "not", "None", ":", "# String proposal_key, need string keys for JSON.", "proposal_key", "=", "str", "(", "last_pn", ")", "ps", "=", "self", ".", "proposals", "[", "proposal_key", "]", "ps", ".", "retain_count", "-=", "1", "ps", ".", "acceptors", ".", "remove", "(", "msg", ".", "from_uid", ")", "if", "ps", ".", "retain_count", "==", "0", ":", "del", "self", ".", "proposals", "[", "proposal_key", "]", "# String proposal_key, need string keys for JSON.", "proposal_key", "=", "str", "(", "msg", ".", "proposal_id", ")", "if", "not", "proposal_key", "in", "self", ".", "proposals", ":", "self", ".", "proposals", "[", "proposal_key", "]", "=", "ProposalStatus", "(", "msg", ".", "proposal_value", ")", "ps", "=", "self", ".", "proposals", "[", "proposal_key", "]", "assert", "msg", ".", "proposal_value", "==", "ps", ".", "value", ",", "'Value mismatch for single proposal!'", "ps", ".", "accept_count", "+=", "1", "ps", ".", "retain_count", "+=", "1", "ps", ".", "acceptors", ".", "add", "(", "msg", ".", "from_uid", ")", "if", "ps", ".", "accept_count", "==", "self", ".", "quorum_size", ":", "self", ".", "final_proposal_id", "=", "msg", ".", "proposal_id", "self", ".", "final_value", "=", "msg", ".", "proposal_value", "self", ".", "final_acceptors", "=", "ps", ".", "acceptors", "self", ".", "proposals", "=", "None", "self", ".", "acceptors", "=", "None", "return", "Resolution", "(", "self", ".", "network_uid", ",", "self", ".", "final_value", ")" ]
Called when an Accepted message is received from an acceptor. Once the final value is determined, the return value of this method will be a Resolution message containing the consensual value. Subsequent calls after the resolution is chosen will continue to add new Acceptors to the final_acceptors set and return Resolution messages.
[ "Called", "when", "an", "Accepted", "message", "is", "received", "from", "an", "acceptor", ".", "Once", "the", "final", "value", "is", "determined", "the", "return", "value", "of", "this", "method", "will", "be", "a", "Resolution", "message", "containing", "the", "consensual", "value", ".", "Subsequent", "calls", "after", "the", "resolution", "is", "chosen", "will", "continue", "to", "add", "new", "Acceptors", "to", "the", "final_acceptors", "set", "and", "return", "Resolution", "messages", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/contrib/paxos/composable.py#L360-L408
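A minimal standalone sketch of the quorum-counting idea behind receive_accepted, using hypothetical names and ignoring proposal ids, retractions, and message classes; it is not the library's API, only the core tally logic.

from collections import defaultdict

def tally_accepts(accepts, quorum_size):
    # accepts: iterable of (acceptor_uid, proposal_value) pairs
    voters = defaultdict(set)
    for uid, value in accepts:
        voters[value].add(uid)            # a set makes re-accepts idempotent
        if len(voters[value]) >= quorum_size:
            return value                  # quorum reached: this is the resolution
    return None                           # no quorum yet

assert tally_accepts([(1, "x"), (2, "x"), (2, "x"), (3, "x")], 3) == "x"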
233,868
johnbywater/eventsourcing
eventsourcing/utils/topic.py
resolve_topic
def resolve_topic(topic): """Return class described by given topic. Args: topic: A string describing a class. Returns: A class. Raises: TopicResolutionError: If there is no such class. """ try: module_name, _, class_name = topic.partition('#') module = importlib.import_module(module_name) except ImportError as e: raise TopicResolutionError("{}: {}".format(topic, e)) try: cls = resolve_attr(module, class_name) except AttributeError as e: raise TopicResolutionError("{}: {}".format(topic, e)) return cls
python
def resolve_topic(topic): """Return class described by given topic. Args: topic: A string describing a class. Returns: A class. Raises: TopicResolutionError: If there is no such class. """ try: module_name, _, class_name = topic.partition('#') module = importlib.import_module(module_name) except ImportError as e: raise TopicResolutionError("{}: {}".format(topic, e)) try: cls = resolve_attr(module, class_name) except AttributeError as e: raise TopicResolutionError("{}: {}".format(topic, e)) return cls
[ "def", "resolve_topic", "(", "topic", ")", ":", "try", ":", "module_name", ",", "_", ",", "class_name", "=", "topic", ".", "partition", "(", "'#'", ")", "module", "=", "importlib", ".", "import_module", "(", "module_name", ")", "except", "ImportError", "as", "e", ":", "raise", "TopicResolutionError", "(", "\"{}: {}\"", ".", "format", "(", "topic", ",", "e", ")", ")", "try", ":", "cls", "=", "resolve_attr", "(", "module", ",", "class_name", ")", "except", "AttributeError", "as", "e", ":", "raise", "TopicResolutionError", "(", "\"{}: {}\"", ".", "format", "(", "topic", ",", "e", ")", ")", "return", "cls" ]
Return class described by given topic. Args: topic: A string describing a class. Returns: A class. Raises: TopicResolutionError: If there is no such class.
[ "Return", "class", "described", "by", "given", "topic", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/utils/topic.py#L18-L39
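Usage sketch for resolve_topic: the topic string is '<module>#<dotted.attr.path>'. This assumes the eventsourcing package is importable at the path shown in the record; the standard-library target keeps the example self-contained.

from eventsourcing.utils.topic import resolve_topic

# 'collections' is imported, then 'OrderedDict' is resolved on it.
cls = resolve_topic('collections#OrderedDict')
assert cls.__name__ == 'OrderedDict'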
233,869
johnbywater/eventsourcing
eventsourcing/utils/topic.py
resolve_attr
def resolve_attr(obj, path): """A recursive version of getattr for navigating dotted paths. Args: obj: An object for which we want to retrieve a nested attribute. path: A dot separated string containing zero or more attribute names. Returns: The attribute referred to by obj.a1.a2.a3... Raises: AttributeError: If there is no such attribute. """ if not path: return obj head, _, tail = path.partition('.') head_obj = getattr(obj, head) return resolve_attr(head_obj, tail)
python
def resolve_attr(obj, path): """A recursive version of getattr for navigating dotted paths. Args: obj: An object for which we want to retrieve a nested attribute. path: A dot separated string containing zero or more attribute names. Returns: The attribute referred to by obj.a1.a2.a3... Raises: AttributeError: If there is no such attribute. """ if not path: return obj head, _, tail = path.partition('.') head_obj = getattr(obj, head) return resolve_attr(head_obj, tail)
[ "def", "resolve_attr", "(", "obj", ",", "path", ")", ":", "if", "not", "path", ":", "return", "obj", "head", ",", "_", ",", "tail", "=", "path", ".", "partition", "(", "'.'", ")", "head_obj", "=", "getattr", "(", "obj", ",", "head", ")", "return", "resolve_attr", "(", "head_obj", ",", "tail", ")" ]
A recursive version of getattr for navigating dotted paths. Args: obj: An object for which we want to retrieve a nested attribute. path: A dot separated string containing zero or more attribute names. Returns: The attribute referred to by obj.a1.a2.a3... Raises: AttributeError: If there is no such attribute.
[ "A", "recursive", "version", "of", "getattr", "for", "navigating", "dotted", "paths", "." ]
de2c22c653fdccf2f5ee96faea74453ff1847e42
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/utils/topic.py#L42-L59
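Usage sketch for resolve_attr (same assumed module as above): an empty path returns the object itself, otherwise getattr is applied one dotted segment at a time.

import collections
from eventsourcing.utils.topic import resolve_attr

assert resolve_attr(collections, '') is collections
assert resolve_attr(collections, 'OrderedDict.fromkeys').__name__ == 'fromkeys'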
233,870
openeventdata/mordecai
mordecai/utilities.py
make_skip_list
def make_skip_list(cts): """ Return hand-defined list of place names to skip and not attempt to geolocate. If users would like to exclude country names, this would be the function to do it with. """ # maybe make these non-country searches but don't discard, at least for # some (esp. bodies of water) special_terms = ["Europe", "West", "the West", "South Pacific", "Gulf of Mexico", "Atlantic", "the Black Sea", "Black Sea", "North America", "Mideast", "Middle East", "the Middle East", "Asia", "the Caucasus", "Africa", "Central Asia", "Balkans", "Eastern Europe", "Arctic", "Ottoman Empire", "Asia-Pacific", "East Asia", "Horn of Africa", "Americas", "North Africa", "the Strait of Hormuz", "Mediterranean", "East", "North", "South", "Latin America", "Southeast Asia", "Western Pacific", "South Asia", "Persian Gulf", "Central Europe", "Western Hemisphere", "Western Europe", "European Union (E.U.)", "EU", "European Union", "E.U.", "Asia-Pacific", "Europe", "Caribbean", "US", "U.S.", "Persian Gulf", "West Africa", "North", "East", "South", "West", "Western Countries" ] # Some words are recurring spacy problems... spacy_problems = ["Kurd", "Qur'an"] #skip_list = list(cts.keys()) + special_terms skip_list = special_terms + spacy_problems skip_list = set(skip_list) return skip_list
python
def make_skip_list(cts): """ Return hand-defined list of place names to skip and not attempt to geolocate. If users would like to exclude country names, this would be the function to do it with. """ # maybe make these non-country searches but don't discard, at least for # some (esp. bodies of water) special_terms = ["Europe", "West", "the West", "South Pacific", "Gulf of Mexico", "Atlantic", "the Black Sea", "Black Sea", "North America", "Mideast", "Middle East", "the Middle East", "Asia", "the Caucasus", "Africa", "Central Asia", "Balkans", "Eastern Europe", "Arctic", "Ottoman Empire", "Asia-Pacific", "East Asia", "Horn of Africa", "Americas", "North Africa", "the Strait of Hormuz", "Mediterranean", "East", "North", "South", "Latin America", "Southeast Asia", "Western Pacific", "South Asia", "Persian Gulf", "Central Europe", "Western Hemisphere", "Western Europe", "European Union (E.U.)", "EU", "European Union", "E.U.", "Asia-Pacific", "Europe", "Caribbean", "US", "U.S.", "Persian Gulf", "West Africa", "North", "East", "South", "West", "Western Countries" ] # Some words are recurring spacy problems... spacy_problems = ["Kurd", "Qur'an"] #skip_list = list(cts.keys()) + special_terms skip_list = special_terms + spacy_problems skip_list = set(skip_list) return skip_list
[ "def", "make_skip_list", "(", "cts", ")", ":", "# maybe make these non-country searches but don't discard, at least for", "# some (esp. bodies of water)", "special_terms", "=", "[", "\"Europe\"", ",", "\"West\"", ",", "\"the West\"", ",", "\"South Pacific\"", ",", "\"Gulf of Mexico\"", ",", "\"Atlantic\"", ",", "\"the Black Sea\"", ",", "\"Black Sea\"", ",", "\"North America\"", ",", "\"Mideast\"", ",", "\"Middle East\"", ",", "\"the Middle East\"", ",", "\"Asia\"", ",", "\"the Caucasus\"", ",", "\"Africa\"", ",", "\"Central Asia\"", ",", "\"Balkans\"", ",", "\"Eastern Europe\"", ",", "\"Arctic\"", ",", "\"Ottoman Empire\"", ",", "\"Asia-Pacific\"", ",", "\"East Asia\"", ",", "\"Horn of Africa\"", ",", "\"Americas\"", ",", "\"North Africa\"", ",", "\"the Strait of Hormuz\"", ",", "\"Mediterranean\"", ",", "\"East\"", ",", "\"North\"", ",", "\"South\"", ",", "\"Latin America\"", ",", "\"Southeast Asia\"", ",", "\"Western Pacific\"", ",", "\"South Asia\"", ",", "\"Persian Gulf\"", ",", "\"Central Europe\"", ",", "\"Western Hemisphere\"", ",", "\"Western Europe\"", ",", "\"European Union (E.U.)\"", ",", "\"EU\"", ",", "\"European Union\"", ",", "\"E.U.\"", ",", "\"Asia-Pacific\"", ",", "\"Europe\"", ",", "\"Caribbean\"", ",", "\"US\"", ",", "\"U.S.\"", ",", "\"Persian Gulf\"", ",", "\"West Africa\"", ",", "\"North\"", ",", "\"East\"", ",", "\"South\"", ",", "\"West\"", ",", "\"Western Countries\"", "]", "# Some words are recurring spacy problems...", "spacy_problems", "=", "[", "\"Kurd\"", ",", "\"Qur'an\"", "]", "#skip_list = list(cts.keys()) + special_terms", "skip_list", "=", "special_terms", "+", "spacy_problems", "skip_list", "=", "set", "(", "skip_list", ")", "return", "skip_list" ]
Return hand-defined list of place names to skip and not attempt to geolocate. If users would like to exclude country names, this would be the function to do it with.
[ "Return", "hand", "-", "defined", "list", "of", "place", "names", "to", "skip", "and", "not", "attempt", "to", "geolocate", ".", "If", "users", "would", "like", "to", "exclude", "country", "names", "this", "would", "be", "the", "function", "to", "do", "it", "with", "." ]
bd82b8bcc27621345c57cbe9ec7f8c8552620ffc
https://github.com/openeventdata/mordecai/blob/bd82b8bcc27621345c57cbe9ec7f8c8552620ffc/mordecai/utilities.py#L138-L164
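Hypothetical usage (assumes mordecai and its models are installed). Note that the cts argument is accepted but currently unused, since the line that would add country names to the skip list is commented out in the function body.

from mordecai.utilities import make_skip_list

skip = make_skip_list(cts={})
assert 'Europe' in skip and 'Aleppo' not in skip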
233,871
openeventdata/mordecai
mordecai/utilities.py
country_list_nlp
def country_list_nlp(cts): """NLP countries so we can use for vector comparisons""" ct_nlp = [] for i in cts.keys(): nlped = nlp(i) ct_nlp.append(nlped) return ct_nlp
python
def country_list_nlp(cts): """NLP countries so we can use for vector comparisons""" ct_nlp = [] for i in cts.keys(): nlped = nlp(i) ct_nlp.append(nlped) return ct_nlp
[ "def", "country_list_nlp", "(", "cts", ")", ":", "ct_nlp", "=", "[", "]", "for", "i", "in", "cts", ".", "keys", "(", ")", ":", "nlped", "=", "nlp", "(", "i", ")", "ct_nlp", ".", "append", "(", "nlped", ")", "return", "ct_nlp" ]
NLP countries so we can use for vector comparisons
[ "NLP", "countries", "so", "we", "can", "use", "for", "vector", "comparisons" ]
bd82b8bcc27621345c57cbe9ec7f8c8552620ffc
https://github.com/openeventdata/mordecai/blob/bd82b8bcc27621345c57cbe9ec7f8c8552620ffc/mordecai/utilities.py#L167-L173
233,872
openeventdata/mordecai
mordecai/utilities.py
make_country_nationality_list
def make_country_nationality_list(cts, ct_file): """Combine list of countries and list of nationalities""" countries = pd.read_csv(ct_file) nationality = dict(zip(countries.nationality,countries.alpha_3_code)) both_codes = {**nationality, **cts} return both_codes
python
def make_country_nationality_list(cts, ct_file): """Combine list of countries and list of nationalities""" countries = pd.read_csv(ct_file) nationality = dict(zip(countries.nationality,countries.alpha_3_code)) both_codes = {**nationality, **cts} return both_codes
[ "def", "make_country_nationality_list", "(", "cts", ",", "ct_file", ")", ":", "countries", "=", "pd", ".", "read_csv", "(", "ct_file", ")", "nationality", "=", "dict", "(", "zip", "(", "countries", ".", "nationality", ",", "countries", ".", "alpha_3_code", ")", ")", "both_codes", "=", "{", "*", "*", "nationality", ",", "*", "*", "cts", "}", "return", "both_codes" ]
Combine list of countries and list of nationalities
[ "Combine", "list", "of", "countries", "and", "list", "of", "nationalities" ]
bd82b8bcc27621345c57cbe9ec7f8c8552620ffc
https://github.com/openeventdata/mordecai/blob/bd82b8bcc27621345c57cbe9ec7f8c8552620ffc/mordecai/utilities.py#L176-L181
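One subtlety worth noting: in {**nationality, **cts}, later keys win, so a name appearing in both dicts resolves to the value from cts. The values below are made up to illustrate the collision rule.

merged = {**{'German': 'NAT'}, **{'German': 'DEU'}}
assert merged['German'] == 'DEU'   # cts overrides nationality on collisions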
233,873
openeventdata/mordecai
mordecai/utilities.py
structure_results
def structure_results(res): """Format Elasticsearch result as Python dictionary""" out = {'hits': {'hits': []}} keys = [u'admin1_code', u'admin2_code', u'admin3_code', u'admin4_code', u'alternativenames', u'asciiname', u'cc2', u'coordinates', u'country_code2', u'country_code3', u'dem', u'elevation', u'feature_class', u'feature_code', u'geonameid', u'modification_date', u'name', u'population', u'timezone'] for i in res: i_out = {} for k in keys: i_out[k] = i[k] out['hits']['hits'].append(i_out) return out
python
def structure_results(res): """Format Elasticsearch result as Python dictionary""" out = {'hits': {'hits': []}} keys = [u'admin1_code', u'admin2_code', u'admin3_code', u'admin4_code', u'alternativenames', u'asciiname', u'cc2', u'coordinates', u'country_code2', u'country_code3', u'dem', u'elevation', u'feature_class', u'feature_code', u'geonameid', u'modification_date', u'name', u'population', u'timezone'] for i in res: i_out = {} for k in keys: i_out[k] = i[k] out['hits']['hits'].append(i_out) return out
[ "def", "structure_results", "(", "res", ")", ":", "out", "=", "{", "'hits'", ":", "{", "'hits'", ":", "[", "]", "}", "}", "keys", "=", "[", "u'admin1_code'", ",", "u'admin2_code'", ",", "u'admin3_code'", ",", "u'admin4_code'", ",", "u'alternativenames'", ",", "u'asciiname'", ",", "u'cc2'", ",", "u'coordinates'", ",", "u'country_code2'", ",", "u'country_code3'", ",", "u'dem'", ",", "u'elevation'", ",", "u'feature_class'", ",", "u'feature_code'", ",", "u'geonameid'", ",", "u'modification_date'", ",", "u'name'", ",", "u'population'", ",", "u'timezone'", "]", "for", "i", "in", "res", ":", "i_out", "=", "{", "}", "for", "k", "in", "keys", ":", "i_out", "[", "k", "]", "=", "i", "[", "k", "]", "out", "[", "'hits'", "]", "[", "'hits'", "]", ".", "append", "(", "i_out", ")", "return", "out" ]
Format Elasticsearch result as Python dictionary
[ "Format", "Elasticsearch", "result", "as", "Python", "dictionary" ]
bd82b8bcc27621345c57cbe9ec7f8c8552620ffc
https://github.com/openeventdata/mordecai/blob/bd82b8bcc27621345c57cbe9ec7f8c8552620ffc/mordecai/utilities.py#L218-L231
233,874
openeventdata/mordecai
mordecai/utilities.py
setup_es
def setup_es(hosts, port, use_ssl=False, auth=None): """ Setup an Elasticsearch connection Parameters ---------- hosts: list Hostnames / IP addresses for elasticsearch cluster port: string Port for elasticsearch cluster use_ssl: boolean Whether to use SSL for the elasticsearch connection auth: tuple (username, password) to use with HTTP auth Returns ------- es_conn: an elasticsearch_dsl Search connection object. """ kwargs = dict( hosts=hosts or ['localhost'], port=port or 9200, use_ssl=use_ssl, ) if auth: kwargs.update(http_auth=auth) CLIENT = Elasticsearch(**kwargs) S = Search(using=CLIENT, index="geonames") return S
python
def setup_es(hosts, port, use_ssl=False, auth=None): """ Setup an Elasticsearch connection Parameters ---------- hosts: list Hostnames / IP addresses for elasticsearch cluster port: string Port for elasticsearch cluster use_ssl: boolean Whether to use SSL for the elasticsearch connection auth: tuple (username, password) to use with HTTP auth Returns ------- es_conn: an elasticsearch_dsl Search connection object. """ kwargs = dict( hosts=hosts or ['localhost'], port=port or 9200, use_ssl=use_ssl, ) if auth: kwargs.update(http_auth=auth) CLIENT = Elasticsearch(**kwargs) S = Search(using=CLIENT, index="geonames") return S
[ "def", "setup_es", "(", "hosts", ",", "port", ",", "use_ssl", "=", "False", ",", "auth", "=", "None", ")", ":", "kwargs", "=", "dict", "(", "hosts", "=", "hosts", "or", "[", "'localhost'", "]", ",", "port", "=", "port", "or", "9200", ",", "use_ssl", "=", "use_ssl", ",", ")", "if", "auth", ":", "kwargs", ".", "update", "(", "http_auth", "=", "auth", ")", "CLIENT", "=", "Elasticsearch", "(", "*", "*", "kwargs", ")", "S", "=", "Search", "(", "using", "=", "CLIENT", ",", "index", "=", "\"geonames\"", ")", "return", "S" ]
Setup an Elasticsearch connection Parameters ---------- hosts: list Hostnames / IP addresses for elasticsearch cluster port: string Port for elasticsearch cluster use_ssl: boolean Whether to use SSL for the elasticsearch connection auth: tuple (username, password) to use with HTTP auth Returns ------- es_conn: an elasticsearch_dsl Search connection object.
[ "Setup", "an", "Elasticsearch", "connection" ]
bd82b8bcc27621345c57cbe9ec7f8c8552620ffc
https://github.com/openeventdata/mordecai/blob/bd82b8bcc27621345c57cbe9ec7f8c8552620ffc/mordecai/utilities.py#L233-L261
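Hypothetical usage against a local cluster; host and port are assumptions, and running this requires a reachable Elasticsearch instance with a 'geonames' index.

from mordecai.utilities import setup_es

S = setup_es(hosts=['localhost'], port=9200)
res = S.query('match', name='Berlin')[0:5].execute()   # top five matches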
233,875
openeventdata/mordecai
mordecai/geoparse.py
Geoparser._feature_country_mentions
def _feature_country_mentions(self, doc): """ Given a document, count how many times different country names and adjectives are mentioned. These are features used in the country picking phase. Parameters --------- doc: a spaCy nlp'ed piece of text Returns ------- countries: dict the top two countries (ISO code) and their frequency of mentions. """ c_list = [] for i in doc.ents: try: country = self._both_codes[i.text] c_list.append(country) except KeyError: pass count = Counter(c_list).most_common() try: top, top_count = count[0] except: top = "" top_count = 0 try: two, two_count = count[1] except: two = "" two_count = 0 countries = (top, top_count, two, two_count) return countries
python
def _feature_country_mentions(self, doc): """ Given a document, count how many times different country names and adjectives are mentioned. These are features used in the country picking phase. Parameters --------- doc: a spaCy nlp'ed piece of text Returns ------- countries: dict the top two countries (ISO code) and their frequency of mentions. """ c_list = [] for i in doc.ents: try: country = self._both_codes[i.text] c_list.append(country) except KeyError: pass count = Counter(c_list).most_common() try: top, top_count = count[0] except: top = "" top_count = 0 try: two, two_count = count[1] except: two = "" two_count = 0 countries = (top, top_count, two, two_count) return countries
[ "def", "_feature_country_mentions", "(", "self", ",", "doc", ")", ":", "c_list", "=", "[", "]", "for", "i", "in", "doc", ".", "ents", ":", "try", ":", "country", "=", "self", ".", "_both_codes", "[", "i", ".", "text", "]", "c_list", ".", "append", "(", "country", ")", "except", "KeyError", ":", "pass", "count", "=", "Counter", "(", "c_list", ")", ".", "most_common", "(", ")", "try", ":", "top", ",", "top_count", "=", "count", "[", "0", "]", "except", ":", "top", "=", "\"\"", "top_count", "=", "0", "try", ":", "two", ",", "two_count", "=", "count", "[", "1", "]", "except", ":", "two", "=", "\"\"", "two_count", "=", "0", "countries", "=", "(", "top", ",", "top_count", ",", "two", ",", "two_count", ")", "return", "countries" ]
Given a document, count how many times different country names and adjectives are mentioned. These are features used in the country picking phase. Parameters --------- doc: a spaCy nlp'ed piece of text Returns ------- countries: tuple the top two countries (ISO code) and their mention counts, as (top, top_count, two, two_count).
[ "Given", "a", "document", "count", "how", "many", "times", "different", "country", "names", "and", "adjectives", "are", "mentioned", ".", "These", "are", "features", "used", "in", "the", "country", "picking", "phase", "." ]
bd82b8bcc27621345c57cbe9ec7f8c8552620ffc
https://github.com/openeventdata/mordecai/blob/bd82b8bcc27621345c57cbe9ec7f8c8552620ffc/mordecai/geoparse.py#L78-L112
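The heart of this feature is collections.Counter over the ISO codes gathered from the entities; a standalone sketch with fabricated codes:

from collections import Counter

count = Counter(['SYR', 'SYR', 'USA']).most_common()
assert count[0] == ('SYR', 2) and count[1] == ('USA', 1)   # (top, top_count), (two, two_count)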
233,876
openeventdata/mordecai
mordecai/geoparse.py
Geoparser.clean_entity
def clean_entity(self, ent): """ Strip out extra words that often get picked up by spaCy's NER. To do: preserve info about what got stripped out to help with ES/Geonames resolution later. Parameters --------- ent: a spaCy named entity Span Returns ------- new_ent: a spaCy Span, with extra words stripped out. """ dump_list = ['province', 'the', 'area', 'airport', 'district', 'square', 'town', 'village', 'prison', "river", "valley", "provincial", "prison", "region", "municipality", "state", "territory", "of", "in", "county", "central"] # maybe have 'city'? Works differently in different countries # also, "District of Columbia". Might need to use cap/no cap keep_positions = [] for word in ent: if word.text.lower() not in dump_list: keep_positions.append(word.i) keep_positions = np.asarray(keep_positions) try: new_ent = ent.doc[keep_positions.min():keep_positions.max() + 1] # can't set directly #new_ent.label_.__set__(ent.label_) except ValueError: new_ent = ent return new_ent
python
def clean_entity(self, ent): """ Strip out extra words that often get picked up by spaCy's NER. To do: preserve info about what got stripped out to help with ES/Geonames resolution later. Parameters --------- ent: a spaCy named entity Span Returns ------- new_ent: a spaCy Span, with extra words stripped out. """ dump_list = ['province', 'the', 'area', 'airport', 'district', 'square', 'town', 'village', 'prison', "river", "valley", "provincial", "prison", "region", "municipality", "state", "territory", "of", "in", "county", "central"] # maybe have 'city'? Works differently in different countries # also, "District of Columbia". Might need to use cap/no cap keep_positions = [] for word in ent: if word.text.lower() not in dump_list: keep_positions.append(word.i) keep_positions = np.asarray(keep_positions) try: new_ent = ent.doc[keep_positions.min():keep_positions.max() + 1] # can't set directly #new_ent.label_.__set__(ent.label_) except ValueError: new_ent = ent return new_ent
[ "def", "clean_entity", "(", "self", ",", "ent", ")", ":", "dump_list", "=", "[", "'province'", ",", "'the'", ",", "'area'", ",", "'airport'", ",", "'district'", ",", "'square'", ",", "'town'", ",", "'village'", ",", "'prison'", ",", "\"river\"", ",", "\"valley\"", ",", "\"provincial\"", ",", "\"prison\"", ",", "\"region\"", ",", "\"municipality\"", ",", "\"state\"", ",", "\"territory\"", ",", "\"of\"", ",", "\"in\"", ",", "\"county\"", ",", "\"central\"", "]", "# maybe have 'city'? Works differently in different countries", "# also, \"District of Columbia\". Might need to use cap/no cap", "keep_positions", "=", "[", "]", "for", "word", "in", "ent", ":", "if", "word", ".", "text", ".", "lower", "(", ")", "not", "in", "dump_list", ":", "keep_positions", ".", "append", "(", "word", ".", "i", ")", "keep_positions", "=", "np", ".", "asarray", "(", "keep_positions", ")", "try", ":", "new_ent", "=", "ent", ".", "doc", "[", "keep_positions", ".", "min", "(", ")", ":", "keep_positions", ".", "max", "(", ")", "+", "1", "]", "# can't set directly", "#new_ent.label_.__set__(ent.label_)", "except", "ValueError", ":", "new_ent", "=", "ent", "return", "new_ent" ]
Strip out extra words that often get picked up by spaCy's NER. To do: preserve info about what got stripped out to help with ES/Geonames resolution later. Parameters --------- ent: a spaCy named entity Span Returns ------- new_ent: a spaCy Span, with extra words stripped out.
[ "Strip", "out", "extra", "words", "that", "often", "get", "picked", "up", "by", "spaCy", "s", "NER", "." ]
bd82b8bcc27621345c57cbe9ec7f8c8552620ffc
https://github.com/openeventdata/mordecai/blob/bd82b8bcc27621345c57cbe9ec7f8c8552620ffc/mordecai/geoparse.py#L115-L149
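A standalone sketch of the keep-positions trick on plain strings (the real method operates on spaCy token indices and slices the parent Doc):

import numpy as np

tokens = ['the', 'province', 'of', 'Aleppo']
dump = {'the', 'province', 'of'}
keep = np.asarray([i for i, t in enumerate(tokens) if t.lower() not in dump])
assert tokens[keep.min():keep.max() + 1] == ['Aleppo']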
233,877
openeventdata/mordecai
mordecai/geoparse.py
Geoparser._feature_most_alternative
def _feature_most_alternative(self, results, full_results=False): """ Find the placename with the most alternative names and return its country. More alternative names are a rough measure of importance. Paramaters ---------- results: dict output of `query_geonames` Returns ------- most_alt: str ISO code of country of place with most alternative names, or empty string if none """ try: alt_names = [len(i['alternativenames']) for i in results['hits']['hits']] most_alt = results['hits']['hits'][np.array(alt_names).argmax()] if full_results: return most_alt else: return most_alt['country_code3'] except (IndexError, ValueError, TypeError): return ""
python
def _feature_most_alternative(self, results, full_results=False): """ Find the placename with the most alternative names and return its country. More alternative names are a rough measure of importance. Paramaters ---------- results: dict output of `query_geonames` Returns ------- most_alt: str ISO code of country of place with most alternative names, or empty string if none """ try: alt_names = [len(i['alternativenames']) for i in results['hits']['hits']] most_alt = results['hits']['hits'][np.array(alt_names).argmax()] if full_results: return most_alt else: return most_alt['country_code3'] except (IndexError, ValueError, TypeError): return ""
[ "def", "_feature_most_alternative", "(", "self", ",", "results", ",", "full_results", "=", "False", ")", ":", "try", ":", "alt_names", "=", "[", "len", "(", "i", "[", "'alternativenames'", "]", ")", "for", "i", "in", "results", "[", "'hits'", "]", "[", "'hits'", "]", "]", "most_alt", "=", "results", "[", "'hits'", "]", "[", "'hits'", "]", "[", "np", ".", "array", "(", "alt_names", ")", ".", "argmax", "(", ")", "]", "if", "full_results", ":", "return", "most_alt", "else", ":", "return", "most_alt", "[", "'country_code3'", "]", "except", "(", "IndexError", ",", "ValueError", ",", "TypeError", ")", ":", "return", "\"\"" ]
Find the placename with the most alternative names and return its country. More alternative names are a rough measure of importance. Parameters ---------- results: dict output of `query_geonames` Returns ------- most_alt: str ISO code of country of place with most alternative names, or empty string if none
[ "Find", "the", "placename", "with", "the", "most", "alternative", "names", "and", "return", "its", "country", ".", "More", "alternative", "names", "are", "a", "rough", "measure", "of", "importance", "." ]
bd82b8bcc27621345c57cbe9ec7f8c8552620ffc
https://github.com/openeventdata/mordecai/blob/bd82b8bcc27621345c57cbe9ec7f8c8552620ffc/mordecai/geoparse.py#L176-L200
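A fabricated results dict in the structure_results shape shows the argmax over alternative-name counts (only the keys this feature reads are included here):

import numpy as np

results = {'hits': {'hits': [
    {'alternativenames': ['a'], 'country_code3': 'USA'},
    {'alternativenames': ['a', 'b', 'c'], 'country_code3': 'SYR'},
]}}
alt_names = [len(h['alternativenames']) for h in results['hits']['hits']]
assert results['hits']['hits'][np.array(alt_names).argmax()]['country_code3'] == 'SYR'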
233,878
openeventdata/mordecai
mordecai/geoparse.py
Geoparser._feature_most_population
def _feature_most_population(self, results): """ Find the placename with the largest population and return its country. More population is a rough measure of importance. Paramaters ---------- results: dict output of `query_geonames` Returns ------- most_pop: str ISO code of country of place with largest population, or empty string if none """ try: populations = [i['population'] for i in results['hits']['hits']] most_pop = results['hits']['hits'][np.array(populations).astype("int").argmax()] return most_pop['country_code3'] except Exception as e: return ""
python
def _feature_most_population(self, results): """ Find the placename with the largest population and return its country. More population is a rough measure of importance. Paramaters ---------- results: dict output of `query_geonames` Returns ------- most_pop: str ISO code of country of place with largest population, or empty string if none """ try: populations = [i['population'] for i in results['hits']['hits']] most_pop = results['hits']['hits'][np.array(populations).astype("int").argmax()] return most_pop['country_code3'] except Exception as e: return ""
[ "def", "_feature_most_population", "(", "self", ",", "results", ")", ":", "try", ":", "populations", "=", "[", "i", "[", "'population'", "]", "for", "i", "in", "results", "[", "'hits'", "]", "[", "'hits'", "]", "]", "most_pop", "=", "results", "[", "'hits'", "]", "[", "'hits'", "]", "[", "np", ".", "array", "(", "populations", ")", ".", "astype", "(", "\"int\"", ")", ".", "argmax", "(", ")", "]", "return", "most_pop", "[", "'country_code3'", "]", "except", "Exception", "as", "e", ":", "return", "\"\"" ]
Find the placename with the largest population and return its country. More population is a rough measure of importance. Parameters ---------- results: dict output of `query_geonames` Returns ------- most_pop: str ISO code of country of place with largest population, or empty string if none
[ "Find", "the", "placename", "with", "the", "largest", "population", "and", "return", "its", "country", ".", "More", "population", "is", "a", "rough", "measure", "of", "importance", "." ]
bd82b8bcc27621345c57cbe9ec7f8c8552620ffc
https://github.com/openeventdata/mordecai/blob/bd82b8bcc27621345c57cbe9ec7f8c8552620ffc/mordecai/geoparse.py#L203-L225
233,879
openeventdata/mordecai
mordecai/geoparse.py
Geoparser._feature_word_embedding
def _feature_word_embedding(self, text): """ Given a word, guess the appropriate country by word vector. Parameters --------- text: str the text to extract locations from. Returns ------- country_picking: dict The top two countries (ISO codes) and two measures confidence for the first choice. """ try: simils = np.dot(self._prebuilt_vec, text.vector) except Exception as e: #print("Vector problem, ", Exception, e) return {"country_1" : "", "confid_a" : 0, "confid_b" : 0, "country_2" : ""} ranks = simils.argsort()[::-1] confid = simils.max() confid2 = simils[ranks[0]] - simils[ranks[1]] if confid == 0 or confid2 == 0: return "" country_code = self._cts[str(self._ct_nlp[ranks[0]])] country_picking = {"country_1" : country_code, "confid_a" : confid, "confid_b" : confid2, "country_2" : self._cts[str(self._ct_nlp[ranks[1]])]} return country_picking
python
def _feature_word_embedding(self, text): """ Given a word, guess the appropriate country by word vector. Parameters --------- text: str the text to extract locations from. Returns ------- country_picking: dict The top two countries (ISO codes) and two measures confidence for the first choice. """ try: simils = np.dot(self._prebuilt_vec, text.vector) except Exception as e: #print("Vector problem, ", Exception, e) return {"country_1" : "", "confid_a" : 0, "confid_b" : 0, "country_2" : ""} ranks = simils.argsort()[::-1] confid = simils.max() confid2 = simils[ranks[0]] - simils[ranks[1]] if confid == 0 or confid2 == 0: return "" country_code = self._cts[str(self._ct_nlp[ranks[0]])] country_picking = {"country_1" : country_code, "confid_a" : confid, "confid_b" : confid2, "country_2" : self._cts[str(self._ct_nlp[ranks[1]])]} return country_picking
[ "def", "_feature_word_embedding", "(", "self", ",", "text", ")", ":", "try", ":", "simils", "=", "np", ".", "dot", "(", "self", ".", "_prebuilt_vec", ",", "text", ".", "vector", ")", "except", "Exception", "as", "e", ":", "#print(\"Vector problem, \", Exception, e)", "return", "{", "\"country_1\"", ":", "\"\"", ",", "\"confid_a\"", ":", "0", ",", "\"confid_b\"", ":", "0", ",", "\"country_2\"", ":", "\"\"", "}", "ranks", "=", "simils", ".", "argsort", "(", ")", "[", ":", ":", "-", "1", "]", "confid", "=", "simils", ".", "max", "(", ")", "confid2", "=", "simils", "[", "ranks", "[", "0", "]", "]", "-", "simils", "[", "ranks", "[", "1", "]", "]", "if", "confid", "==", "0", "or", "confid2", "==", "0", ":", "return", "\"\"", "country_code", "=", "self", ".", "_cts", "[", "str", "(", "self", ".", "_ct_nlp", "[", "ranks", "[", "0", "]", "]", ")", "]", "country_picking", "=", "{", "\"country_1\"", ":", "country_code", ",", "\"confid_a\"", ":", "confid", ",", "\"confid_b\"", ":", "confid2", ",", "\"country_2\"", ":", "self", ".", "_cts", "[", "str", "(", "self", ".", "_ct_nlp", "[", "ranks", "[", "1", "]", "]", ")", "]", "}", "return", "country_picking" ]
Given a word, guess the appropriate country by word vector. Parameters --------- text: str the text to extract locations from. Returns ------- country_picking: dict The top two countries (ISO codes) and two confidence measures for the first choice.
[ "Given", "a", "word", "guess", "the", "appropriate", "country", "by", "word", "vector", "." ]
bd82b8bcc27621345c57cbe9ec7f8c8552620ffc
https://github.com/openeventdata/mordecai/blob/bd82b8bcc27621345c57cbe9ec7f8c8552620ffc/mordecai/geoparse.py#L228-L261
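A standalone sketch of the dot-product ranking with two hypothetical 2-d country vectors; the real code uses spaCy's 300-d vectors stacked in self._prebuilt_vec.

import numpy as np

prebuilt = np.array([[1.0, 0.0], [0.0, 1.0]])    # one row per country
word = np.array([0.9, 0.1])
simils = np.dot(prebuilt, word)
ranks = simils.argsort()[::-1]
confid = simils.max()                            # confid_a: best similarity
margin = simils[ranks[0]] - simils[ranks[1]]     # confid_b: gap to runner-up
assert ranks[0] == 0 and margin > 0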
233,880
openeventdata/mordecai
mordecai/geoparse.py
Geoparser._feature_first_back
def _feature_first_back(self, results): """ Get the country of the first two results back from geonames. Parameters ----------- results: dict elasticsearch results Returns ------- top: tuple first and second results' country name (ISO) """ try: first_back = results['hits']['hits'][0]['country_code3'] except (TypeError, IndexError): # usually occurs if no Geonames result first_back = "" try: second_back = results['hits']['hits'][1]['country_code3'] except (TypeError, IndexError): second_back = "" top = (first_back, second_back) return top
python
def _feature_first_back(self, results): """ Get the country of the first two results back from geonames. Parameters ----------- results: dict elasticsearch results Returns ------- top: tuple first and second results' country name (ISO) """ try: first_back = results['hits']['hits'][0]['country_code3'] except (TypeError, IndexError): # usually occurs if no Geonames result first_back = "" try: second_back = results['hits']['hits'][1]['country_code3'] except (TypeError, IndexError): second_back = "" top = (first_back, second_back) return top
[ "def", "_feature_first_back", "(", "self", ",", "results", ")", ":", "try", ":", "first_back", "=", "results", "[", "'hits'", "]", "[", "'hits'", "]", "[", "0", "]", "[", "'country_code3'", "]", "except", "(", "TypeError", ",", "IndexError", ")", ":", "# usually occurs if no Geonames result", "first_back", "=", "\"\"", "try", ":", "second_back", "=", "results", "[", "'hits'", "]", "[", "'hits'", "]", "[", "1", "]", "[", "'country_code3'", "]", "except", "(", "TypeError", ",", "IndexError", ")", ":", "second_back", "=", "\"\"", "top", "=", "(", "first_back", ",", "second_back", ")", "return", "top" ]
Get the country of the first two results back from geonames. Parameters ----------- results: dict elasticsearch results Returns ------- top: tuple first and second results' country name (ISO)
[ "Get", "the", "country", "of", "the", "first", "two", "results", "back", "from", "geonames", "." ]
bd82b8bcc27621345c57cbe9ec7f8c8552620ffc
https://github.com/openeventdata/mordecai/blob/bd82b8bcc27621345c57cbe9ec7f8c8552620ffc/mordecai/geoparse.py#L264-L288
233,881
openeventdata/mordecai
mordecai/geoparse.py
Geoparser.is_country
def is_country(self, text): """Check if a piece of text is in the list of countries""" ct_list = self._just_cts.keys() if text in ct_list: return True else: return False
python
def is_country(self, text): """Check if a piece of text is in the list of countries""" ct_list = self._just_cts.keys() if text in ct_list: return True else: return False
[ "def", "is_country", "(", "self", ",", "text", ")", ":", "ct_list", "=", "self", ".", "_just_cts", ".", "keys", "(", ")", "if", "text", "in", "ct_list", ":", "return", "True", "else", ":", "return", "False" ]
Check if a piece of text is in the list of countries
[ "Check", "if", "a", "piece", "of", "text", "is", "in", "the", "list", "of", "countries" ]
bd82b8bcc27621345c57cbe9ec7f8c8552620ffc
https://github.com/openeventdata/mordecai/blob/bd82b8bcc27621345c57cbe9ec7f8c8552620ffc/mordecai/geoparse.py#L291-L297
233,882
openeventdata/mordecai
mordecai/geoparse.py
Geoparser.query_geonames
def query_geonames(self, placename): """ Wrap search parameters into an elasticsearch query to the geonames index and return results. Parameters --------- conn: an elasticsearch Search conn, like the one returned by `setup_es()` placename: str the placename text extracted by NER system Returns ------- out: The raw results of the elasticsearch query """ # first first, try for country name if self.is_country(placename): q = {"multi_match": {"query": placename, "fields": ['name', 'asciiname', 'alternativenames'], "type" : "phrase"}} res = self.conn.filter("term", feature_code='PCLI').query(q)[0:5].execute() # always 5 else: # second, try for an exact phrase match q = {"multi_match": {"query": placename, "fields": ['name^5', 'asciiname^5', 'alternativenames'], "type" : "phrase"}} res = self.conn.query(q)[0:50].execute() # if no results, use some fuzziness, but still require all terms to be present. # Fuzzy is not allowed in "phrase" searches. if res.hits.total == 0: # tried wrapping this in a {"constant_score" : {"query": ... but made it worse q = {"multi_match": {"query": placename, "fields": ['name', 'asciiname', 'alternativenames'], "fuzziness" : 1, "operator": "and" } } res = self.conn.query(q)[0:50].execute() es_result = utilities.structure_results(res) return es_result
python
def query_geonames(self, placename): """ Wrap search parameters into an elasticsearch query to the geonames index and return results. Parameters --------- conn: an elasticsearch Search conn, like the one returned by `setup_es()` placename: str the placename text extracted by NER system Returns ------- out: The raw results of the elasticsearch query """ # first first, try for country name if self.is_country(placename): q = {"multi_match": {"query": placename, "fields": ['name', 'asciiname', 'alternativenames'], "type" : "phrase"}} res = self.conn.filter("term", feature_code='PCLI').query(q)[0:5].execute() # always 5 else: # second, try for an exact phrase match q = {"multi_match": {"query": placename, "fields": ['name^5', 'asciiname^5', 'alternativenames'], "type" : "phrase"}} res = self.conn.query(q)[0:50].execute() # if no results, use some fuzziness, but still require all terms to be present. # Fuzzy is not allowed in "phrase" searches. if res.hits.total == 0: # tried wrapping this in a {"constant_score" : {"query": ... but made it worse q = {"multi_match": {"query": placename, "fields": ['name', 'asciiname', 'alternativenames'], "fuzziness" : 1, "operator": "and" } } res = self.conn.query(q)[0:50].execute() es_result = utilities.structure_results(res) return es_result
[ "def", "query_geonames", "(", "self", ",", "placename", ")", ":", "# first first, try for country name", "if", "self", ".", "is_country", "(", "placename", ")", ":", "q", "=", "{", "\"multi_match\"", ":", "{", "\"query\"", ":", "placename", ",", "\"fields\"", ":", "[", "'name'", ",", "'asciiname'", ",", "'alternativenames'", "]", ",", "\"type\"", ":", "\"phrase\"", "}", "}", "res", "=", "self", ".", "conn", ".", "filter", "(", "\"term\"", ",", "feature_code", "=", "'PCLI'", ")", ".", "query", "(", "q", ")", "[", "0", ":", "5", "]", ".", "execute", "(", ")", "# always 5", "else", ":", "# second, try for an exact phrase match", "q", "=", "{", "\"multi_match\"", ":", "{", "\"query\"", ":", "placename", ",", "\"fields\"", ":", "[", "'name^5'", ",", "'asciiname^5'", ",", "'alternativenames'", "]", ",", "\"type\"", ":", "\"phrase\"", "}", "}", "res", "=", "self", ".", "conn", ".", "query", "(", "q", ")", "[", "0", ":", "50", "]", ".", "execute", "(", ")", "# if no results, use some fuzziness, but still require all terms to be present.", "# Fuzzy is not allowed in \"phrase\" searches.", "if", "res", ".", "hits", ".", "total", "==", "0", ":", "# tried wrapping this in a {\"constant_score\" : {\"query\": ... but made it worse", "q", "=", "{", "\"multi_match\"", ":", "{", "\"query\"", ":", "placename", ",", "\"fields\"", ":", "[", "'name'", ",", "'asciiname'", ",", "'alternativenames'", "]", ",", "\"fuzziness\"", ":", "1", ",", "\"operator\"", ":", "\"and\"", "}", "}", "res", "=", "self", ".", "conn", ".", "query", "(", "q", ")", "[", "0", ":", "50", "]", ".", "execute", "(", ")", "es_result", "=", "utilities", ".", "structure_results", "(", "res", ")", "return", "es_result" ]
Wrap search parameters into an elasticsearch query to the geonames index and return results. Parameters ---------- placename: str the placename text extracted by the NER system (the elasticsearch Search connection, like the one returned by `setup_es()`, is read from self.conn) Returns ------- out: The raw results of the elasticsearch query
[ "Wrap", "search", "parameters", "into", "an", "elasticsearch", "query", "to", "the", "geonames", "index", "and", "return", "results", "." ]
bd82b8bcc27621345c57cbe9ec7f8c8552620ffc
https://github.com/openeventdata/mordecai/blob/bd82b8bcc27621345c57cbe9ec7f8c8552620ffc/mordecai/geoparse.py#L301-L341
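The first-pass query body for a non-country name, taken directly from the function above; the fallback drops the phrase type and adds fuzziness while still requiring every term.

q = {'multi_match': {'query': 'Aleppo',
                     'fields': ['name^5', 'asciiname^5', 'alternativenames'],
                     'type': 'phrase'}}
fallback = {'multi_match': {'query': 'Aleppo',
                            'fields': ['name', 'asciiname', 'alternativenames'],
                            'fuzziness': 1,
                            'operator': 'and'}}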
233,883
openeventdata/mordecai
mordecai/geoparse.py
Geoparser.query_geonames_country
def query_geonames_country(self, placename, country): """ Like query_geonames, but this time limited to a specified country. """ # first, try for an exact phrase match q = {"multi_match": {"query": placename, "fields": ['name^5', 'asciiname^5', 'alternativenames'], "type": "phrase"}} res = self.conn.filter("term", country_code3=country).query(q)[0:50].execute() # if no results, use some fuzziness, but still require all terms to be present. # Fuzzy is not allowed in "phrase" searches. if res.hits.total == 0: # tried wrapping this in a {"constant_score" : {"query": ... but made it worse q = {"multi_match": {"query": placename, "fields": ['name', 'asciiname', 'alternativenames'], "fuzziness": 1, "operator": "and"}} res = self.conn.filter("term", country_code3=country).query(q)[0:50].execute() out = utilities.structure_results(res) return out
python
def query_geonames_country(self, placename, country): """ Like query_geonames, but this time limited to a specified country. """ # first, try for an exact phrase match q = {"multi_match": {"query": placename, "fields": ['name^5', 'asciiname^5', 'alternativenames'], "type": "phrase"}} res = self.conn.filter("term", country_code3=country).query(q)[0:50].execute() # if no results, use some fuzziness, but still require all terms to be present. # Fuzzy is not allowed in "phrase" searches. if res.hits.total == 0: # tried wrapping this in a {"constant_score" : {"query": ... but made it worse q = {"multi_match": {"query": placename, "fields": ['name', 'asciiname', 'alternativenames'], "fuzziness": 1, "operator": "and"}} res = self.conn.filter("term", country_code3=country).query(q)[0:50].execute() out = utilities.structure_results(res) return out
[ "def", "query_geonames_country", "(", "self", ",", "placename", ",", "country", ")", ":", "# first, try for an exact phrase match", "q", "=", "{", "\"multi_match\"", ":", "{", "\"query\"", ":", "placename", ",", "\"fields\"", ":", "[", "'name^5'", ",", "'asciiname^5'", ",", "'alternativenames'", "]", ",", "\"type\"", ":", "\"phrase\"", "}", "}", "res", "=", "self", ".", "conn", ".", "filter", "(", "\"term\"", ",", "country_code3", "=", "country", ")", ".", "query", "(", "q", ")", "[", "0", ":", "50", "]", ".", "execute", "(", ")", "# if no results, use some fuzziness, but still require all terms to be present.", "# Fuzzy is not allowed in \"phrase\" searches.", "if", "res", ".", "hits", ".", "total", "==", "0", ":", "# tried wrapping this in a {\"constant_score\" : {\"query\": ... but made it worse", "q", "=", "{", "\"multi_match\"", ":", "{", "\"query\"", ":", "placename", ",", "\"fields\"", ":", "[", "'name'", ",", "'asciiname'", ",", "'alternativenames'", "]", ",", "\"fuzziness\"", ":", "1", ",", "\"operator\"", ":", "\"and\"", "}", "}", "res", "=", "self", ".", "conn", ".", "filter", "(", "\"term\"", ",", "country_code3", "=", "country", ")", ".", "query", "(", "q", ")", "[", "0", ":", "50", "]", ".", "execute", "(", ")", "out", "=", "utilities", ".", "structure_results", "(", "res", ")", "return", "out" ]
Like query_geonames, but this time limited to a specified country.
[ "Like", "query_geonames", "but", "this", "time", "limited", "to", "a", "specified", "country", "." ]
bd82b8bcc27621345c57cbe9ec7f8c8552620ffc
https://github.com/openeventdata/mordecai/blob/bd82b8bcc27621345c57cbe9ec7f8c8552620ffc/mordecai/geoparse.py#L345-L365
233,884
openeventdata/mordecai
mordecai/geoparse.py
Geoparser.make_country_matrix
def make_country_matrix(self, loc): """ Create features for all possible country labels, return as matrix for keras. Parameters ---------- loc: dict one entry from the list of locations and features that come out of make_country_features Returns -------- keras_inputs: dict with two keys, "label" and "matrix" """ top = loc['features']['ct_mention'] top_count = loc['features']['ctm_count1'] two = loc['features']['ct_mention2'] two_count = loc['features']['ctm_count2'] word_vec = loc['features']['word_vec'] first_back = loc['features']['first_back'] most_alt = loc['features']['most_alt'] most_pop = loc['features']['most_pop'] possible_labels = set([top, two, word_vec, first_back, most_alt, most_pop]) possible_labels = [i for i in possible_labels if i] X_mat = [] for label in possible_labels: inputs = np.array([word_vec, first_back, most_alt, most_pop]) x = inputs == label x = np.asarray((x * 2) - 1) # convert to -1, 1 # get missing values exists = inputs != "" exists = np.asarray((exists * 2) - 1) counts = np.asarray([top_count, two_count]) # cludgy, should be up with "inputs" right = np.asarray([top, two]) == label right = right * 2 - 1 right[counts == 0] = 0 # get correct values features = np.concatenate([x, exists, counts, right]) X_mat.append(np.asarray(features)) keras_inputs = {"labels": possible_labels, "matrix" : np.asmatrix(X_mat)} return keras_inputs
python
def make_country_matrix(self, loc): """ Create features for all possible country labels, return as matrix for keras. Parameters ---------- loc: dict one entry from the list of locations and features that come out of make_country_features Returns -------- keras_inputs: dict with two keys, "label" and "matrix" """ top = loc['features']['ct_mention'] top_count = loc['features']['ctm_count1'] two = loc['features']['ct_mention2'] two_count = loc['features']['ctm_count2'] word_vec = loc['features']['word_vec'] first_back = loc['features']['first_back'] most_alt = loc['features']['most_alt'] most_pop = loc['features']['most_pop'] possible_labels = set([top, two, word_vec, first_back, most_alt, most_pop]) possible_labels = [i for i in possible_labels if i] X_mat = [] for label in possible_labels: inputs = np.array([word_vec, first_back, most_alt, most_pop]) x = inputs == label x = np.asarray((x * 2) - 1) # convert to -1, 1 # get missing values exists = inputs != "" exists = np.asarray((exists * 2) - 1) counts = np.asarray([top_count, two_count]) # cludgy, should be up with "inputs" right = np.asarray([top, two]) == label right = right * 2 - 1 right[counts == 0] = 0 # get correct values features = np.concatenate([x, exists, counts, right]) X_mat.append(np.asarray(features)) keras_inputs = {"labels": possible_labels, "matrix" : np.asmatrix(X_mat)} return keras_inputs
[ "def", "make_country_matrix", "(", "self", ",", "loc", ")", ":", "top", "=", "loc", "[", "'features'", "]", "[", "'ct_mention'", "]", "top_count", "=", "loc", "[", "'features'", "]", "[", "'ctm_count1'", "]", "two", "=", "loc", "[", "'features'", "]", "[", "'ct_mention2'", "]", "two_count", "=", "loc", "[", "'features'", "]", "[", "'ctm_count2'", "]", "word_vec", "=", "loc", "[", "'features'", "]", "[", "'word_vec'", "]", "first_back", "=", "loc", "[", "'features'", "]", "[", "'first_back'", "]", "most_alt", "=", "loc", "[", "'features'", "]", "[", "'most_alt'", "]", "most_pop", "=", "loc", "[", "'features'", "]", "[", "'most_pop'", "]", "possible_labels", "=", "set", "(", "[", "top", ",", "two", ",", "word_vec", ",", "first_back", ",", "most_alt", ",", "most_pop", "]", ")", "possible_labels", "=", "[", "i", "for", "i", "in", "possible_labels", "if", "i", "]", "X_mat", "=", "[", "]", "for", "label", "in", "possible_labels", ":", "inputs", "=", "np", ".", "array", "(", "[", "word_vec", ",", "first_back", ",", "most_alt", ",", "most_pop", "]", ")", "x", "=", "inputs", "==", "label", "x", "=", "np", ".", "asarray", "(", "(", "x", "*", "2", ")", "-", "1", ")", "# convert to -1, 1", "# get missing values", "exists", "=", "inputs", "!=", "\"\"", "exists", "=", "np", ".", "asarray", "(", "(", "exists", "*", "2", ")", "-", "1", ")", "counts", "=", "np", ".", "asarray", "(", "[", "top_count", ",", "two_count", "]", ")", "# cludgy, should be up with \"inputs\"", "right", "=", "np", ".", "asarray", "(", "[", "top", ",", "two", "]", ")", "==", "label", "right", "=", "right", "*", "2", "-", "1", "right", "[", "counts", "==", "0", "]", "=", "0", "# get correct values", "features", "=", "np", ".", "concatenate", "(", "[", "x", ",", "exists", ",", "counts", ",", "right", "]", ")", "X_mat", ".", "append", "(", "np", ".", "asarray", "(", "features", ")", ")", "keras_inputs", "=", "{", "\"labels\"", ":", "possible_labels", ",", "\"matrix\"", ":", "np", ".", "asmatrix", "(", "X_mat", ")", "}", "return", "keras_inputs" ]
Create features for all possible country labels, return as matrix for keras. Parameters ---------- loc: dict one entry from the list of locations and features that come out of make_country_features Returns -------- keras_inputs: dict with two keys, "labels" and "matrix"
[ "Create", "features", "for", "all", "possible", "country", "labels", "return", "as", "matrix", "for", "keras", "." ]
bd82b8bcc27621345c57cbe9ec7f8c8552620ffc
https://github.com/openeventdata/mordecai/blob/bd82b8bcc27621345c57cbe9ec7f8c8552620ffc/mordecai/geoparse.py#L595-L643
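A standalone sketch of the {-1, 1} encoding built for each candidate label (fabricated ISO codes):

import numpy as np

inputs = np.array(['SYR', 'JAM', 'USA', 'SYR'])   # word_vec, first_back, most_alt, most_pop
x = (inputs == 'SYR') * 2 - 1                     # agreement with the label: 1 / -1
exists = (inputs != '') * 2 - 1                   # feature present: 1 / -1
assert list(x) == [1, -1, -1, 1] and list(exists) == [1, 1, 1, 1]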
233,885
openeventdata/mordecai
mordecai/geoparse.py
Geoparser.infer_country
def infer_country(self, doc): """NLP a doc, find its entities, get their features, and return the model's country guess for each. Maybe use a better name. Parameters ----------- doc: str or spaCy the document to country-resolve the entities in Returns ------- proced: list of dict the feature output of "make_country_features" updated with the model's estimated country for each entity. E.g.: {'all_confidence': array([ 0.95783567, 0.03769876, 0.00454875], dtype=float32), 'all_countries': array(['SYR', 'USA', 'JAM'], dtype='<U3'), 'country_conf': 0.95783567, 'country_predicted': 'SYR', 'features': {'ct_mention': '', 'ct_mention2': '', 'ctm_count1': 0, 'ctm_count2': 0, 'first_back': 'JAM', 'maj_vote': 'SYR', 'most_alt': 'USA', 'most_pop': 'SYR', 'word_vec': 'SYR', 'wv_confid': '29.3188'}, 'label': 'Syria', 'spans': [{'end': 26, 'start': 20}], 'text': "There's fighting in Aleppo and Homs.", 'word': 'Aleppo'} """ if not hasattr(doc, "ents"): doc = nlp(doc) proced = self.make_country_features(doc, require_maj=False) if not proced: pass # logging! #print("Nothing came back from make_country_features") feat_list = [] #proced = self.ent_list_to_matrix(proced) for loc in proced: feat = self.make_country_matrix(loc) #labels = loc['labels'] feat_list.append(feat) #try: # for each potential country... for n, i in enumerate(feat_list): labels = i['labels'] try: prediction = self.country_model.predict(i['matrix']).transpose()[0] ranks = prediction.argsort()[::-1] labels = np.asarray(labels)[ranks] prediction = prediction[ranks] except ValueError: prediction = np.array([0]) labels = np.array([""]) loc['country_predicted'] = labels[0] loc['country_conf'] = prediction[0] loc['all_countries'] = labels loc['all_confidence'] = prediction return proced
python
def infer_country(self, doc): """NLP a doc, find its entities, get their features, and return the model's country guess for each. Maybe use a better name. Parameters ----------- doc: str or spaCy the document to country-resolve the entities in Returns ------- proced: list of dict the feature output of "make_country_features" updated with the model's estimated country for each entity. E.g.: {'all_confidence': array([ 0.95783567, 0.03769876, 0.00454875], dtype=float32), 'all_countries': array(['SYR', 'USA', 'JAM'], dtype='<U3'), 'country_conf': 0.95783567, 'country_predicted': 'SYR', 'features': {'ct_mention': '', 'ct_mention2': '', 'ctm_count1': 0, 'ctm_count2': 0, 'first_back': 'JAM', 'maj_vote': 'SYR', 'most_alt': 'USA', 'most_pop': 'SYR', 'word_vec': 'SYR', 'wv_confid': '29.3188'}, 'label': 'Syria', 'spans': [{'end': 26, 'start': 20}], 'text': "There's fighting in Aleppo and Homs.", 'word': 'Aleppo'} """ if not hasattr(doc, "ents"): doc = nlp(doc) proced = self.make_country_features(doc, require_maj=False) if not proced: pass # logging! #print("Nothing came back from make_country_features") feat_list = [] #proced = self.ent_list_to_matrix(proced) for loc in proced: feat = self.make_country_matrix(loc) #labels = loc['labels'] feat_list.append(feat) #try: # for each potential country... for n, i in enumerate(feat_list): labels = i['labels'] try: prediction = self.country_model.predict(i['matrix']).transpose()[0] ranks = prediction.argsort()[::-1] labels = np.asarray(labels)[ranks] prediction = prediction[ranks] except ValueError: prediction = np.array([0]) labels = np.array([""]) loc['country_predicted'] = labels[0] loc['country_conf'] = prediction[0] loc['all_countries'] = labels loc['all_confidence'] = prediction return proced
[ "def", "infer_country", "(", "self", ",", "doc", ")", ":", "if", "not", "hasattr", "(", "doc", ",", "\"ents\"", ")", ":", "doc", "=", "nlp", "(", "doc", ")", "proced", "=", "self", ".", "make_country_features", "(", "doc", ",", "require_maj", "=", "False", ")", "if", "not", "proced", ":", "pass", "# logging!", "#print(\"Nothing came back from make_country_features\")", "feat_list", "=", "[", "]", "#proced = self.ent_list_to_matrix(proced)", "for", "loc", "in", "proced", ":", "feat", "=", "self", ".", "make_country_matrix", "(", "loc", ")", "#labels = loc['labels']", "feat_list", ".", "append", "(", "feat", ")", "#try:", "# for each potential country...", "for", "n", ",", "i", "in", "enumerate", "(", "feat_list", ")", ":", "labels", "=", "i", "[", "'labels'", "]", "try", ":", "prediction", "=", "self", ".", "country_model", ".", "predict", "(", "i", "[", "'matrix'", "]", ")", ".", "transpose", "(", ")", "[", "0", "]", "ranks", "=", "prediction", ".", "argsort", "(", ")", "[", ":", ":", "-", "1", "]", "labels", "=", "np", ".", "asarray", "(", "labels", ")", "[", "ranks", "]", "prediction", "=", "prediction", "[", "ranks", "]", "except", "ValueError", ":", "prediction", "=", "np", ".", "array", "(", "[", "0", "]", ")", "labels", "=", "np", ".", "array", "(", "[", "\"\"", "]", ")", "loc", "[", "'country_predicted'", "]", "=", "labels", "[", "0", "]", "loc", "[", "'country_conf'", "]", "=", "prediction", "[", "0", "]", "loc", "[", "'all_countries'", "]", "=", "labels", "loc", "[", "'all_confidence'", "]", "=", "prediction", "return", "proced" ]
NLP a doc, find its entities, get their features, and return the model's country guess for each. Maybe use a better name. Parameters ----------- doc: str or spaCy the document to country-resolve the entities in Returns ------- proced: list of dict the feature output of "make_country_features" updated with the model's estimated country for each entity. E.g.: {'all_confidence': array([ 0.95783567, 0.03769876, 0.00454875], dtype=float32), 'all_countries': array(['SYR', 'USA', 'JAM'], dtype='<U3'), 'country_conf': 0.95783567, 'country_predicted': 'SYR', 'features': {'ct_mention': '', 'ct_mention2': '', 'ctm_count1': 0, 'ctm_count2': 0, 'first_back': 'JAM', 'maj_vote': 'SYR', 'most_alt': 'USA', 'most_pop': 'SYR', 'word_vec': 'SYR', 'wv_confid': '29.3188'}, 'label': 'Syria', 'spans': [{'end': 26, 'start': 20}], 'text': "There's fighting in Aleppo and Homs.", 'word': 'Aleppo'}
[ "NLP", "a", "doc", "find", "its", "entities", "get", "their", "features", "and", "return", "the", "model", "s", "country", "guess", "for", "each", ".", "Maybe", "use", "a", "better", "name", "." ]
bd82b8bcc27621345c57cbe9ec7f8c8552620ffc
https://github.com/openeventdata/mordecai/blob/bd82b8bcc27621345c57cbe9ec7f8c8552620ffc/mordecai/geoparse.py#L647-L714
233,886
openeventdata/mordecai
mordecai/geoparse.py
Geoparser.get_admin1
def get_admin1(self, country_code2, admin1_code): """ Convert a geonames admin1 code to the associated place name. Parameters --------- country_code2: string The two character country code admin1_code: string The admin1 code to be converted. (Admin1 is the highest subnational political unit, state/region/provice/etc. admin1_dict: dictionary The dictionary containing the country code + admin1 code as keys and the admin1 names as values. Returns ------ admin1_name: string The admin1 name. If none is found, return "NA". """ lookup_key = ".".join([country_code2, admin1_code]) try: admin1_name = self._admin1_dict[lookup_key] return admin1_name except KeyError: #print("No admin code found for country {} and code {}".format(country_code2, admin1_code)) return "NA"
python
def get_admin1(self, country_code2, admin1_code): """ Convert a geonames admin1 code to the associated place name. Parameters --------- country_code2: string The two character country code admin1_code: string The admin1 code to be converted. (Admin1 is the highest subnational political unit, state/region/provice/etc. admin1_dict: dictionary The dictionary containing the country code + admin1 code as keys and the admin1 names as values. Returns ------ admin1_name: string The admin1 name. If none is found, return "NA". """ lookup_key = ".".join([country_code2, admin1_code]) try: admin1_name = self._admin1_dict[lookup_key] return admin1_name except KeyError: #print("No admin code found for country {} and code {}".format(country_code2, admin1_code)) return "NA"
[ "def", "get_admin1", "(", "self", ",", "country_code2", ",", "admin1_code", ")", ":", "lookup_key", "=", "\".\"", ".", "join", "(", "[", "country_code2", ",", "admin1_code", "]", ")", "try", ":", "admin1_name", "=", "self", ".", "_admin1_dict", "[", "lookup_key", "]", "return", "admin1_name", "except", "KeyError", ":", "#print(\"No admin code found for country {} and code {}\".format(country_code2, admin1_code))", "return", "\"NA\"" ]
Convert a geonames admin1 code to the associated place name.

Parameters
---------
country_code2: string
  The two character country code
admin1_code: string
  The admin1 code to be converted. (Admin1 is the highest
  subnational political unit, state/region/province/etc.)
admin1_dict: dictionary
  The dictionary containing the country code + admin1 code
  as keys and the admin1 names as values.

Returns
------
admin1_name: string
  The admin1 name. If none is found, return "NA".
[ "Convert", "a", "geonames", "admin1", "code", "to", "the", "associated", "place", "name", "." ]
bd82b8bcc27621345c57cbe9ec7f8c8552620ffc
https://github.com/openeventdata/mordecai/blob/bd82b8bcc27621345c57cbe9ec7f8c8552620ffc/mordecai/geoparse.py#L716-L742
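An illustrative call for get_admin1; the lookup key is just the two-character country code joined to the admin1 code with a period. The codes below are hypothetical examples, not guaranteed to be in the shipped dictionary:

    name = geo.get_admin1("US", "CA")     # "California" if the key "US.CA" is loaded
    missing = geo.get_admin1("ZZ", "99")  # unknown keys fall back to "NA"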
233,887
openeventdata/mordecai
mordecai/geoparse.py
Geoparser.ranker
def ranker(self, X, meta):
    """
    Sort the list of place features by relevance score.
    """
    # total score is just a sum of each row
    total_score = X.sum(axis=1).transpose()
    total_score = np.squeeze(np.asarray(total_score)) # matrix to array
    ranks = total_score.argsort()
    ranks = ranks[::-1]
    # sort the list of dicts according to ranks
    sorted_meta = [meta[r] for r in ranks]
    sorted_X = X[ranks]
    return (sorted_X, sorted_meta)
python
def ranker(self, X, meta):
    """
    Sort the list of place features by relevance score.
    """
    # total score is just a sum of each row
    total_score = X.sum(axis=1).transpose()
    total_score = np.squeeze(np.asarray(total_score)) # matrix to array
    ranks = total_score.argsort()
    ranks = ranks[::-1]
    # sort the list of dicts according to ranks
    sorted_meta = [meta[r] for r in ranks]
    sorted_X = X[ranks]
    return (sorted_X, sorted_meta)
[ "def", "ranker", "(", "self", ",", "X", ",", "meta", ")", ":", "# total score is just a sum of each row", "total_score", "=", "X", ".", "sum", "(", "axis", "=", "1", ")", ".", "transpose", "(", ")", "total_score", "=", "np", ".", "squeeze", "(", "np", ".", "asarray", "(", "total_score", ")", ")", "# matrix to array", "ranks", "=", "total_score", ".", "argsort", "(", ")", "ranks", "=", "ranks", "[", ":", ":", "-", "1", "]", "# sort the list of dicts according to ranks", "sorted_meta", "=", "[", "meta", "[", "r", "]", "for", "r", "in", "ranks", "]", "sorted_X", "=", "X", "[", "ranks", "]", "return", "(", "sorted_X", ",", "sorted_meta", ")" ]
Sort the list of place features by relevance score.
[ "Sort", "the", "place", "features", "list", "by", "the", "score", "of", "its", "relevance", "." ]
bd82b8bcc27621345c57cbe9ec7f8c8552620ffc
https://github.com/openeventdata/mordecai/blob/bd82b8bcc27621345c57cbe9ec7f8c8552620ffc/mordecai/geoparse.py#L819-L831
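The ranking step is a plain row-sum sort in descending order; a self-contained sketch of the same logic on toy data (candidate names are hypothetical):

    import numpy as np

    X = np.matrix([[0.1, 0.2],    # candidate 0, total 0.3
                   [0.5, 0.4],    # candidate 1, total 0.9
                   [0.0, 0.3]])   # candidate 2, total 0.3
    meta = [{"place_name": "A"}, {"place_name": "B"}, {"place_name": "C"}]

    total = np.squeeze(np.asarray(X.sum(axis=1)))  # matrix -> 1-D array of row sums
    ranks = total.argsort()[::-1]                  # highest total first
    sorted_meta = [meta[r] for r in ranks]         # "B" is ranked first
    sorted_X = X[ranks]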
233,888
openeventdata/mordecai
mordecai/geoparse.py
Geoparser.format_for_prodigy
def format_for_prodigy(self, X, meta, placename, return_feature_subset=False):
    """
    Given a feature matrix, geonames data, and the original query, construct a
    prodigy task.

    Make meta nicely readable: "A town in Germany"

    Parameters
    ----------
    X: matrix
        vector of features for ranking. Output of features_for_rank()
    meta: list of dictionaries
        other place information. Output of features_for_rank(). Used to provide
        information like "city in Germany" to the coding task.
    placename: str
        The extracted place name from text

    Returns
    --------
    task_list: list of dicts
        Tasks ready to be written to JSONL and used in Prodigy. Each potential match
        includes a text description so the annotator can pick the right one.
    """
    all_tasks = []
    sorted_X, sorted_meta = self.ranker(X, meta)
    sorted_meta = sorted_meta[:4]
    sorted_X = sorted_X[:4]
    for n, i in enumerate(sorted_meta):
        feature_code = i['feature_code']
        try:
            fc = self._code_to_text[feature_code]
        except KeyError:
            fc = ''
        text = ''.join(['"', i['place_name'], '"',
                        ", a ", fc,
                        " in ", i['country_code3'],
                        ", id: ", i['geonameid']])
        d = {"id" : n + 1, "text" : text}
        all_tasks.append(d)
    if return_feature_subset:
        return (all_tasks, sorted_meta, sorted_X)
    else:
        return all_tasks
python
def format_for_prodigy(self, X, meta, placename, return_feature_subset=False):
    """
    Given a feature matrix, geonames data, and the original query, construct a
    prodigy task.

    Make meta nicely readable: "A town in Germany"

    Parameters
    ----------
    X: matrix
        vector of features for ranking. Output of features_for_rank()
    meta: list of dictionaries
        other place information. Output of features_for_rank(). Used to provide
        information like "city in Germany" to the coding task.
    placename: str
        The extracted place name from text

    Returns
    --------
    task_list: list of dicts
        Tasks ready to be written to JSONL and used in Prodigy. Each potential match
        includes a text description so the annotator can pick the right one.
    """
    all_tasks = []
    sorted_X, sorted_meta = self.ranker(X, meta)
    sorted_meta = sorted_meta[:4]
    sorted_X = sorted_X[:4]
    for n, i in enumerate(sorted_meta):
        feature_code = i['feature_code']
        try:
            fc = self._code_to_text[feature_code]
        except KeyError:
            fc = ''
        text = ''.join(['"', i['place_name'], '"',
                        ", a ", fc,
                        " in ", i['country_code3'],
                        ", id: ", i['geonameid']])
        d = {"id" : n + 1, "text" : text}
        all_tasks.append(d)
    if return_feature_subset:
        return (all_tasks, sorted_meta, sorted_X)
    else:
        return all_tasks
[ "def", "format_for_prodigy", "(", "self", ",", "X", ",", "meta", ",", "placename", ",", "return_feature_subset", "=", "False", ")", ":", "all_tasks", "=", "[", "]", "sorted_X", ",", "sorted_meta", "=", "self", ".", "ranker", "(", "X", ",", "meta", ")", "sorted_meta", "=", "sorted_meta", "[", ":", "4", "]", "sorted_X", "=", "sorted_X", "[", ":", "4", "]", "for", "n", ",", "i", "in", "enumerate", "(", "sorted_meta", ")", ":", "feature_code", "=", "i", "[", "'feature_code'", "]", "try", ":", "fc", "=", "self", ".", "_code_to_text", "[", "feature_code", "]", "except", "KeyError", ":", "fc", "=", "''", "text", "=", "''", ".", "join", "(", "[", "'\"'", ",", "i", "[", "'place_name'", "]", ",", "'\"'", ",", "\", a \"", ",", "fc", ",", "\" in \"", ",", "i", "[", "'country_code3'", "]", ",", "\", id: \"", ",", "i", "[", "'geonameid'", "]", "]", ")", "d", "=", "{", "\"id\"", ":", "n", "+", "1", ",", "\"text\"", ":", "text", "}", "all_tasks", ".", "append", "(", "d", ")", "if", "return_feature_subset", ":", "return", "(", "all_tasks", ",", "sorted_meta", ",", "sorted_X", ")", "else", ":", "return", "all_tasks" ]
Given a feature matrix, geonames data, and the original query, construct a prodigy task.

Make meta nicely readable: "A town in Germany"

Parameters
----------
X: matrix
    vector of features for ranking. Output of features_for_rank()
meta: list of dictionaries
    other place information. Output of features_for_rank(). Used to provide
    information like "city in Germany" to the coding task.
placename: str
    The extracted place name from text

Returns
--------
task_list: list of dicts
    Tasks ready to be written to JSONL and used in Prodigy. Each potential match
    includes a text description so the annotator can pick the right one.
[ "Given", "a", "feature", "matrix", "geonames", "data", "and", "the", "original", "query", "construct", "a", "prodigy", "task", "." ]
bd82b8bcc27621345c57cbe9ec7f8c8552620ffc
https://github.com/openeventdata/mordecai/blob/bd82b8bcc27621345c57cbe9ec7f8c8552620ffc/mordecai/geoparse.py#L833-L880
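For orientation, the task dicts this returns follow the text pattern built in the loop above; place names, feature descriptions, and ids below are hypothetical:

    # [{"id": 1, "text": '"Aleppo", a seat of a first-order administrative division in SYR, id: 170063'},
    #  {"id": 2, "text": '"Aleppo", a first-order administrative division in SYR, id: 170062'}]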
233,889
openeventdata/mordecai
mordecai/geoparse.py
Geoparser.format_geonames
def format_geonames(self, entry, searchterm=None):
    """
    Pull out just the fields we want from a geonames entry

    To do:
     - switch to model picking

    Parameters
    -----------
    entry : dict
        ES/geonames result

    searchterm : str
        (not implemented). Needed for better results picking

    Returns
    --------
    new_res : dict
        containing selected fields from selected geonames entry
    """
    try:
        lat, lon = entry['coordinates'].split(",")
        new_res = {"admin1" : self.get_admin1(entry['country_code2'], entry['admin1_code']),
                   "lat" : lat,
                   "lon" : lon,
                   "country_code3" : entry["country_code3"],
                   "geonameid" : entry["geonameid"],
                   "place_name" : entry["name"],
                   "feature_class" : entry["feature_class"],
                   "feature_code" : entry["feature_code"]}
        return new_res
    except (IndexError, TypeError):
        # two conditions for these errors:
        # 1. there are no results for some reason (Index)
        # 2. res is set to "" because the country model was below the thresh
        new_res = {"admin1" : "",
                   "lat" : "",
                   "lon" : "",
                   "country_code3" : "",
                   "geonameid" : "",
                   "place_name" : "",
                   "feature_class" : "",
                   "feature_code" : ""}
        return new_res
python
def format_geonames(self, entry, searchterm=None):
    """
    Pull out just the fields we want from a geonames entry

    To do:
     - switch to model picking

    Parameters
    -----------
    entry : dict
        ES/geonames result

    searchterm : str
        (not implemented). Needed for better results picking

    Returns
    --------
    new_res : dict
        containing selected fields from selected geonames entry
    """
    try:
        lat, lon = entry['coordinates'].split(",")
        new_res = {"admin1" : self.get_admin1(entry['country_code2'], entry['admin1_code']),
                   "lat" : lat,
                   "lon" : lon,
                   "country_code3" : entry["country_code3"],
                   "geonameid" : entry["geonameid"],
                   "place_name" : entry["name"],
                   "feature_class" : entry["feature_class"],
                   "feature_code" : entry["feature_code"]}
        return new_res
    except (IndexError, TypeError):
        # two conditions for these errors:
        # 1. there are no results for some reason (Index)
        # 2. res is set to "" because the country model was below the thresh
        new_res = {"admin1" : "",
                   "lat" : "",
                   "lon" : "",
                   "country_code3" : "",
                   "geonameid" : "",
                   "place_name" : "",
                   "feature_class" : "",
                   "feature_code" : ""}
        return new_res
[ "def", "format_geonames", "(", "self", ",", "entry", ",", "searchterm", "=", "None", ")", ":", "try", ":", "lat", ",", "lon", "=", "entry", "[", "'coordinates'", "]", ".", "split", "(", "\",\"", ")", "new_res", "=", "{", "\"admin1\"", ":", "self", ".", "get_admin1", "(", "entry", "[", "'country_code2'", "]", ",", "entry", "[", "'admin1_code'", "]", ")", ",", "\"lat\"", ":", "lat", ",", "\"lon\"", ":", "lon", ",", "\"country_code3\"", ":", "entry", "[", "\"country_code3\"", "]", ",", "\"geonameid\"", ":", "entry", "[", "\"geonameid\"", "]", ",", "\"place_name\"", ":", "entry", "[", "\"name\"", "]", ",", "\"feature_class\"", ":", "entry", "[", "\"feature_class\"", "]", ",", "\"feature_code\"", ":", "entry", "[", "\"feature_code\"", "]", "}", "return", "new_res", "except", "(", "IndexError", ",", "TypeError", ")", ":", "# two conditions for these errors:", "# 1. there are no results for some reason (Index)", "# 2. res is set to \"\" because the country model was below the thresh", "new_res", "=", "{", "\"admin1\"", ":", "\"\"", ",", "\"lat\"", ":", "\"\"", ",", "\"lon\"", ":", "\"\"", ",", "\"country_code3\"", ":", "\"\"", ",", "\"geonameid\"", ":", "\"\"", ",", "\"place_name\"", ":", "\"\"", ",", "\"feature_class\"", ":", "\"\"", ",", "\"feature_code\"", ":", "\"\"", "}", "return", "new_res" ]
Pull out just the fields we want from a geonames entry

To do:
 - switch to model picking

Parameters
-----------
entry : dict
    ES/geonames result

searchterm : str
    (not implemented). Needed for better results picking

Returns
--------
new_res : dict
    containing selected fields from selected geonames entry
[ "Pull", "out", "just", "the", "fields", "we", "want", "from", "a", "geonames", "entry" ]
bd82b8bcc27621345c57cbe9ec7f8c8552620ffc
https://github.com/openeventdata/mordecai/blob/bd82b8bcc27621345c57cbe9ec7f8c8552620ffc/mordecai/geoparse.py#L883-L926
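A hedged sketch of format_geonames on a hand-built entry shaped like an ES/geonames hit; all field values below are hypothetical:

    entry = {"coordinates": "36.2,37.16", "country_code2": "SY",
             "admin1_code": "09", "country_code3": "SYR",
             "geonameid": "170063", "name": "Aleppo",
             "feature_class": "P", "feature_code": "PPLA"}
    res = geo.format_geonames(entry)
    # -> {"admin1": "Aleppo", "lat": "36.2", "lon": "37.16",
    #     "country_code3": "SYR", "geonameid": "170063", ...}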
233,890
openeventdata/mordecai
mordecai/geoparse.py
Geoparser.clean_proced
def clean_proced(self, proced): """Small helper function to delete the features from the final dictionary. These features are mostly interesting for debugging but won't be relevant for most users. """ for loc in proced: try: del loc['all_countries'] except KeyError: pass try: del loc['matrix'] except KeyError: pass try: del loc['all_confidence'] except KeyError: pass try: del loc['place_confidence'] except KeyError: pass try: del loc['text'] except KeyError: pass try: del loc['label'] except KeyError: pass try: del loc['features'] except KeyError: pass return proced
python
def clean_proced(self, proced): """Small helper function to delete the features from the final dictionary. These features are mostly interesting for debugging but won't be relevant for most users. """ for loc in proced: try: del loc['all_countries'] except KeyError: pass try: del loc['matrix'] except KeyError: pass try: del loc['all_confidence'] except KeyError: pass try: del loc['place_confidence'] except KeyError: pass try: del loc['text'] except KeyError: pass try: del loc['label'] except KeyError: pass try: del loc['features'] except KeyError: pass return proced
[ "def", "clean_proced", "(", "self", ",", "proced", ")", ":", "for", "loc", "in", "proced", ":", "try", ":", "del", "loc", "[", "'all_countries'", "]", "except", "KeyError", ":", "pass", "try", ":", "del", "loc", "[", "'matrix'", "]", "except", "KeyError", ":", "pass", "try", ":", "del", "loc", "[", "'all_confidence'", "]", "except", "KeyError", ":", "pass", "try", ":", "del", "loc", "[", "'place_confidence'", "]", "except", "KeyError", ":", "pass", "try", ":", "del", "loc", "[", "'text'", "]", "except", "KeyError", ":", "pass", "try", ":", "del", "loc", "[", "'label'", "]", "except", "KeyError", ":", "pass", "try", ":", "del", "loc", "[", "'features'", "]", "except", "KeyError", ":", "pass", "return", "proced" ]
Small helper function to delete the features from the final dictionary. These features are mostly interesting for debugging but won't be relevant for most users.
[ "Small", "helper", "function", "to", "delete", "the", "features", "from", "the", "final", "dictionary", ".", "These", "features", "are", "mostly", "interesting", "for", "debugging", "but", "won", "t", "be", "relevant", "for", "most", "users", "." ]
bd82b8bcc27621345c57cbe9ec7f8c8552620ffc
https://github.com/openeventdata/mordecai/blob/bd82b8bcc27621345c57cbe9ec7f8c8552620ffc/mordecai/geoparse.py#L928-L961
233,891
openeventdata/mordecai
mordecai/geoparse.py
Geoparser.geoparse
def geoparse(self, doc, verbose=False): """Main geoparsing function. Text to extracted, resolved entities. Parameters ---------- doc : str or spaCy The document to be geoparsed. Can be either raw text or already spacy processed. In some cases, it makes sense to bulk parse using spacy's .pipe() before sending through to Mordecai Returns ------- proced : list of dicts Each entity gets an entry in the list, with the dictionary including geo info, spans, and optionally, the input features. """ if not hasattr(doc, "ents"): doc = nlp(doc) proced = self.infer_country(doc) if not proced: return [] # logging! #print("Nothing came back from infer_country...") if self.threads: pool = ThreadPool(len(proced)) results = pool.map(self.proc_lookup_country, proced) pool.close() pool.join() else: results = [] for loc in proced: # if the confidence is too low, don't use the country info if loc['country_conf'] > self.country_threshold: res = self.query_geonames_country(loc['word'], loc['country_predicted']) results.append(res) else: results.append("") for n, loc in enumerate(proced): res = results[n] try: _ = res['hits']['hits'] # If there's no geonames result, what to do? # For now, just continue. # In the future, delete? Or add an empty "loc" field? except (TypeError, KeyError): continue # Pick the best place X, meta = self.features_for_rank(loc, res) if X.shape[1] == 0: # This happens if there are no results... continue all_tasks, sorted_meta, sorted_X = self.format_for_prodigy(X, meta, loc['word'], return_feature_subset=True) fl_pad = np.pad(sorted_X, ((0, 4 - sorted_X.shape[0]), (0, 0)), 'constant') fl_unwrap = fl_pad.flatten() prediction = self.rank_model.predict(np.asmatrix(fl_unwrap)) place_confidence = prediction.max() loc['geo'] = sorted_meta[prediction.argmax()] loc['place_confidence'] = place_confidence if not verbose: proced = self.clean_proced(proced) return proced
python
def geoparse(self, doc, verbose=False): """Main geoparsing function. Text to extracted, resolved entities. Parameters ---------- doc : str or spaCy The document to be geoparsed. Can be either raw text or already spacy processed. In some cases, it makes sense to bulk parse using spacy's .pipe() before sending through to Mordecai Returns ------- proced : list of dicts Each entity gets an entry in the list, with the dictionary including geo info, spans, and optionally, the input features. """ if not hasattr(doc, "ents"): doc = nlp(doc) proced = self.infer_country(doc) if not proced: return [] # logging! #print("Nothing came back from infer_country...") if self.threads: pool = ThreadPool(len(proced)) results = pool.map(self.proc_lookup_country, proced) pool.close() pool.join() else: results = [] for loc in proced: # if the confidence is too low, don't use the country info if loc['country_conf'] > self.country_threshold: res = self.query_geonames_country(loc['word'], loc['country_predicted']) results.append(res) else: results.append("") for n, loc in enumerate(proced): res = results[n] try: _ = res['hits']['hits'] # If there's no geonames result, what to do? # For now, just continue. # In the future, delete? Or add an empty "loc" field? except (TypeError, KeyError): continue # Pick the best place X, meta = self.features_for_rank(loc, res) if X.shape[1] == 0: # This happens if there are no results... continue all_tasks, sorted_meta, sorted_X = self.format_for_prodigy(X, meta, loc['word'], return_feature_subset=True) fl_pad = np.pad(sorted_X, ((0, 4 - sorted_X.shape[0]), (0, 0)), 'constant') fl_unwrap = fl_pad.flatten() prediction = self.rank_model.predict(np.asmatrix(fl_unwrap)) place_confidence = prediction.max() loc['geo'] = sorted_meta[prediction.argmax()] loc['place_confidence'] = place_confidence if not verbose: proced = self.clean_proced(proced) return proced
[ "def", "geoparse", "(", "self", ",", "doc", ",", "verbose", "=", "False", ")", ":", "if", "not", "hasattr", "(", "doc", ",", "\"ents\"", ")", ":", "doc", "=", "nlp", "(", "doc", ")", "proced", "=", "self", ".", "infer_country", "(", "doc", ")", "if", "not", "proced", ":", "return", "[", "]", "# logging!", "#print(\"Nothing came back from infer_country...\")", "if", "self", ".", "threads", ":", "pool", "=", "ThreadPool", "(", "len", "(", "proced", ")", ")", "results", "=", "pool", ".", "map", "(", "self", ".", "proc_lookup_country", ",", "proced", ")", "pool", ".", "close", "(", ")", "pool", ".", "join", "(", ")", "else", ":", "results", "=", "[", "]", "for", "loc", "in", "proced", ":", "# if the confidence is too low, don't use the country info", "if", "loc", "[", "'country_conf'", "]", ">", "self", ".", "country_threshold", ":", "res", "=", "self", ".", "query_geonames_country", "(", "loc", "[", "'word'", "]", ",", "loc", "[", "'country_predicted'", "]", ")", "results", ".", "append", "(", "res", ")", "else", ":", "results", ".", "append", "(", "\"\"", ")", "for", "n", ",", "loc", "in", "enumerate", "(", "proced", ")", ":", "res", "=", "results", "[", "n", "]", "try", ":", "_", "=", "res", "[", "'hits'", "]", "[", "'hits'", "]", "# If there's no geonames result, what to do?", "# For now, just continue.", "# In the future, delete? Or add an empty \"loc\" field?", "except", "(", "TypeError", ",", "KeyError", ")", ":", "continue", "# Pick the best place", "X", ",", "meta", "=", "self", ".", "features_for_rank", "(", "loc", ",", "res", ")", "if", "X", ".", "shape", "[", "1", "]", "==", "0", ":", "# This happens if there are no results...", "continue", "all_tasks", ",", "sorted_meta", ",", "sorted_X", "=", "self", ".", "format_for_prodigy", "(", "X", ",", "meta", ",", "loc", "[", "'word'", "]", ",", "return_feature_subset", "=", "True", ")", "fl_pad", "=", "np", ".", "pad", "(", "sorted_X", ",", "(", "(", "0", ",", "4", "-", "sorted_X", ".", "shape", "[", "0", "]", ")", ",", "(", "0", ",", "0", ")", ")", ",", "'constant'", ")", "fl_unwrap", "=", "fl_pad", ".", "flatten", "(", ")", "prediction", "=", "self", ".", "rank_model", ".", "predict", "(", "np", ".", "asmatrix", "(", "fl_unwrap", ")", ")", "place_confidence", "=", "prediction", ".", "max", "(", ")", "loc", "[", "'geo'", "]", "=", "sorted_meta", "[", "prediction", ".", "argmax", "(", ")", "]", "loc", "[", "'place_confidence'", "]", "=", "place_confidence", "if", "not", "verbose", ":", "proced", "=", "self", ".", "clean_proced", "(", "proced", ")", "return", "proced" ]
Main geoparsing function. Text to extracted, resolved entities. Parameters ---------- doc : str or spaCy The document to be geoparsed. Can be either raw text or already spacy processed. In some cases, it makes sense to bulk parse using spacy's .pipe() before sending through to Mordecai Returns ------- proced : list of dicts Each entity gets an entry in the list, with the dictionary including geo info, spans, and optionally, the input features.
[ "Main", "geoparsing", "function", ".", "Text", "to", "extracted", "resolved", "entities", "." ]
bd82b8bcc27621345c57cbe9ec7f8c8552620ffc
https://github.com/openeventdata/mordecai/blob/bd82b8bcc27621345c57cbe9ec7f8c8552620ffc/mordecai/geoparse.py#L963-L1024
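An end-to-end usage sketch for geoparse, again assuming the geonames Elasticsearch index is reachable. Entities that could not be resolved simply lack the "geo" key in the default (non-verbose) output:

    out = geo.geoparse("There's fighting in Aleppo and Homs.")
    for loc in out:
        if "geo" in loc:  # only resolved entities carry coordinates
            print(loc["word"], loc["country_predicted"],
                  loc["geo"]["lat"], loc["geo"]["lon"])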
233,892
openeventdata/mordecai
mordecai/geoparse.py
Geoparser.batch_geoparse
def batch_geoparse(self, text_list):
    """
    Batch geoparsing function. Take in a list of text documents and return a list of lists
    of the geoparsed documents. The speed improvements come exclusively from using spaCy's `nlp.pipe`.

    Parameters
    ----------
    text_list : list of strs
        List of documents. The documents should not have been pre-processed by spaCy.

    Returns
    -------
    processed : list of list of dictionaries.
        The list is the same length as the input list of documents. Each element is a list of dicts, one for
        each geolocated entity.
    """
    if not self.threads:
        print("batch_geoparse should be used with threaded searches. Please set `threads=True` when initializing the geoparser.")
    nlped_docs = list(nlp.pipe(text_list, as_tuples=False, n_threads=multiprocessing.cpu_count()))
    processed = []
    for i in tqdm(nlped_docs, disable=not self.progress):
        p = self.geoparse(i)
        processed.append(p)
    return processed
python
def batch_geoparse(self, text_list):
    """
    Batch geoparsing function. Take in a list of text documents and return a list of lists
    of the geoparsed documents. The speed improvements come exclusively from using spaCy's `nlp.pipe`.

    Parameters
    ----------
    text_list : list of strs
        List of documents. The documents should not have been pre-processed by spaCy.

    Returns
    -------
    processed : list of list of dictionaries.
        The list is the same length as the input list of documents. Each element is a list of dicts, one for
        each geolocated entity.
    """
    if not self.threads:
        print("batch_geoparse should be used with threaded searches. Please set `threads=True` when initializing the geoparser.")
    nlped_docs = list(nlp.pipe(text_list, as_tuples=False, n_threads=multiprocessing.cpu_count()))
    processed = []
    for i in tqdm(nlped_docs, disable=not self.progress):
        p = self.geoparse(i)
        processed.append(p)
    return processed
[ "def", "batch_geoparse", "(", "self", ",", "text_list", ")", ":", "if", "not", "self", ".", "threads", ":", "print", "(", "\"batch_geoparsed should be used with threaded searches. Please set `threads=True` when initializing the geoparser.\"", ")", "nlped_docs", "=", "list", "(", "nlp", ".", "pipe", "(", "text_list", ",", "as_tuples", "=", "False", ",", "n_threads", "=", "multiprocessing", ".", "cpu_count", "(", ")", ")", ")", "processed", "=", "[", "]", "for", "i", "in", "tqdm", "(", "nlped_docs", ",", "disable", "=", "not", "self", ".", "progress", ")", ":", "p", "=", "self", ".", "geoparse", "(", "i", ")", "processed", ".", "append", "(", "p", ")", "return", "processed" ]
Batch geoparsing function. Take in a list of text documents and return a list of lists of the geoparsed documents. The speed improvements come exclusively from using spaCy's `nlp.pipe`. Parameters ---------- text_list : list of strs List of documents. The documents should not have been pre-processed by spaCy. Returns ------- processed : list of list of dictionaries. The list is the same length as the input list of documents. Each element is a list of dicts, one for each geolocated entity.
[ "Batch", "geoparsing", "function", ".", "Take", "in", "a", "list", "of", "text", "documents", "and", "return", "a", "list", "of", "lists", "of", "the", "geoparsed", "documents", ".", "The", "speed", "improvements", "come", "exclusively", "from", "using", "spaCy", "s", "nlp", ".", "pipe", "." ]
bd82b8bcc27621345c57cbe9ec7f8c8552620ffc
https://github.com/openeventdata/mordecai/blob/bd82b8bcc27621345c57cbe9ec7f8c8552620ffc/mordecai/geoparse.py#L1027-L1050
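The warning message above implies the constructor accepts a threads flag; a hedged batch-mode sketch built on that:

    geo = Geoparser(threads=True)  # threads=True enables the fast path, per the warning above
    docs = ["Fighting in Aleppo.", "Protests in Cairo and Alexandria."]
    results = geo.batch_geoparse(docs)  # one list of entity dicts per input document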
233,893
openeventdata/mordecai
train/train_country_model.py
entry_to_matrix
def entry_to_matrix(prodigy_entry):
    """
    Take in a line from the labeled json and return a vector of labels and a matrix of features
    for training.

    Two ways to get 0s:
      - marked as false by user
      - generated automatically from other entries when guess is correct

    Rather than iterating through entities, just get the number of the correct entity directly.
    Then get one or two GPEs before and after.
    """
    doc = prodigy_entry['text']
    doc = nlp(doc)
    geo_proced = geo.process_text(doc, require_maj=False)
    # find the geoproced entity that matches the Prodigy entry
    ent_text = np.asarray([gp['word'] for gp in geo_proced])
    # get mask for correct ent
    #print(ent_text)
    match = ent_text == prodigy_entry['meta']['word']
    #print("match: ", match)
    anti_match = np.abs(match - 1)
    #print("Anti-match ", anti_match)
    match_position = match.argmax()
    geo_proc = geo_proced[match_position]

    iso = geo.cts[prodigy_entry['label']] # convert country text label to ISO
    feat = geo.features_to_matrix(geo_proc)
    answer_x = feat['matrix']
    label = np.asarray(feat['labels'])

    if prodigy_entry['answer'] == "accept":
        answer_binary = label == iso
        answer_binary = answer_binary.astype('int')
        #print(answer_x.shape)
        #print(answer_binary.shape)
    elif prodigy_entry['answer'] == "reject":
        # all we know is that the label that was presented is wrong.
        # just return the corresponding row in the feature matrix,
        # and force the label to be 0
        answer_binary = label == iso
        answer_x = answer_x[answer_binary,:] # just take the row corresponding to the answer
        answer_binary = np.asarray([0]) # set the outcome to 0 because reject

    # NEED TO SHARE LABELS ACROSS! THE CORRECT ONE MIGHT NOT EVEN APPEAR FOR ALL ENTITIES
    x = feat['matrix']
    other_x = x[anti_match,:]
    #print(other_x)
    #print(label[anti_match])
    # here, need to get the rows corresponding to the correct label
    # print(geo_proc['meta'])
    # here's where we get the other place name features.
    # Need to:
    #  1. do features_to_matrix but use the label of the current entity
    #     to determine 0/1 in the feature matrix
    #  2. put them all into one big feature matrix,
    #  3. ...ordering by distance? And need to decide max entity length
    #  4. also include these distances as one of the features
    #print(answer_x.shape[0])
    #print(answer_binary.shape[0])
    try:
        if answer_x.shape[0] == answer_binary.shape[0]:
            return (answer_x, answer_binary)
    except:
        pass
python
def entry_to_matrix(prodigy_entry):
    """
    Take in a line from the labeled json and return a vector of labels and a matrix of features
    for training.

    Two ways to get 0s:
      - marked as false by user
      - generated automatically from other entries when guess is correct

    Rather than iterating through entities, just get the number of the correct entity directly.
    Then get one or two GPEs before and after.
    """
    doc = prodigy_entry['text']
    doc = nlp(doc)
    geo_proced = geo.process_text(doc, require_maj=False)
    # find the geoproced entity that matches the Prodigy entry
    ent_text = np.asarray([gp['word'] for gp in geo_proced])
    # get mask for correct ent
    #print(ent_text)
    match = ent_text == prodigy_entry['meta']['word']
    #print("match: ", match)
    anti_match = np.abs(match - 1)
    #print("Anti-match ", anti_match)
    match_position = match.argmax()
    geo_proc = geo_proced[match_position]

    iso = geo.cts[prodigy_entry['label']] # convert country text label to ISO
    feat = geo.features_to_matrix(geo_proc)
    answer_x = feat['matrix']
    label = np.asarray(feat['labels'])

    if prodigy_entry['answer'] == "accept":
        answer_binary = label == iso
        answer_binary = answer_binary.astype('int')
        #print(answer_x.shape)
        #print(answer_binary.shape)
    elif prodigy_entry['answer'] == "reject":
        # all we know is that the label that was presented is wrong.
        # just return the corresponding row in the feature matrix,
        # and force the label to be 0
        answer_binary = label == iso
        answer_x = answer_x[answer_binary,:] # just take the row corresponding to the answer
        answer_binary = np.asarray([0]) # set the outcome to 0 because reject

    # NEED TO SHARE LABELS ACROSS! THE CORRECT ONE MIGHT NOT EVEN APPEAR FOR ALL ENTITIES
    x = feat['matrix']
    other_x = x[anti_match,:]
    #print(other_x)
    #print(label[anti_match])
    # here, need to get the rows corresponding to the correct label
    # print(geo_proc['meta'])
    # here's where we get the other place name features.
    # Need to:
    #  1. do features_to_matrix but use the label of the current entity
    #     to determine 0/1 in the feature matrix
    #  2. put them all into one big feature matrix,
    #  3. ...ordering by distance? And need to decide max entity length
    #  4. also include these distances as one of the features
    #print(answer_x.shape[0])
    #print(answer_binary.shape[0])
    try:
        if answer_x.shape[0] == answer_binary.shape[0]:
            return (answer_x, answer_binary)
    except:
        pass
[ "def", "entry_to_matrix", "(", "prodigy_entry", ")", ":", "doc", "=", "prodigy_entry", "[", "'text'", "]", "doc", "=", "nlp", "(", "doc", ")", "geo_proced", "=", "geo", ".", "process_text", "(", "doc", ",", "require_maj", "=", "False", ")", "# find the geoproced entity that matches the Prodigy entry", "ent_text", "=", "np", ".", "asarray", "(", "[", "gp", "[", "'word'", "]", "for", "gp", "in", "geo_proced", "]", ")", "# get mask for correct ent", "#print(ent_text)", "match", "=", "ent_text", "==", "entry", "[", "'meta'", "]", "[", "'word'", "]", "#print(\"match: \", match)", "anti_match", "=", "np", ".", "abs", "(", "match", "-", "1", ")", "#print(\"Anti-match \", anti_match)", "match_position", "=", "match", ".", "argmax", "(", ")", "geo_proc", "=", "geo_proced", "[", "match_position", "]", "iso", "=", "geo", ".", "cts", "[", "prodigy_entry", "[", "'label'", "]", "]", "# convert country text label to ISO", "feat", "=", "geo", ".", "features_to_matrix", "(", "geo_proc", ")", "answer_x", "=", "feat", "[", "'matrix'", "]", "label", "=", "np", ".", "asarray", "(", "feat", "[", "'labels'", "]", ")", "if", "prodigy_entry", "[", "'answer'", "]", "==", "\"accept\"", ":", "answer_binary", "=", "label", "==", "iso", "answer_binary", "=", "answer_binary", ".", "astype", "(", "'int'", ")", "#print(answer_x.shape)", "#print(answer_binary.shape)", "elif", "prodigy_entry", "[", "'answer'", "]", "==", "\"reject\"", ":", "# all we know is that the label that was presented is wrong.", "# just return the corresponding row in the feature matrix,", "# and force the label to be 0", "answer_binary", "=", "label", "==", "iso", "answer_x", "=", "answer_x", "[", "answer_binary", ",", ":", "]", "# just take the row corresponding to the answer", "answer_binary", "=", "np", ".", "asarray", "(", "[", "0", "]", ")", "# set the outcome to 0 because reject", "# NEED TO SHARE LABELS ACROSS! THE CORRECT ONE MIGHT NOT EVEN APPEAR FOR ALL ENTITIES", "x", "=", "feat", "[", "'matrix'", "]", "other_x", "=", "x", "[", "anti_match", ",", ":", "]", "#print(other_x)", "#print(label[anti_match])", "# here, need to get the rows corresponding to the correct label", "# print(geo_proc['meta'])", "# here's where we get the other place name features.", "# Need to:", "# 1. do features_to_matrix but use the label of the current entity", "# to determine 0/1 in the feature matrix", "# 2. put them all into one big feature matrix,", "# 3. ...ordering by distance? And need to decide max entity length", "# 4. also include these distances as one of the features", "#print(answer_x.shape[0])", "#print(answer_binary.shape[0])", "try", ":", "if", "answer_x", ".", "shape", "[", "0", "]", "==", "answer_binary", ".", "shape", "[", "0", "]", ":", "return", "(", "answer_x", ",", "answer_binary", ")", "except", ":", "pass" ]
Take in a line from the labeled json and return a vector of labels and a matrix of features for training. Two ways to get 0s: - marked as false by user - generated automatically from other entries when guess is correct Rather than iterating through entities, just get the number of the correct entity directly. Then get one or two GPEs before and after.
[ "Take", "in", "a", "line", "from", "the", "labeled", "json", "and", "return", "a", "vector", "of", "labels", "and", "a", "matrix", "of", "features", "for", "training", "." ]
bd82b8bcc27621345c57cbe9ec7f8c8552620ffc
https://github.com/openeventdata/mordecai/blob/bd82b8bcc27621345c57cbe9ec7f8c8552620ffc/train/train_country_model.py#L22-L92
233,894
picklepete/pyicloud
pyicloud/services/findmyiphone.py
FindMyiPhoneServiceManager.refresh_client
def refresh_client(self):
    """ Refreshes the FindMyiPhoneService endpoint. This ensures that the location data is up-to-date. """
    req = self.session.post(
        self._fmip_refresh_url,
        params=self.params,
        data=json.dumps(
            {
                'clientContext': {
                    'fmly': True,
                    'shouldLocate': True,
                    'selectedDevice': 'all',
                }
            }
        )
    )
    self.response = req.json()

    for device_info in self.response['content']:
        device_id = device_info['id']
        if device_id not in self._devices:
            self._devices[device_id] = AppleDevice(
                device_info,
                self.session,
                self.params,
                manager=self,
                sound_url=self._fmip_sound_url,
                lost_url=self._fmip_lost_url,
                message_url=self._fmip_message_url,
            )
        else:
            self._devices[device_id].update(device_info)

    if not self._devices:
        raise PyiCloudNoDevicesException()
python
def refresh_client(self):
    """ Refreshes the FindMyiPhoneService endpoint. This ensures that the location data is up-to-date. """
    req = self.session.post(
        self._fmip_refresh_url,
        params=self.params,
        data=json.dumps(
            {
                'clientContext': {
                    'fmly': True,
                    'shouldLocate': True,
                    'selectedDevice': 'all',
                }
            }
        )
    )
    self.response = req.json()

    for device_info in self.response['content']:
        device_id = device_info['id']
        if device_id not in self._devices:
            self._devices[device_id] = AppleDevice(
                device_info,
                self.session,
                self.params,
                manager=self,
                sound_url=self._fmip_sound_url,
                lost_url=self._fmip_lost_url,
                message_url=self._fmip_message_url,
            )
        else:
            self._devices[device_id].update(device_info)

    if not self._devices:
        raise PyiCloudNoDevicesException()
[ "def", "refresh_client", "(", "self", ")", ":", "req", "=", "self", ".", "session", ".", "post", "(", "self", ".", "_fmip_refresh_url", ",", "params", "=", "self", ".", "params", ",", "data", "=", "json", ".", "dumps", "(", "{", "'clientContext'", ":", "{", "'fmly'", ":", "True", ",", "'shouldLocate'", ":", "True", ",", "'selectedDevice'", ":", "'all'", ",", "}", "}", ")", ")", "self", ".", "response", "=", "req", ".", "json", "(", ")", "for", "device_info", "in", "self", ".", "response", "[", "'content'", "]", ":", "device_id", "=", "device_info", "[", "'id'", "]", "if", "device_id", "not", "in", "self", ".", "_devices", ":", "self", ".", "_devices", "[", "device_id", "]", "=", "AppleDevice", "(", "device_info", ",", "self", ".", "session", ",", "self", ".", "params", ",", "manager", "=", "self", ",", "sound_url", "=", "self", ".", "_fmip_sound_url", ",", "lost_url", "=", "self", ".", "_fmip_lost_url", ",", "message_url", "=", "self", ".", "_fmip_message_url", ",", ")", "else", ":", "self", ".", "_devices", "[", "device_id", "]", ".", "update", "(", "device_info", ")", "if", "not", "self", ".", "_devices", ":", "raise", "PyiCloudNoDevicesException", "(", ")" ]
Refreshes the FindMyiPhoneService endpoint. This ensures that the location data is up-to-date.
[ "Refreshes", "the", "FindMyiPhoneService", "endpoint" ]
9bb6d750662ce24c8febc94807ddbdcdf3cadaa2
https://github.com/picklepete/pyicloud/blob/9bb6d750662ce24c8febc94807ddbdcdf3cadaa2/pyicloud/services/findmyiphone.py#L30-L67
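refresh_client is normally reached indirectly through the devices manager; a hedged pyicloud sketch (credentials are placeholders, and two-factor accounts need an extra verification step not shown here):

    from pyicloud import PyiCloudService

    api = PyiCloudService("user@example.com", "password")
    devices = api.devices  # FindMyiPhoneServiceManager; triggers a refresh
    phone = devices[0]     # AppleDevice populated from the refreshed payload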
233,895
picklepete/pyicloud
pyicloud/services/findmyiphone.py
AppleDevice.status
def status(self, additional=[]): """ Returns status information for device. This returns only a subset of possible properties. """ self.manager.refresh_client() fields = ['batteryLevel', 'deviceDisplayName', 'deviceStatus', 'name'] fields += additional properties = {} for field in fields: properties[field] = self.content.get(field) return properties
python
def status(self, additional=[]): """ Returns status information for device. This returns only a subset of possible properties. """ self.manager.refresh_client() fields = ['batteryLevel', 'deviceDisplayName', 'deviceStatus', 'name'] fields += additional properties = {} for field in fields: properties[field] = self.content.get(field) return properties
[ "def", "status", "(", "self", ",", "additional", "=", "[", "]", ")", ":", "self", ".", "manager", ".", "refresh_client", "(", ")", "fields", "=", "[", "'batteryLevel'", ",", "'deviceDisplayName'", ",", "'deviceStatus'", ",", "'name'", "]", "fields", "+=", "additional", "properties", "=", "{", "}", "for", "field", "in", "fields", ":", "properties", "[", "field", "]", "=", "self", ".", "content", ".", "get", "(", "field", ")", "return", "properties" ]
Returns status information for device. This returns only a subset of possible properties.
[ "Returns", "status", "information", "for", "device", "." ]
9bb6d750662ce24c8febc94807ddbdcdf3cadaa2
https://github.com/picklepete/pyicloud/blob/9bb6d750662ce24c8febc94807ddbdcdf3cadaa2/pyicloud/services/findmyiphone.py#L115-L126
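Usage sketch for status; the additional list pulls extra raw fields from the device content by key name (the key below is a plausible guess, not a documented field):

    info = phone.status()
    print(info["batteryLevel"], info["deviceStatus"], info["name"])
    more = phone.status(additional=["isMac"])  # "isMac" is a hypothetical extra key

The mutable default argument (additional=[]) is harmless here only because the function never mutates it; fields += additional extends a fresh local list each call.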
233,896
picklepete/pyicloud
pyicloud/services/findmyiphone.py
AppleDevice.lost_device
def lost_device( self, number, text='This iPhone has been lost. Please call me.', newpasscode="" ): """ Send a request to the device to trigger 'lost mode'. The device will show the message in `text`, and if a number has been passed, then the person holding the device can call the number without entering the passcode. """ data = json.dumps({ 'text': text, 'userText': True, 'ownerNbr': number, 'lostModeEnabled': True, 'trackingEnabled': True, 'device': self.content['id'], 'passcode': newpasscode }) self.session.post( self.lost_url, params=self.params, data=data )
python
def lost_device( self, number, text='This iPhone has been lost. Please call me.', newpasscode="" ): """ Send a request to the device to trigger 'lost mode'. The device will show the message in `text`, and if a number has been passed, then the person holding the device can call the number without entering the passcode. """ data = json.dumps({ 'text': text, 'userText': True, 'ownerNbr': number, 'lostModeEnabled': True, 'trackingEnabled': True, 'device': self.content['id'], 'passcode': newpasscode }) self.session.post( self.lost_url, params=self.params, data=data )
[ "def", "lost_device", "(", "self", ",", "number", ",", "text", "=", "'This iPhone has been lost. Please call me.'", ",", "newpasscode", "=", "\"\"", ")", ":", "data", "=", "json", ".", "dumps", "(", "{", "'text'", ":", "text", ",", "'userText'", ":", "True", ",", "'ownerNbr'", ":", "number", ",", "'lostModeEnabled'", ":", "True", ",", "'trackingEnabled'", ":", "True", ",", "'device'", ":", "self", ".", "content", "[", "'id'", "]", ",", "'passcode'", ":", "newpasscode", "}", ")", "self", ".", "session", ".", "post", "(", "self", ".", "lost_url", ",", "params", "=", "self", ".", "params", ",", "data", "=", "data", ")" ]
Send a request to the device to trigger 'lost mode'. The device will show the message in `text`, and if a number has been passed, then the person holding the device can call the number without entering the passcode.
[ "Send", "a", "request", "to", "the", "device", "to", "trigger", "lost", "mode", "." ]
9bb6d750662ce24c8febc94807ddbdcdf3cadaa2
https://github.com/picklepete/pyicloud/blob/9bb6d750662ce24c8febc94807ddbdcdf3cadaa2/pyicloud/services/findmyiphone.py#L169-L193
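A hedged call sketch matching the signature above; the phone number is a placeholder:

    phone.lost_device("+1 555 0100",
                      text="Lost iPhone, please call this number.",
                      newpasscode="1234")  # optionally sets a new passcode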
233,897
picklepete/pyicloud
pyicloud/services/calendar.py
CalendarService.events
def events(self, from_dt=None, to_dt=None): """ Retrieves events for a given date range, by default, this month. """ self.refresh_client(from_dt, to_dt) return self.response['Event']
python
def events(self, from_dt=None, to_dt=None): """ Retrieves events for a given date range, by default, this month. """ self.refresh_client(from_dt, to_dt) return self.response['Event']
[ "def", "events", "(", "self", ",", "from_dt", "=", "None", ",", "to_dt", "=", "None", ")", ":", "self", ".", "refresh_client", "(", "from_dt", ",", "to_dt", ")", "return", "self", ".", "response", "[", "'Event'", "]" ]
Retrieves events for a given date range, by default, this month.
[ "Retrieves", "events", "for", "a", "given", "date", "range", "by", "default", "this", "month", "." ]
9bb6d750662ce24c8febc94807ddbdcdf3cadaa2
https://github.com/picklepete/pyicloud/blob/9bb6d750662ce24c8febc94807ddbdcdf3cadaa2/pyicloud/services/calendar.py#L58-L63
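Usage sketch for events; with no arguments it falls back to the current month per the docstring:

    from datetime import datetime

    january = api.calendar.events(datetime(2018, 1, 1), datetime(2018, 1, 31))
    for event in january:
        print(event.get("title"))  # "title" is assumed from iCloud's Event payload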
233,898
picklepete/pyicloud
pyicloud/services/calendar.py
CalendarService.calendars
def calendars(self): """ Retrieves calendars for this month """ today = datetime.today() first_day, last_day = monthrange(today.year, today.month) from_dt = datetime(today.year, today.month, first_day) to_dt = datetime(today.year, today.month, last_day) params = dict(self.params) params.update({ 'lang': 'en-us', 'usertz': get_localzone().zone, 'startDate': from_dt.strftime('%Y-%m-%d'), 'endDate': to_dt.strftime('%Y-%m-%d') }) req = self.session.get(self._calendars, params=params) self.response = req.json() return self.response['Collection']
python
def calendars(self): """ Retrieves calendars for this month """ today = datetime.today() first_day, last_day = monthrange(today.year, today.month) from_dt = datetime(today.year, today.month, first_day) to_dt = datetime(today.year, today.month, last_day) params = dict(self.params) params.update({ 'lang': 'en-us', 'usertz': get_localzone().zone, 'startDate': from_dt.strftime('%Y-%m-%d'), 'endDate': to_dt.strftime('%Y-%m-%d') }) req = self.session.get(self._calendars, params=params) self.response = req.json() return self.response['Collection']
[ "def", "calendars", "(", "self", ")", ":", "today", "=", "datetime", ".", "today", "(", ")", "first_day", ",", "last_day", "=", "monthrange", "(", "today", ".", "year", ",", "today", ".", "month", ")", "from_dt", "=", "datetime", "(", "today", ".", "year", ",", "today", ".", "month", ",", "first_day", ")", "to_dt", "=", "datetime", "(", "today", ".", "year", ",", "today", ".", "month", ",", "last_day", ")", "params", "=", "dict", "(", "self", ".", "params", ")", "params", ".", "update", "(", "{", "'lang'", ":", "'en-us'", ",", "'usertz'", ":", "get_localzone", "(", ")", ".", "zone", ",", "'startDate'", ":", "from_dt", ".", "strftime", "(", "'%Y-%m-%d'", ")", ",", "'endDate'", ":", "to_dt", ".", "strftime", "(", "'%Y-%m-%d'", ")", "}", ")", "req", "=", "self", ".", "session", ".", "get", "(", "self", ".", "_calendars", ",", "params", "=", "params", ")", "self", ".", "response", "=", "req", ".", "json", "(", ")", "return", "self", ".", "response", "[", "'Collection'", "]" ]
Retrieves calendars for this month
[ "Retrieves", "calendars", "for", "this", "month" ]
9bb6d750662ce24c8febc94807ddbdcdf3cadaa2
https://github.com/picklepete/pyicloud/blob/9bb6d750662ce24c8febc94807ddbdcdf3cadaa2/pyicloud/services/calendar.py#L65-L82
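Companion sketch for calendars; it always reports the current month's collections:

    for collection in api.calendar.calendars():
        print(collection.get("title"))  # key name assumed from iCloud's Collection payload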
233,899
picklepete/pyicloud
pyicloud/cmdline.py
create_pickled_data
def create_pickled_data(idevice, filename):
    """This helper will output the idevice to a pickled file named
    after the passed filename.

    This allows the data to be used without resorting to screen / pipe
    scraping.
    """
    data = {}
    for x in idevice.content:
        data[x] = idevice.content[x]
    location = filename
    pickle_file = open(location, 'wb')
    pickle.dump(data, pickle_file, protocol=pickle.HIGHEST_PROTOCOL)
    pickle_file.close()
python
def create_pickled_data(idevice, filename):
    """This helper will output the idevice to a pickled file named
    after the passed filename.

    This allows the data to be used without resorting to screen / pipe
    scraping.
    """
    data = {}
    for x in idevice.content:
        data[x] = idevice.content[x]
    location = filename
    pickle_file = open(location, 'wb')
    pickle.dump(data, pickle_file, protocol=pickle.HIGHEST_PROTOCOL)
    pickle_file.close()
[ "def", "create_pickled_data", "(", "idevice", ",", "filename", ")", ":", "data", "=", "{", "}", "for", "x", "in", "idevice", ".", "content", ":", "data", "[", "x", "]", "=", "idevice", ".", "content", "[", "x", "]", "location", "=", "filename", "pickle_file", "=", "open", "(", "location", ",", "'wb'", ")", "pickle", ".", "dump", "(", "data", ",", "pickle_file", ",", "protocol", "=", "pickle", ".", "HIGHEST_PROTOCOL", ")", "pickle_file", ".", "close", "(", ")" ]
This helper will output the idevice to a pickled file named after the passed filename.

This allows the data to be used without resorting to screen / pipe scraping.
[ "This", "helper", "will", "output", "the", "idevice", "to", "a", "pickled", "file", "named", "after", "the", "passed", "filename", "." ]
9bb6d750662ce24c8febc94807ddbdcdf3cadaa2
https://github.com/picklepete/pyicloud/blob/9bb6d750662ce24c8febc94807ddbdcdf3cadaa2/pyicloud/cmdline.py#L23-L35
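A round-trip sketch for create_pickled_data; the filename is arbitrary, and the import path follows the file location above:

    import pickle
    from pyicloud.cmdline import create_pickled_data  # defined in pyicloud/cmdline.py

    create_pickled_data(api.devices[0], "iphone.pickle")
    with open("iphone.pickle", "rb") as handle:
        data = pickle.load(handle)  # plain dict copy of the device's content

A with-block, as in the load above, would also be a tidier way to write the dump side than the manual open/close in the original.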