Dataset schema (one record per function):
_id: string (2 to 7 characters)
title: string (1 to 88 characters)
partition: string (3 classes)
text: string (75 to 19.8k characters)
language: string (1 class)
meta_information: dict
q16100
ArrayEntry._ancestors
train
def _ancestors(
        self, qname: Union[QualName, bool] = None) -> List[InstanceNode]:
    """XPath - return the list of receiver's ancestors."""
    return self.up()._ancestors(qname)
python
{ "resource": "" }
q16101
ArrayEntry._preceding_siblings
train
def _preceding_siblings(
        self, qname: Union[QualName, bool] = None) -> List[InstanceNode]:
    """XPath - return the list of receiver's preceding siblings."""
    if qname and self.qual_name != qname:
        return []
    res = []
    en = self
    for _ in self.before:
        en = en.previous()
        res.append(en)
    return res
python
{ "resource": "" }
q16102
ArrayEntry._following_siblings
train
def _following_siblings(
        self, qname: Union[QualName, bool] = None) -> List[InstanceNode]:
    """XPath - return the list of receiver's following siblings."""
    if qname and self.qual_name != qname:
        return []
    res = []
    en = self
    for _ in self.after:
        en = en.next()
        res.append(en)
    return res
python
{ "resource": "" }
q16103
MemberName.peek_step
train
def peek_step(self, val: ObjectValue, sn: "DataNode") -> Tuple[Value, "DataNode"]:
    """Return member value addressed by the receiver + its schema node.

    Args:
        val: Current value (object).
        sn: Current schema node.
    """
    cn = sn.get_data_child(self.name, self.namespace)
    try:
        return (val[cn.iname()], cn)
    except (IndexError, KeyError, TypeError):
        return (None, cn)
python
{ "resource": "" }
q16104
ActionName.peek_step
train
def peek_step(self, val: ObjectValue, sn: "DataNode") -> Tuple[None, "DataNode"]:
    """Fail because there is no action instance."""
    cn = sn.get_child(self.name, self.namespace)
    return (None, cn)
python
{ "resource": "" }
q16105
EntryValue.parse_value
train
def parse_value(self, sn: "DataNode") -> ScalarValue:
    """Let schema node's type parse the receiver's value."""
    res = sn.type.parse_value(self.value)
    if res is None:
        raise InvalidKeyValue(self.value)
    return res
python
{ "resource": "" }
q16106
EntryKeys.parse_keys
train
def parse_keys(self, sn: "DataNode") -> Dict[InstanceName, ScalarValue]:
    """Parse key dictionary in the context of a schema node.

    Args:
        sn: Schema node corresponding to a list.
    """
    res = {}
    for k in self.keys:
        knod = sn.get_data_child(*k)
        if knod is None:
            raise NonexistentSchemaNode(sn.qual_name, *k)
        kval = knod.type.parse_value(self.keys[k])
        if kval is None:
            raise InvalidKeyValue(self.keys[k])
        res[knod.iname()] = kval
    return res
python
{ "resource": "" }
q16107
EntryKeys.peek_step
train
def peek_step(self, val: ArrayValue, sn: "DataNode") -> Tuple[ObjectValue, "DataNode"]:
    """Return the entry addressed by the receiver + its schema node.

    Args:
        val: Current value (array).
        sn: Current schema node.
    """
    keys = self.parse_keys(sn)
    for en in val:
        flag = True
        try:
            for k in keys:
                if en[k] != keys[k]:
                    flag = False
                    break
        except KeyError:
            continue
        if flag:
            return (en, sn)
    return (None, sn)
python
{ "resource": "" }
q16108
ResourceIdParser.parse
train
def parse(self) -> InstanceRoute:
    """Parse resource identifier."""
    res = InstanceRoute()
    if self.at_end():
        return res
    if self.peek() == "/":
        self.offset += 1
    if self.at_end():
        return res
    sn = self.schema_node
    while True:
        name, ns = self.prefixed_name()
        cn = sn.get_data_child(name, ns)
        if cn is None:
            for cn in sn.children:
                if (isinstance(cn, RpcActionNode) and cn.name == name and
                        (ns is None or cn.ns == ns)):
                    res.append(ActionName(name, ns))
                    return res
            raise NonexistentSchemaNode(sn.qual_name, name, ns)
        res.append(MemberName(name, ns))
        if self.at_end():
            return res
        if isinstance(cn, SequenceNode):
            self.char("=")
            res.append(self._key_values(cn))
            if self.at_end():
                return res
        else:
            self.char("/")
        sn = cn
python
{ "resource": "" }
q16109
ResourceIdParser._key_values
train
def _key_values(self, sn: "SequenceNode") -> Union[EntryKeys, EntryValue]:
    """Parse leaf-list value or list keys."""
    try:
        keys = self.up_to("/")
    except EndOfInput:
        keys = self.remaining()
    if not keys:
        raise UnexpectedInput(self, "entry value or keys")
    if isinstance(sn, LeafListNode):
        return EntryValue(unquote(keys))
    ks = keys.split(",")
    try:
        if len(ks) != len(sn.keys):
            raise UnexpectedInput(self, f"exactly {len(sn.keys)} keys")
    except AttributeError:
        raise BadSchemaNodeType(sn.qual_name, "list")
    sel = {}
    for j in range(len(ks)):
        knod = sn.get_data_child(*sn.keys[j])
        val = unquote(ks[j])
        sel[(knod.name, None if knod.ns == sn.ns else knod.ns)] = val
    return EntryKeys(sel)
python
{ "resource": "" }
q16110
InstanceIdParser.parse
train
def parse(self) -> InstanceRoute:
    """Parse instance identifier."""
    res = InstanceRoute()
    while True:
        self.char("/")
        res.append(MemberName(*self.prefixed_name()))
        try:
            next = self.peek()
        except EndOfInput:
            return res
        if next == "[":
            self.offset += 1
            self.skip_ws()
            next = self.peek()
            if next in "0123456789":
                ind = self.unsigned_integer() - 1
                if ind < 0:
                    raise UnexpectedInput(self, "positive index")
                self.skip_ws()
                self.char("]")
                res.append(EntryIndex(ind))
            elif next == '.':
                self.offset += 1
                res.append(EntryValue(self._get_value()))
            else:
                res.append(self._key_predicates())
            if self.at_end():
                return res
python
{ "resource": "" }
q16111
Statement.find1
train
def find1(self, kw: YangIdentifier, arg: str = None,
          pref: YangIdentifier = None,
          required: bool = False) -> Optional["Statement"]:
    """Return first substatement with the given parameters.

    Args:
        kw: Statement keyword (local part for extensions).
        arg: Argument (all arguments will match if ``None``).
        pref: Keyword prefix (``None`` for built-in statements).
        required: Should an exception be raised on failure?

    Raises:
        StatementNotFound: If `required` is ``True`` and the statement
            is not found.
    """
    for sub in self.substatements:
        if (sub.keyword == kw and sub.prefix == pref and
                (arg is None or sub.argument == arg)):
            return sub
    if required:
        raise StatementNotFound(str(self), kw)
python
{ "resource": "" }
q16112
Statement.find_all
train
def find_all(self, kw: YangIdentifier,
             pref: YangIdentifier = None) -> List["Statement"]:
    """Return the list of all substatements with the given keyword and prefix.

    Args:
        kw: Statement keyword (local part for extensions).
        pref: Keyword prefix (``None`` for built-in statements).
    """
    return [c for c in self.substatements
            if c.keyword == kw and c.prefix == pref]
python
{ "resource": "" }
q16113
Statement.get_definition
train
def get_definition(self, name: YangIdentifier,
                   kw: YangIdentifier) -> Optional["Statement"]:
    """Search ancestor statements for a definition.

    Args:
        name: Name of a grouping or datatype (with no prefix).
        kw: ``grouping`` or ``typedef``.

    Raises:
        DefinitionNotFound: If the definition is not found.
    """
    stmt = self.superstmt
    while stmt:
        res = stmt.find1(kw, name)
        if res:
            return res
        stmt = stmt.superstmt
    return None
python
{ "resource": "" }
q16114
Statement.get_error_info
train
def get_error_info(self) -> Tuple[Optional[str], Optional[str]]:
    """Return receiver's error tag and error message if present."""
    etag = self.find1("error-app-tag")
    emsg = self.find1("error-message")
    return (etag.argument if etag else None,
            emsg.argument if emsg else None)
python
{ "resource": "" }
q16115
ModuleParser.parse
train
def parse(self) -> Statement:
    """Parse a complete YANG module or submodule.

    Raises:
        EndOfInput: If past the end of input.
        ModuleNameMismatch: If parsed module name doesn't match `self.name`.
        ModuleRevisionMismatch: If parsed revision date doesn't match `self.rev`.
        UnexpectedInput: If top-level statement isn't ``(sub)module``.
    """
    self.opt_separator()
    start = self.offset
    res = self.statement()
    if res.keyword not in ["module", "submodule"]:
        self.offset = start
        raise UnexpectedInput(self, "'module' or 'submodule'")
    if self.name is not None and res.argument != self.name:
        raise ModuleNameMismatch(res.argument, self.name)
    if self.rev:
        revst = res.find1("revision")
        if revst is None or revst.argument != self.rev:
            raise ModuleRevisionMismatch(revst.argument, self.rev)
    try:
        self.opt_separator()
    except EndOfInput:
        return res
    raise UnexpectedInput(self, "end of input")
python
{ "resource": "" }
q16116
ModuleParser.unescape
train
def unescape(cls, text: str) -> str:
    """Replace escape sequences with corresponding characters.

    Args:
        text: Text to unescape.
    """
    chop = text.split("\\", 1)
    try:
        return (chop[0] if len(chop) == 1
                else chop[0] + cls.unescape_map[chop[1][0]] +
                cls.unescape(chop[1][1:]))
    except KeyError:
        raise InvalidArgument(text) from None
python
{ "resource": "" }
q16117
ModuleParser.opt_separator
train
def opt_separator(self) -> bool:
    """Parse an optional separator and return ``True`` if found.

    Raises:
        EndOfInput: If past the end of input.
    """
    start = self.offset
    self.dfa([
        {   # state 0: whitespace
            "": lambda: -1,
            " ": lambda: 0,
            "\t": lambda: 0,
            "\n": lambda: 0,
            "\r": lambda: 1,
            "/": lambda: 2
        },
        {   # state 1: CR/LF?
            "": self._back_break,
            "\n": lambda: 0
        },
        {   # state 2: start comment?
            "": self._back_break,
            "/": lambda: 3,
            "*": lambda: 4
        },
        {   # state 3: line comment
            "": lambda: 3,
            "\n": lambda: 0
        },
        {   # state 4: block comment
            "": lambda: 4,
            "*": lambda: 5
        },
        {   # state 5: end block comment?
            "": lambda: 4,
            "/": lambda: 0,
            "*": lambda: 5
        }])
    return start < self.offset
python
{ "resource": "" }
q16118
ModuleParser.keyword
train
def keyword(self) -> Tuple[Optional[str], str]:
    """Parse a YANG statement keyword.

    Raises:
        EndOfInput: If past the end of input.
        UnexpectedInput: If no syntactically correct keyword is found.
    """
    i1 = self.yang_identifier()
    if self.peek() == ":":
        self.offset += 1
        i2 = self.yang_identifier()
        return (i1, i2)
    return (None, i1)
python
{ "resource": "" }
q16119
ModuleParser.statement
train
def statement(self) -> Statement:
    """Parse YANG statement.

    Raises:
        EndOfInput: If past the end of input.
        UnexpectedInput: If no syntactically correct statement is found.
    """
    pref, kw = self.keyword()
    pres = self.opt_separator()
    next = self.peek()
    if next == ";":
        arg = None
        sub = False  # type: bool
    elif next == "{":
        arg = None
        sub = True
    elif not pres:
        raise UnexpectedInput(self, "separator")
    else:
        self._arg = ""
        sub = self.argument()
        arg = self._arg
    self.offset += 1
    res = Statement(kw, arg, pref=pref)
    if sub:
        res.substatements = self.substatements()
        for sub in res.substatements:
            sub.superstmt = res
    return res
python
{ "resource": "" }
q16120
ModuleParser.argument
train
def argument(self) -> bool:
    """Parse statement argument.

    Return ``True`` if the argument is followed by block of substatements.
    """
    next = self.peek()
    if next == "'":
        quoted = True
        self.sq_argument()
    elif next == '"':
        quoted = True
        self.dq_argument()
    elif self._arg == "":
        quoted = False
        self.unq_argument()
    else:
        raise UnexpectedInput(self, "single or double quote")
    self.opt_separator()
    next = self.peek()
    if next == ";":
        return False
    if next == "{":
        return True
    elif quoted and next == "+":
        self.offset += 1
        self.opt_separator()
        return self.argument()
    else:
        raise UnexpectedInput(self, "';', '{'" + (" or '+'" if quoted else ""))
python
{ "resource": "" }
q16121
ModuleParser.dq_argument
train
def dq_argument(self) -> str:
    """Parse double-quoted argument.

    Raises:
        EndOfInput: If past the end of input.
    """
    def escape():
        self._escape = True
        return 1

    self._escape = False  # any escaped chars?
    self.offset += 1
    start = self.offset
    self.dfa([
        {   # state 0: argument
            "": lambda: 0,
            '"': lambda: -1,
            "\\": escape
        },
        {   # state 1: after escape
            "": lambda: 0
        }])
    self._arg += (self.unescape(self.input[start:self.offset])
                  if self._escape else self.input[start:self.offset])
    self.offset += 1
python
{ "resource": "" }
q16122
ModuleParser.unq_argument
train
def unq_argument(self) -> str:
    """Parse unquoted argument.

    Raises:
        EndOfInput: If past the end of input.
    """
    start = self.offset
    self.dfa([
        {   # state 0: argument
            "": lambda: 0,
            ";": lambda: -1,
            " ": lambda: -1,
            "\t": lambda: -1,
            "\r": lambda: -1,
            "\n": lambda: -1,
            "{": lambda: -1,
            '/': lambda: 1
        },
        {   # state 1: comment?
            "": lambda: 0,
            "/": self._back_break,
            "*": self._back_break
        }])
    self._arg = self.input[start:self.offset]
python
{ "resource": "" }
q16123
ModuleParser.substatements
train
def substatements(self) -> List[Statement]:
    """Parse substatements.

    Raises:
        EndOfInput: If past the end of input.
    """
    res = []
    self.opt_separator()
    while self.peek() != "}":
        res.append(self.statement())
        self.opt_separator()
    self.offset += 1
    return res
python
{ "resource": "" }
q16124
SchemaData._from_yang_library
train
def _from_yang_library(self, yang_lib: Dict[str, Any]) -> None:
    """Set the schema structures from YANG library data.

    Args:
        yang_lib: Dictionary with YANG library data.

    Raises:
        BadYangLibraryData: If YANG library data is invalid.
        FeaturePrerequisiteError: If a pre-requisite feature isn't supported.
        MultipleImplementedRevisions: If multiple revisions of an
            implemented module are listed in YANG library.
        ModuleNotFound: If a YANG module wasn't found in any of the
            directories specified in `mod_path`.
    """
    try:
        for item in yang_lib["ietf-yang-library:modules-state"]["module"]:
            name = item["name"]
            rev = item["revision"]
            mid = (name, rev)
            mdata = ModuleData(mid)
            self.modules[mid] = mdata
            if item["conformance-type"] == "implement":
                if name in self.implement:
                    raise MultipleImplementedRevisions(name)
                self.implement[name] = rev
            mod = self._load_module(name, rev)
            mdata.statement = mod
            if "feature" in item:
                mdata.features.update(item["feature"])
            locpref = mod.find1("prefix", required=True).argument
            mdata.prefix_map[locpref] = mid
            if "submodule" in item:
                for s in item["submodule"]:
                    sname = s["name"]
                    smid = (sname, s["revision"])
                    sdata = ModuleData(mid)
                    self.modules[smid] = sdata
                    mdata.submodules.add(smid)
                    submod = self._load_module(*smid)
                    sdata.statement = submod
                    bt = submod.find1("belongs-to", name, required=True)
                    locpref = bt.find1("prefix", required=True).argument
                    sdata.prefix_map[locpref] = mid
    except KeyError as e:
        raise BadYangLibraryData("missing " + str(e)) from None
    self._process_imports()
    self._check_feature_dependences()
python
{ "resource": "" }
q16125
SchemaData._load_module
train
def _load_module(self, name: YangIdentifier, rev: RevisionDate) -> Statement:
    """Read and parse a YANG module or submodule."""
    for d in self.module_search_path:
        run = 0
        while run < 2:
            fn = f"{d}/{name}"
            if rev and run == 0:
                fn += "@" + rev
            fn += ".yang"
            try:
                with open(fn, encoding='utf-8') as infile:
                    res = ModuleParser(infile.read(), name, rev).parse()
            except (FileNotFoundError, PermissionError, ModuleContentMismatch):
                run += 1
                continue
            return res
    raise ModuleNotFound(name, rev)
python
{ "resource": "" }
q16126
SchemaData._check_feature_dependences
train
def _check_feature_dependences(self):
    """Verify feature dependencies."""
    for mid in self.modules:
        for fst in self.modules[mid].statement.find_all("feature"):
            fn, fid = self.resolve_pname(fst.argument, mid)
            if fn not in self.modules[fid].features:
                continue
            if not self.if_features(fst, mid):
                raise FeaturePrerequisiteError(*fn)
python
{ "resource": "" }
q16127
SchemaData.namespace
train
def namespace(self, mid: ModuleId) -> YangIdentifier:
    """Return the namespace corresponding to a module or submodule.

    Args:
        mid: Module identifier.

    Raises:
        ModuleNotRegistered: If `mid` is not registered in the data model.
    """
    try:
        mdata = self.modules[mid]
    except KeyError:
        raise ModuleNotRegistered(*mid) from None
    return mdata.main_module[0]
python
{ "resource": "" }
q16128
SchemaData.last_revision
train
def last_revision(self, mod: YangIdentifier) -> ModuleId:
    """Return the last revision of a module that's part of the data model.

    Args:
        mod: Name of a module or submodule.

    Raises:
        ModuleNotRegistered: If the module `mod` is not present in the
            data model.
    """
    revs = [mn for mn in self.modules if mn[0] == mod]
    if not revs:
        raise ModuleNotRegistered(mod)
    return sorted(revs, key=lambda x: x[1])[-1]
python
{ "resource": "" }
q16129
SchemaData.prefix2ns
train
def prefix2ns(self, prefix: YangIdentifier, mid: ModuleId) -> YangIdentifier:
    """Return the namespace corresponding to a prefix.

    Args:
        prefix: Prefix associated with a module and its namespace.
        mid: Identifier of the module in which the prefix is declared.

    Raises:
        ModuleNotRegistered: If `mid` is not registered in the data model.
        UnknownPrefix: If `prefix` is not declared.
    """
    try:
        mdata = self.modules[mid]
    except KeyError:
        raise ModuleNotRegistered(*mid) from None
    try:
        return mdata.prefix_map[prefix][0]
    except KeyError:
        raise UnknownPrefix(prefix, mid) from None
python
{ "resource": "" }
q16130
SchemaData.resolve_pname
train
def resolve_pname(self, pname: PrefName,
                  mid: ModuleId) -> Tuple[YangIdentifier, ModuleId]:
    """Return the name and module identifier in which the name is defined.

    Args:
        pname: Name with an optional prefix.
        mid: Identifier of the module in which `pname` appears.

    Raises:
        ModuleNotRegistered: If `mid` is not registered in the data model.
        UnknownPrefix: If the prefix specified in `pname` is not declared.
    """
    p, s, loc = pname.partition(":")
    try:
        mdata = self.modules[mid]
    except KeyError:
        raise ModuleNotRegistered(*mid) from None
    try:
        return (loc, mdata.prefix_map[p]) if s else (p, mdata.main_module)
    except KeyError:
        raise UnknownPrefix(p, mid) from None
python
{ "resource": "" }
q16131
SchemaData.translate_node_id
train
def translate_node_id(self, ni: PrefName, sctx: SchemaContext) -> QualName:
    """Translate node identifier to a qualified name.

    Args:
        ni: Node identifier (with optional prefix).
        sctx: Schema context.

    Raises:
        ModuleNotRegistered: If the context module is not registered in
            the data model.
        UnknownPrefix: If the prefix specified in `ni` is not declared.
    """
    p, s, loc = ni.partition(":")
    if not s:
        return (ni, sctx.default_ns)
    try:
        mdata = self.modules[sctx.text_mid]
    except KeyError:
        raise ModuleNotRegistered(*sctx.text_mid) from None
    try:
        return (loc, self.namespace(mdata.prefix_map[p]))
    except KeyError:
        raise UnknownPrefix(p, sctx.text_mid) from None
python
{ "resource": "" }
q16132
SchemaData.prefix
train
def prefix(self, imod: YangIdentifier, mid: ModuleId) -> YangIdentifier:
    """Return the prefix corresponding to an implemented module.

    Args:
        imod: Name of an implemented module.
        mid: Identifier of the context module.

    Raises:
        ModuleNotImplemented: If `imod` is not implemented.
        ModuleNotRegistered: If `mid` is not registered in YANG library.
        ModuleNotImported: If `imod` is not imported in `mid`.
    """
    try:
        did = (imod, self.implement[imod])
    except KeyError:
        raise ModuleNotImplemented(imod) from None
    try:
        pmap = self.modules[mid].prefix_map
    except KeyError:
        raise ModuleNotRegistered(*mid) from None
    for p in pmap:
        if pmap[p] == did:
            return p
    raise ModuleNotImported(imod, mid)
python
{ "resource": "" }
q16133
SchemaData.sni2route
train
def sni2route(self, sni: SchemaNodeId, sctx: SchemaContext) -> SchemaRoute:
    """Translate schema node identifier to a schema route.

    Args:
        sni: Schema node identifier (absolute or relative).
        sctx: Schema context.

    Raises:
        ModuleNotRegistered: If the context module is not registered in
            the data model.
        UnknownPrefix: If a prefix specified in `sni` is not declared.
    """
    nlist = sni.split("/")
    res = []
    for qn in (nlist[1:] if sni[0] == "/" else nlist):
        res.append(self.translate_node_id(qn, sctx))
    return res
python
{ "resource": "" }
q16134
SchemaData.get_definition
train
def get_definition(self, stmt: Statement,
                   sctx: SchemaContext) -> Tuple[Statement, SchemaContext]:
    """Find the statement defining a grouping or derived type.

    Args:
        stmt: YANG "uses" or "type" statement.
        sctx: Schema context where the definition is used.

    Returns:
        A tuple consisting of the definition statement ('grouping' or
        'typedef') and schema context of the definition.

    Raises:
        ValueError: If `stmt` is neither "uses" nor "type" statement.
        ModuleNotRegistered: If the context module is not registered in
            the data model.
        UnknownPrefix: If the prefix specified in the argument of `stmt`
            is not declared.
        DefinitionNotFound: If the corresponding definition is not found.
    """
    if stmt.keyword == "uses":
        kw = "grouping"
    elif stmt.keyword == "type":
        kw = "typedef"
    else:
        raise ValueError("not a 'uses' or 'type' statement")
    loc, did = self.resolve_pname(stmt.argument, sctx.text_mid)
    if did == sctx.text_mid:
        dstmt = stmt.get_definition(loc, kw)
        if dstmt:
            return (dstmt, sctx)
    else:
        dstmt = self.modules[did].statement.find1(kw, loc)
        if dstmt:
            return (dstmt,
                    SchemaContext(sctx.schema_data, sctx.default_ns, did))
    for sid in self.modules[did].submodules:
        dstmt = self.modules[sid].statement.find1(kw, loc)
        if dstmt:
            return (dstmt,
                    SchemaContext(sctx.schema_data, sctx.default_ns, sid))
    raise DefinitionNotFound(kw, stmt.argument)
python
{ "resource": "" }
q16135
SchemaData.is_derived_from
train
def is_derived_from(self, identity: QualName, base: QualName) -> bool:
    """Return ``True`` if `identity` is derived from `base`."""
    try:
        bases = self.identity_adjs[identity].bases
    except KeyError:
        return False
    if base in bases:
        return True
    for ib in bases:
        if self.is_derived_from(ib, base):
            return True
    return False
python
{ "resource": "" }
q16136
SchemaData.derived_from
train
def derived_from(self, identity: QualName) -> MutableSet[QualName]:
    """Return the set of identities transitively derived from `identity`."""
    try:
        res = self.identity_adjs[identity].derivs
    except KeyError:
        return set()
    for id in res.copy():
        res |= self.derived_from(id)
    return res
python
{ "resource": "" }
q16137
SchemaData.derived_from_all
train
def derived_from_all(self, identities: List[QualName]) -> MutableSet[QualName]:
    """Return the set of identities transitively derived from all of `identities`."""
    if not identities:
        return set()
    res = self.derived_from(identities[0])
    for id in identities[1:]:
        res &= self.derived_from(id)
    return res
python
{ "resource": "" }
q16138
SchemaData.if_features
train
def if_features(self, stmt: Statement, mid: ModuleId) -> bool:
    """Evaluate ``if-feature`` substatements on a statement, if any.

    Args:
        stmt: YANG statement that is tested on if-features.
        mid: Identifier of the module in which `stmt` is present.

    Raises:
        ModuleNotRegistered: If `mid` is not registered in the data model.
        InvalidFeatureExpression: If an if-feature expression is not
            syntactically correct.
        UnknownPrefix: If a prefix specified in a feature name is not declared.
    """
    iffs = stmt.find_all("if-feature")
    if not iffs:
        return True
    for i in iffs:
        if not FeatureExprParser(i.argument, self, mid).parse():
            return False
    return True
python
{ "resource": "" }
q16139
FeatureExprParser.parse
train
def parse(self) -> bool:
    """Parse and evaluate a complete feature expression.

    Raises:
        InvalidFeatureExpression: If the if-feature expression is not
            syntactically correct.
        UnknownPrefix: If a prefix of a feature name is not declared.
    """
    self.skip_ws()
    res = self._feature_disj()
    self.skip_ws()
    if not self.at_end():
        raise InvalidFeatureExpression(self)
    return res
python
{ "resource": "" }
q16140
Parser.char
train
def char(self, c: str) -> None:
    """Parse the specified character.

    Args:
        c: One-character string.

    Raises:
        EndOfInput: If past the end of `self.input`.
        UnexpectedInput: If the next character is different from `c`.
    """
    if self.peek() == c:
        self.offset += 1
    else:
        raise UnexpectedInput(self, f"char '{c}'")
python
{ "resource": "" }
q16141
Parser.line_column
train
def line_column(self) -> Tuple[int, int]:
    """Return line and column coordinates."""
    ln = self.input.count("\n", 0, self.offset)
    c = (self.offset if ln == 0
         else self.offset - self.input.rfind("\n", 0, self.offset) - 1)
    return (ln + 1, c)
python
{ "resource": "" }
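As a side note to Parser.line_column above, here is a minimal standalone sketch of the same offset-to-coordinates arithmetic in plain Python; the sample text and offset are made up for illustration and do not come from the library:

text = "first line\nsecond line"
offset = 13  # points at the 'c' in "second"
ln = text.count("\n", 0, offset)
col = offset if ln == 0 else offset - text.rfind("\n", 0, offset) - 1
print(ln + 1, col)  # -> 2 2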
q16142
Parser.match_regex
train
def match_regex(self, regex: Pattern, required: bool = False,
                meaning: str = "") -> str:
    """Parse input based on a regular expression.

    Args:
        regex: Compiled regular expression object.
        required: Should an exception be raised on unexpected input?
        meaning: Meaning of `regex` (for use in error messages).

    Raises:
        UnexpectedInput: If `required` is true and the input doesn't match.
    """
    mo = regex.match(self.input, self.offset)
    if mo:
        self.offset = mo.end()
        return mo.group()
    if required:
        raise UnexpectedInput(self, meaning)
python
{ "resource": "" }
q16143
Parser.one_of
train
def one_of(self, chset: str) -> str:
    """Parse one character from the specified set.

    Args:
        chset: String of characters to try as alternatives.

    Returns:
        The character that was actually matched.

    Raises:
        UnexpectedInput: If the next character is not in `chset`.
    """
    res = self.peek()
    if res in chset:
        self.offset += 1
        return res
    raise UnexpectedInput(self, "one of " + chset)
python
{ "resource": "" }
q16144
Parser.peek
train
def peek(self) -> str:
    """Return the next character without advancing offset.

    Raises:
        EndOfInput: If past the end of `self.input`.
    """
    try:
        return self.input[self.offset]
    except IndexError:
        raise EndOfInput(self)
python
{ "resource": "" }
q16145
Parser.prefixed_name
train
def prefixed_name(self) -> Tuple[YangIdentifier, Optional[YangIdentifier]]:
    """Parse identifier with an optional colon-separated prefix."""
    i1 = self.yang_identifier()
    try:
        next = self.peek()
    except EndOfInput:
        return (i1, None)
    if next != ":":
        return (i1, None)
    self.offset += 1
    return (self.yang_identifier(), i1)
python
{ "resource": "" }
q16146
Parser.remaining
train
def remaining(self) -> str:
    """Return the remaining part of the input string."""
    res = self.input[self.offset:]
    self.offset = len(self.input)
    return res
python
{ "resource": "" }
q16147
Parser.up_to
train
def up_to(self, term: str) -> str:
    """Parse and return a segment terminated by the first occurrence of a string.

    Args:
        term: Terminating string.

    Raises:
        EndOfInput: If `term` does not occur in the rest of the input text.
    """
    end = self.input.find(term, self.offset)
    if end < 0:
        raise EndOfInput(self)
    res = self.input[self.offset:end]
    self.offset = end + 1
    return res
python
{ "resource": "" }
q16148
Intervals.restrict_with
train
def restrict_with(self, expr: str, error_tag: str = None,
                  error_message: str = None) -> None:
    """Combine the receiver with new intervals.

    Args:
        expr: "range" or "length" expression.
        error_tag: error tag of the new expression.
        error_message: error message for the new expression.

    Raises:
        InvalidArgument: If parsing of `expr` fails.
    """
    def parse(x: str) -> Number:
        res = self.parser(x)
        if res is None:
            raise InvalidArgument(expr)
        return res

    def simpl(rng: List[Number]) -> List[Number]:
        return ([rng[0]] if rng[0] == rng[1] else rng)

    def to_num(xs):
        return [parse(x) for x in xs]

    lo = self.intervals[0][0]
    hi = self.intervals[-1][-1]
    ran = []
    for p in [p.strip() for p in expr.split("|")]:
        r = [i.strip() for i in p.split("..")]
        if len(r) > 2:
            raise InvalidArgument(expr)
        ran.append(r)
    if ran[0][0] != "min":
        lo = parse(ran[0][0])
    if ran[-1][-1] != "max":
        hi = parse(ran[-1][-1])
    self.intervals = (
        [simpl([lo, hi])] if len(ran) == 1 else
        ([simpl([lo, parse(ran[0][-1])])] +
         [to_num(r) for r in ran[1:-1]] +
         [simpl([parse(ran[-1][0]), hi])]))
    if error_tag:
        self.error_tag = error_tag
    if error_message:
        self.error_message = error_message
python
{ "resource": "" }
q16149
module_entry
train
def module_entry(yfile):
    """Add entry for one file containing YANG module text.

    Args:
        yfile (file): File containing a YANG module or submodule.
    """
    ytxt = yfile.read()
    mp = ModuleParser(ytxt)
    mst = mp.statement()
    submod = mst.keyword == "submodule"
    import_only = True
    rev = ""
    features = []
    includes = []
    rec = {}
    for sst in mst.substatements:
        if not rev and sst.keyword == "revision":
            rev = sst.argument
        elif import_only and sst.keyword in data_kws:
            import_only = False
        elif sst.keyword == "feature":
            features.append(sst.argument)
        elif submod:
            continue
        elif sst.keyword == "namespace":
            rec["namespace"] = sst.argument
        elif sst.keyword == "include":
            rd = sst.find1("revision-date")
            includes.append((sst.argument, rd.argument if rd else None))
    rec["import-only"] = import_only
    rec["features"] = features
    if submod:
        rec["revision"] = rev
        submodmap[mst.argument] = rec
    else:
        rec["includes"] = includes
        modmap[(mst.argument, rev)] = rec
python
{ "resource": "" }
q16150
DataModel.from_file
train
def from_file(cls, name: str, mod_path: Tuple[str] = (".",),
              description: str = None) -> "DataModel":
    """Initialize the data model from a file with YANG library data.

    Args:
        name: Name of a file with YANG library data.
        mod_path: Tuple of directories where to look for YANG modules.
        description: Optional description of the data model.

    Returns:
        The data model instance.

    Raises:
        The same exceptions as the class constructor above.
    """
    with open(name, encoding="utf-8") as infile:
        yltxt = infile.read()
    return cls(yltxt, mod_path, description)
python
{ "resource": "" }
q16151
DataModel.module_set_id
train
def module_set_id(self) -> str:
    """Compute unique id of YANG modules comprising the data model.

    Returns:
        String consisting of hexadecimal digits.
    """
    fnames = sorted(["@".join(m) for m in self.schema_data.modules])
    return hashlib.sha1("".join(fnames).encode("ascii")).hexdigest()
python
{ "resource": "" }
q16152
DataModel.from_raw
train
def from_raw(self, robj: RawObject) -> RootNode:
    """Create an instance node from a raw data tree.

    Args:
        robj: Dictionary representing a raw data tree.

    Returns:
        Root instance node.
    """
    cooked = self.schema.from_raw(robj)
    return RootNode(cooked, self.schema, cooked.timestamp)
python
{ "resource": "" }
q16153
DataModel.get_schema_node
train
def get_schema_node(self, path: SchemaPath) -> Optional[SchemaNode]:
    """Return the schema node addressed by a schema path.

    Args:
        path: Schema path.

    Returns:
        Schema node if found in the schema, or ``None``.

    Raises:
        InvalidSchemaPath: If the schema path is invalid.
    """
    return self.schema.get_schema_descendant(
        self.schema_data.path2route(path))
python
{ "resource": "" }
q16154
DataModel.get_data_node
train
def get_data_node(self, path: DataPath) -> Optional[DataNode]:
    """Return the data node addressed by a data path.

    Args:
        path: Data path.

    Returns:
        Data node if found in the schema, or ``None``.

    Raises:
        InvalidSchemaPath: If the schema path is invalid.
    """
    addr = self.schema_data.path2route(path)
    node = self.schema
    for p in addr:
        node = node.get_data_child(*p)
        if node is None:
            return None
    return node
python
{ "resource": "" }
q16155
DataModel.ascii_tree
train
def ascii_tree(self, no_types: bool = False, val_count: bool = False) -> str:
    """Generate ASCII art representation of the schema tree.

    Args:
        no_types: Suppress output of data type info.
        val_count: Show accumulated validation counts.

    Returns:
        String with the ASCII tree.
    """
    return self.schema._ascii_tree("", no_types, val_count)
python
{ "resource": "" }
q16156
XPathParser._qname
train
def _qname(self) -> Optional[QualName]:
    """Parse XML QName."""
    if self.test_string("*"):
        self.skip_ws()
        return False
    ident = self.yang_identifier()
    ws = self.skip_ws()
    try:
        next = self.peek()
    except EndOfInput:
        return ident, None
    if next == "(":
        return self._node_type(ident)
    if not ws and self.test_string(":"):
        res = (self.yang_identifier(),
               self.sctx.schema_data.prefix2ns(ident, self.sctx.text_mid))
    else:
        res = (ident, None)
    self.skip_ws()
    return res
python
{ "resource": "" }
q16157
Expr.evaluate
train
def evaluate(self, node: InstanceNode) -> XPathValue:
    """Evaluate the receiver and return the result.

    Args:
        node: Context node for XPath evaluation.

    Raises:
        XPathTypeError: If a subexpression of the receiver is of a wrong type.
    """
    return self._eval(XPathContext(node, node, 1, 1))
python
{ "resource": "" }
q16158
DataType.from_raw
train
def from_raw(self, raw: RawScalar) -> Optional[ScalarValue]:
    """Return a cooked value of the receiver type.

    Args:
        raw: Raw value obtained from JSON parser.
    """
    if isinstance(raw, str):
        return raw
python
{ "resource": "" }
q16159
DataType.from_yang
train
def from_yang(self, text: str) -> ScalarValue:
    """Parse value specified in a YANG module.

    Args:
        text: String representation of the value.

    Raises:
        InvalidArgument: If the receiver type cannot parse the text.
    """
    res = self.parse_value(text)
    if res is None:
        raise InvalidArgument(text)
    return res
python
{ "resource": "" }
q16160
DataType._handle_properties
train
def _handle_properties(self, stmt: Statement, sctx: SchemaContext) -> None:
    """Handle type substatements."""
    self._handle_restrictions(stmt, sctx)
python
{ "resource": "" }
q16161
DataType._type_digest
train
def _type_digest(self, config: bool) -> Dict[str, Any]:
    """Return receiver's type digest.

    Args:
        config: Specifies whether the type is on a configuration node.
    """
    res = {"base": self.yang_type()}
    if self.name is not None:
        res["derived"] = self.name
    return res
python
{ "resource": "" }
q16162
BitsType.sorted_bits
train
def sorted_bits(self) -> List[Tuple[str, int]]:
    """Return list of bit items sorted by position."""
    return sorted(self.bit.items(), key=lambda x: x[1])
python
{ "resource": "" }
q16163
BitsType.as_int
train
def as_int(self, val: Tuple[str]) -> int:
    """Transform a "bits" value to an integer."""
    res = 0
    try:
        for b in val:
            res += 1 << self.bit[b]
    except KeyError:
        return None
    return res
python
{ "resource": "" }
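A standalone illustration of the bit-position arithmetic used in BitsType.as_int above; the bit map below is a made-up example, not taken from any real YANG module:

bit = {"flag-a": 0, "flag-b": 2}           # bit name -> position
val = ("flag-a", "flag-b")                 # a "bits" value with both flags set
print(sum(1 << bit[b] for b in val))       # -> 5 (binary 101)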
q16164
BooleanType.parse_value
train
def parse_value(self, text: str) -> Optional[bool]:
    """Parse boolean value.

    Args:
        text: String representation of the value.
    """
    if text == "true":
        return True
    if text == "false":
        return False
python
{ "resource": "" }
q16165
EnumerationType.sorted_enums
train
def sorted_enums(self) -> List[Tuple[str, int]]:
    """Return list of enum items sorted by value."""
    return sorted(self.enum.items(), key=lambda x: x[1])
python
{ "resource": "" }
q16166
Cp2kCalculation.prepare_for_submission
train
def prepare_for_submission(self, folder):
    """Create the input files from the input nodes passed to this instance of the `CalcJob`.

    :param folder: an `aiida.common.folders.Folder` to temporarily write files on disk
    :return: `aiida.common.datastructures.CalcInfo` instance
    """
    # create input structure
    if 'structure' in self.inputs:
        self.inputs.structure.export(folder.get_abs_path(self._DEFAULT_COORDS_FILE_NAME), fileformat="xyz")

    # create cp2k input file
    inp = Cp2kInput(self.inputs.parameters.get_dict())
    inp.add_keyword("GLOBAL/PROJECT", self._DEFAULT_PROJECT_NAME)
    if 'structure' in self.inputs:
        for i, letter in enumerate('ABC'):
            inp.add_keyword('FORCE_EVAL/SUBSYS/CELL/' + letter,
                            '{:<15} {:<15} {:<15}'.format(*self.inputs.structure.cell[i]))
        topo = "FORCE_EVAL/SUBSYS/TOPOLOGY"
        inp.add_keyword(topo + "/COORD_FILE_NAME", self._DEFAULT_COORDS_FILE_NAME)
        inp.add_keyword(topo + "/COORD_FILE_FORMAT", "XYZ")
    with io.open(folder.get_abs_path(self._DEFAULT_INPUT_FILE), mode="w", encoding="utf-8") as fobj:
        fobj.write(inp.render())

    if 'settings' in self.inputs:
        settings = self.inputs.settings.get_dict()
    else:
        settings = {}

    # create code info
    codeinfo = CodeInfo()
    codeinfo.cmdline_params = settings.pop('cmdline', []) + ["-i", self._DEFAULT_INPUT_FILE]
    codeinfo.stdout_name = self._DEFAULT_OUTPUT_FILE
    codeinfo.join_files = True
    codeinfo.code_uuid = self.inputs.code.uuid

    # create calc info
    calcinfo = CalcInfo()
    calcinfo.stdin_name = self._DEFAULT_INPUT_FILE
    calcinfo.uuid = self.uuid
    calcinfo.cmdline_params = codeinfo.cmdline_params
    calcinfo.stdin_name = self._DEFAULT_INPUT_FILE
    calcinfo.stdout_name = self._DEFAULT_OUTPUT_FILE
    calcinfo.codes_info = [codeinfo]

    # file lists
    calcinfo.remote_symlink_list = []
    if 'file' in self.inputs:
        calcinfo.local_copy_list = []
        for fobj in self.inputs.file.values():
            calcinfo.local_copy_list.append((fobj.uuid, fobj.filename, fobj.filename))
    calcinfo.remote_copy_list = []
    calcinfo.retrieve_list = [self._DEFAULT_OUTPUT_FILE, self._DEFAULT_RESTART_FILE_NAME]
    calcinfo.retrieve_list += settings.pop('additional_retrieve_list', [])

    # symlinks
    if 'parent_calc_folder' in self.inputs:
        comp_uuid = self.inputs.parent_calc_folder.computer.uuid
        remote_path = self.inputs.parent_calc_folder.get_remote_path()
        symlink = (comp_uuid, remote_path, self._DEFAULT_PARENT_CALC_FLDR_NAME)
        calcinfo.remote_symlink_list.append(symlink)

    # check for left over settings
    if settings:
        raise InputValidationError("The following keys have been found " +
                                   "in the settings input node {}, ".format(self.pk) +
                                   "but were not understood: " + ",".join(settings.keys()))

    return calcinfo
python
{ "resource": "" }
q16167
Cp2kInput._render_section
train
def _render_section(self, output, params, indent=0):
    """
    It takes a dictionary and recurses through.

    For each key-value pair it checks whether the value is a dictionary
    and prepends the key with &. It passes the value to the same function,
    increasing the indentation. If the value is a list, I assume that this
    is something the user wants to store repetitively, e.g.:

        dict['KEY'] = ['val1', 'val2']
        ===>
        KEY val1
        KEY val2

        or

        dict['KIND'] = [{'_': 'Ba', 'ELEMENT':'Ba'},
                        {'_': 'Ti', 'ELEMENT':'Ti'},
                        {'_': 'O', 'ELEMENT':'O'}]
        ====>
        &KIND Ba
            ELEMENT Ba
        &END KIND
        &KIND Ti
            ELEMENT Ti
        &END KIND
        &KIND O
            ELEMENT O
        &END KIND
    """
    for key, val in sorted(params.items()):
        if key.upper() != key:
            raise InputValidationError("keyword '%s' not upper case" % key)
        if key.startswith('@') or key.startswith('$'):
            raise InputValidationError("CP2K preprocessor not supported")
        if isinstance(val, dict):
            output.append('%s&%s %s' % (' ' * indent, key, val.pop('_', '')))
            self._render_section(output, val, indent + 3)
            output.append('%s&END %s' % (' ' * indent, key))
        elif isinstance(val, list):
            for listitem in val:
                self._render_section(output, {key: listitem}, indent)
        elif isinstance(val, bool):
            val_str = '.true.' if val else '.false.'
            output.append('%s%s %s' % (' ' * indent, key, val_str))
        else:
            output.append('%s%s %s' % (' ' * indent, key, val))
python
{ "resource": "" }
q16168
multi_raw
train
def multi_raw(query, params, models, model_to_fields):
    """Scoop multiple model instances out of the DB at once, given a query
    that returns all fields of each.

    Return an iterable of sequences of model instances parallel to the
    ``models`` sequence of classes. For example::

        [(<User such-and-such>, <Watch such-and-such>), ...]

    """
    cursor = connections[router.db_for_read(models[0])].cursor()
    cursor.execute(query, params)
    rows = cursor.fetchall()
    for row in rows:
        row_iter = iter(row)
        yield [model_class(**dict((a, next(row_iter))
                                  for a in model_to_fields[model_class]))
               for model_class in models]
python
{ "resource": "" }
q16169
Watch.unsubscribe_url
train
def unsubscribe_url(self):
    """Return the absolute URL to visit to delete me."""
    server_relative = ('%s?s=%s' %
                       (reverse('tidings.unsubscribe', args=[self.pk]),
                        self.secret))
    return 'https://%s%s' % (Site.objects.get_current().domain,
                             server_relative)
python
{ "resource": "" }
q16170
claim_watches
train
def claim_watches(user):
    """Attach any anonymous watches having a user's email to that user.

    Call this from your user registration process if you like.

    """
    Watch.objects.filter(email=user.email).update(email=None, user=user)
python
{ "resource": "" }
q16171
collate
train
def collate(*iterables, **kwargs):
    """Return an iterable ordered collation of the already-sorted items
    from each of ``iterables``, compared by kwarg ``key``.

    If ``reverse=True`` is passed, iterables must return their results in
    descending order rather than ascending.

    """
    key = kwargs.pop('key', lambda a: a)
    reverse = kwargs.pop('reverse', False)
    min_or_max = max if reverse else min
    rows = [iter(iterable) for iterable in iterables if iterable]
    next_values = {}
    by_key = []

    def gather_next_value(row, index):
        try:
            next_value = next(row)
        except StopIteration:
            pass
        else:
            next_values[index] = next_value
            by_key.append((key(next_value), index))

    for index, row in enumerate(rows):
        gather_next_value(row, index)

    while by_key:
        key_value, index = min_or_max(by_key)
        by_key.remove((key_value, index))
        next_value = next_values.pop(index)
        yield next_value
        gather_next_value(rows[index], index)
python
{ "resource": "" }
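An illustrative use of the collate function above; the input lists are made up, and each must already be sorted as the docstring requires:

merged = collate([1, 4, 7], [2, 5, 8], [3, 6, 9])
print(list(merged))  # -> [1, 2, 3, 4, 5, 6, 7, 8, 9]
# with reverse=True, the inputs must be sorted in descending order
print(list(collate([7, 4, 1], [9, 3], reverse=True)))  # -> [9, 7, 4, 3, 1]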
q16172
hash_to_unsigned
train
def hash_to_unsigned(data):
    """If ``data`` is a string or unicode string, return an unsigned 4-byte
    int hash of it. If ``data`` is already an int that fits those
    parameters, return it verbatim.

    If ``data`` is an int outside that range, behavior is undefined at the
    moment. We rely on the ``PositiveIntegerField`` on
    :class:`~tidings.models.WatchFilter` to scream if the int is too long
    for the field.

    We use CRC32 to do the hashing. Though CRC32 is not a good
    general-purpose hash function, it has no collisions on a dictionary of
    38,470 English words, which should be fine for the small sets that
    :class:`WatchFilters <tidings.models.WatchFilter>` are designed to
    enumerate. As a bonus, it is fast and available as a built-in function
    in some DBs. If your set of filter values is very large or has
    different CRC32 distribution properties than English words, you might
    want to do your own hashing in your :class:`~tidings.events.Event`
    subclass and pass ints when specifying filter values.

    """
    if isinstance(data, string_types):
        # Return a CRC32 value identical across Python versions and
        # platforms by stripping the sign bit as on
        # http://docs.python.org/library/zlib.html.
        return crc32(data.encode('utf-8')) & 0xffffffff
    else:
        return int(data)
python
{ "resource": "" }
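A standalone sketch of the masking trick the function relies on: zlib.crc32 can return a signed value on older Python 2 builds, and the & 0xffffffff mask normalizes the result to an unsigned 32-bit integer. The sample string is arbitrary:

from zlib import crc32

h = crc32("apple".encode("utf-8")) & 0xffffffff
print(0 <= h <= 0xffffffff)  # -> True: always fits an unsigned 4-byte DB field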
q16173
emails_with_users_and_watches
train
def emails_with_users_and_watches(
        subject, template_path, vars, users_and_watches,
        from_email=settings.TIDINGS_FROM_ADDRESS, **extra_kwargs):
    """Return iterable of EmailMessages with user and watch values substituted.

    A convenience function for generating emails by repeatedly rendering a
    Django template with the given ``vars`` plus a ``user`` and ``watches``
    key for each pair in ``users_and_watches``

    :arg template_path: path to template file
    :arg vars: a map which becomes the Context passed in to the template
    :arg extra_kwargs: additional kwargs to pass into EmailMessage constructor

    """
    template = loader.get_template(template_path)
    context = Context(vars)
    for u, w in users_and_watches:
        context['user'] = u

        # Arbitrary single watch for compatibility with 0.1
        # TODO: remove.
        context['watch'] = w[0]

        context['watches'] = w
        yield EmailMessage(subject,
                           template.render(context),
                           from_email,
                           [u.email],
                           **extra_kwargs)
python
{ "resource": "" }
q16174
import_from_setting
train
def import_from_setting(setting_name, fallback):
    """Return the resolution of an import path stored in a Django setting.

    :arg setting_name: The name of the setting holding the import path
    :arg fallback: An alternate object to use if the setting is empty or
        doesn't exist

    Raise ImproperlyConfigured if a path is given that can't be resolved.

    """
    path = getattr(settings, setting_name, None)
    if path:
        try:
            return import_string(path)
        except ImportError:
            raise ImproperlyConfigured('%s: No such path.' % path)
    else:
        return fallback
python
{ "resource": "" }
q16175
Cp2kParser._parse_stdout
train
def _parse_stdout(self, out_folder):
    """CP2K output parser"""

    fname = self.node.load_process_class()._DEFAULT_OUTPUT_FILE  # pylint: disable=protected-access
    if fname not in out_folder._repository.list_object_names():  # pylint: disable=protected-access
        raise OutputParsingError("Cp2k output file not retrieved")

    result_dict = {'exceeded_walltime': False}
    abs_fn = os.path.join(out_folder._repository._get_base_folder().abspath, fname)  # pylint: disable=protected-access
    with io.open(abs_fn, mode="r", encoding="utf-8") as fobj:
        lines = fobj.readlines()
        for i_line, line in enumerate(lines):
            if line.startswith(' ENERGY| '):
                result_dict['energy'] = float(line.split()[8])
                result_dict['energy_units'] = "a.u."

            if 'The number of warnings for this run is' in line:
                result_dict['nwarnings'] = int(line.split()[-1])

            if 'exceeded requested execution time' in line:
                result_dict['exceeded_walltime'] = True

            if "KPOINTS| Band Structure Calculation" in line:
                from aiida.orm import BandsData
                bnds = BandsData()
                kpoints, labels, bands = self._parse_bands(lines, i_line)
                bnds.set_kpoints(kpoints)
                bnds.labels = labels
                bnds.set_bands(bands, units='eV')
                self.out('output_bands', bnds)

    if 'nwarnings' not in result_dict:
        raise OutputParsingError("CP2K did not finish properly.")

    self.out('output_parameters', Dict(dict=result_dict))
python
{ "resource": "" }
q16176
Cp2kParser._parse_bands
train
def _parse_bands(lines, n_start):
    """Parse band structure from cp2k output"""

    kpoints = []
    labels = []
    bands_s1 = []
    bands_s2 = []
    known_kpoints = {}
    pattern = re.compile(".*?Nr.*?Spin.*?K-Point.*?", re.DOTALL)

    selected_lines = lines[n_start:]
    for current_line, line in enumerate(selected_lines):
        splitted = line.split()
        if "KPOINTS| Special K-Point" in line:
            kpoint = tuple(map(float, splitted[-3:]))
            if " ".join(splitted[-5:-3]) != "not specified":
                label = splitted[-4]
                known_kpoints[kpoint] = label
        elif pattern.match(line):
            spin = int(splitted[3])
            kpoint = tuple(map(float, splitted[-3:]))
            kpoint_n_lines = int(math.ceil(int(selected_lines[current_line + 1]) / 4.))
            band = list(map(float, ' '.join(
                selected_lines[current_line + 2:current_line + 2 + kpoint_n_lines]).split()))
            if spin == 1:
                if kpoint in known_kpoints:
                    labels.append((len(kpoints), known_kpoints[kpoint]))
                kpoints.append(kpoint)
                bands_s1.append(band)
            elif spin == 2:
                bands_s2.append(band)

    if bands_s2:
        bands = [bands_s1, bands_s2]
    else:
        bands = bands_s1

    return np.array(kpoints), labels, np.array(bands)
python
{ "resource": "" }
q16177
Cp2kParser._parse_trajectory
train
def _parse_trajectory(self, out_folder):
    """CP2K trajectory parser"""

    fname = self.node.load_process_class()._DEFAULT_RESTART_FILE_NAME  # pylint: disable=protected-access
    if fname not in out_folder._repository.list_object_names():  # pylint: disable=protected-access
        raise Exception  # not every run type produces a trajectory

    # read restart file
    abs_fn = os.path.join(out_folder._repository._get_base_folder().abspath, fname)  # pylint: disable=protected-access
    with io.open(abs_fn, mode="r", encoding="utf-8") as fobj:
        content = fobj.read()

    # parse coordinate section
    match = re.search(r'\n\s*&COORD\n(.*?)\n\s*&END COORD\n', content, DOTALL)
    coord_lines = [line.strip().split() for line in match.group(1).splitlines()]
    symbols = [line[0] for line in coord_lines]
    positions_str = [line[1:] for line in coord_lines]
    positions = np.array(positions_str, np.float64)

    # parse cell section
    match = re.search(r'\n\s*&CELL\n(.*?)\n\s*&END CELL\n', content, re.DOTALL)
    cell_lines = [line.strip().split() for line in match.group(1).splitlines()]
    cell_str = [line[1:] for line in cell_lines if line[0] in 'ABC']
    cell = np.array(cell_str, np.float64)

    # create StructureData
    atoms = ase.Atoms(symbols=symbols, positions=positions, cell=cell)
    return StructureData(ase=atoms)
python
{ "resource": "" }
q16178
Event.fire
train
def fire(self, exclude=None, delay=True):
    """Notify everyone watching the event.

    We are explicit about sending notifications; we don't just key off
    creation signals, because the receiver of a ``post_save`` signal has
    no idea what just changed, so it doesn't know which notifications to
    send. Also, we could easily send mail accidentally: for instance,
    during tests. If we want implicit event firing, we can always register
    a signal handler that calls :meth:`fire()`.

    :arg exclude: If a saved user is passed in, that user will not be
        notified, though anonymous notifications having the same email
        address may still be sent. A sequence of users may also be
        passed in.
    :arg delay: If True (default), the event is handled asynchronously
        with Celery. This requires the pickle task serializer, which is
        no longer the default starting in Celery 4.0. If False, the
        event is processed immediately.

    """
    if delay:
        # Tasks don't receive the `self` arg implicitly.
        self._fire_task.apply_async(args=(self,),
                                    kwargs={'exclude': exclude},
                                    serializer='pickle')
    else:
        self._fire_task(self, exclude=exclude)
python
{ "resource": "" }
q16179
Event._fire_task
train
def _fire_task(self, exclude=None):
    """Build and send the emails as a celery task."""
    connection = mail.get_connection(fail_silently=True)
    # Warning: fail_silently swallows errors thrown by the generators, too.
    connection.open()
    for m in self._mails(self._users_watching(exclude=exclude)):
        connection.send_messages([m])
python
{ "resource": "" }
q16180
Event._validate_filters
train
def _validate_filters(cls, filters):
    """Raise a TypeError if ``filters`` contains any keys inappropriate to
    this event class."""
    for k in iterkeys(filters):
        if k not in cls.filters:
            # Mirror "unexpected keyword argument" message:
            raise TypeError("%s got an unsupported filter type '%s'" %
                            (cls.__name__, k))
python
{ "resource": "" }
q16181
Event.notify
train
def notify(cls, user_or_email_, object_id=None, **filters):
    """Start notifying the given user or email address when this event
    occurs and meets the criteria given in ``filters``.

    Return the created (or the existing matching) Watch so you can call
    :meth:`~tidings.models.Watch.activate()` on it if you're so inclined.

    Implementations in subclasses may take different arguments; see the
    docstring of :meth:`is_notifying()`.

    Send an activation email if an anonymous watch is created and
    :data:`~django.conf.settings.TIDINGS_CONFIRM_ANONYMOUS_WATCHES` is
    ``True``. If the activation request fails, raise an
    ActivationRequestFailed exception.

    Calling :meth:`notify()` twice for an anonymous user will send the
    email each time.

    """
    # A test-for-existence-then-create race condition exists here, but it
    # doesn't matter: de-duplication on fire() and deletion of all matches
    # on stop_notifying() nullify its effects.
    try:
        # Pick 1 if >1 are returned:
        watch = cls._watches_belonging_to_user(
            user_or_email_,
            object_id=object_id,
            **filters)[0:1].get()
    except Watch.DoesNotExist:
        create_kwargs = {}
        if cls.content_type:
            create_kwargs['content_type'] = \
                ContentType.objects.get_for_model(cls.content_type)
        create_kwargs['email' if isinstance(user_or_email_, string_types)
                      else 'user'] = user_or_email_
        # Letters that can't be mistaken for other letters or numbers in
        # most fonts, in case people try to type these:
        distinguishable_letters = \
            'abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRTUVWXYZ'
        secret = ''.join(random.choice(distinguishable_letters)
                         for x in range(10))
        # Registered users don't need to confirm, but anonymous users do.
        is_active = ('user' in create_kwargs or
                     not settings.TIDINGS_CONFIRM_ANONYMOUS_WATCHES)
        if object_id:
            create_kwargs['object_id'] = object_id
        watch = Watch.objects.create(
            secret=secret,
            is_active=is_active,
            event_type=cls.event_type,
            **create_kwargs)
        for k, v in iteritems(filters):
            WatchFilter.objects.create(watch=watch, name=k,
                                       value=hash_to_unsigned(v))
    # Send email for inactive watches.
    if not watch.is_active:
        email = watch.user.email if watch.user else watch.email
        message = cls._activation_email(watch, email)
        try:
            message.send()
        except SMTPException as e:
            watch.delete()
            raise ActivationRequestFailed(e.recipients)
    return watch
python
{ "resource": "" }
q16182
InstanceEvent.notify
train
def notify(cls, user_or_email, instance):
    """Create, save, and return a watch which fires when something happens
    to ``instance``."""
    return super(InstanceEvent, cls).notify(user_or_email,
                                            object_id=instance.pk)
python
{ "resource": "" }
q16183
InstanceEvent.stop_notifying
train
def stop_notifying(cls, user_or_email, instance):
    """Delete the watch created by notify."""
    super(InstanceEvent, cls).stop_notifying(user_or_email,
                                             object_id=instance.pk)
python
{ "resource": "" }
q16184
InstanceEvent.is_notifying
train
def is_notifying(cls, user_or_email, instance):
    """Check if the watch created by notify exists."""
    return super(InstanceEvent, cls).is_notifying(user_or_email,
                                                  object_id=instance.pk)
python
{ "resource": "" }
q16185
InstanceEvent._users_watching
train
def _users_watching(self, **kwargs):
    """Return users watching this instance."""
    return self._users_watching_by_filter(object_id=self.instance.pk,
                                          **kwargs)
python
{ "resource": "" }
q16186
StrictSecret.decrypt
train
def decrypt(self):
    """Decrypt the secret and return the plaintext.

    Calling decrypt() may incur side effects such as a call to a remote
    service for decryption.
    """
    if not self._crypter:
        return b''
    try:
        plaintext = self._crypter.decrypt(self._ciphertext, **self._decrypt_params)
        return plaintext
    except Exception as e:
        exc_info = sys.exc_info()
        six.reraise(
            ValueError('Invalid ciphertext "%s", error: %s' % (self._ciphertext, e)),
            None,
            exc_info[2]
        )
python
{ "resource": "" }
q16187
AES_GCMEncrypter.encrypt
train
def encrypt(self, msg, iv='', auth_data=None):
    """
    Encrypts and authenticates the data provided as well as authenticating
    the associated_data.

    :param msg: The message to be encrypted
    :param iv: MUST be present, at least 96-bit long
    :param auth_data: Associated data
    :return: The cipher text bytes with the 16 byte tag appended.
    """
    if not iv:
        raise ValueError('Missing Nonce')

    return self.key.encrypt(iv, msg, auth_data)
python
{ "resource": "" }
q16188
JWEKey.enc_setup
train
def enc_setup(self, enc_alg, msg, auth_data=b'', key=None, iv=""):
    """
    Encrypt JWE content.

    :param enc_alg: The JWE "enc" value specifying the encryption algorithm
    :param msg: The plain text message
    :param auth_data: Additional authenticated data
    :param key: Key (CEK)
    :return: Tuple (ciphertext, tag), both as bytes
    """
    iv = self._generate_iv(enc_alg, iv)

    if enc_alg in ["A192GCM", "A128GCM", "A256GCM"]:
        aes = AES_GCMEncrypter(key=key)
        ctx, tag = split_ctx_and_tag(aes.encrypt(msg, iv, auth_data))
    elif enc_alg in ["A128CBC-HS256", "A192CBC-HS384", "A256CBC-HS512"]:
        aes = AES_CBCEncrypter(key=key)
        ctx, tag = aes.encrypt(msg, iv, auth_data)
    else:
        raise NotSupportedAlgorithm(enc_alg)

    return ctx, tag, aes.key
python
{ "resource": "" }
q16189
JWEKey._decrypt
train
def _decrypt(enc, key, ctxt, iv, tag, auth_data=b''):
    """
    Decrypt JWE content.

    :param enc: The JWE "enc" value specifying the encryption algorithm
    :param key: Key (CEK)
    :param iv: Initialization vector
    :param auth_data: Additional authenticated data (AAD)
    :param ctxt: Ciphertext
    :param tag: Authentication tag
    :return: plain text message or None if decryption failed
    """
    if enc in ["A128GCM", "A192GCM", "A256GCM"]:
        aes = AES_GCMEncrypter(key=key)
    elif enc in ["A128CBC-HS256", "A192CBC-HS384", "A256CBC-HS512"]:
        aes = AES_CBCEncrypter(key=key)
    else:
        raise Exception("Unsupported encryption algorithm %s" % enc)

    try:
        return aes.decrypt(ctxt, iv=iv, auth_data=auth_data, tag=tag)
    except DecryptionFailed:
        raise
python
{ "resource": "" }
q16190
PSSSigner.sign
train
def sign(self, msg, key):
    """
    Create a signature over a message.

    :param msg: The message
    :param key: The key
    :return: A signature
    """
    hasher = hashes.Hash(self.hash_algorithm(), backend=default_backend())
    hasher.update(msg)
    digest = hasher.finalize()
    sig = key.sign(
        digest,
        padding.PSS(
            mgf=padding.MGF1(self.hash_algorithm()),
            salt_length=padding.PSS.MAX_LENGTH),
        utils.Prehashed(self.hash_algorithm()))
    return sig
python
{ "resource": "" }
q16191
JWE_SYM.encrypt
train
def encrypt(self, key, iv="", cek="", **kwargs):
    """
    Produces a JWE as defined in RFC7516 using symmetric keys.

    :param key: Shared symmetric key
    :param iv: Initialization vector
    :param cek: Content master key
    :param kwargs: Extra keyword arguments, just ignore for now.
    :return:
    """
    _msg = as_bytes(self.msg)

    _args = self._dict
    try:
        _args["kid"] = kwargs["kid"]
    except KeyError:
        pass

    jwe = JWEnc(**_args)

    # If no iv and cek are given generate them
    iv = self._generate_iv(self["enc"], iv)
    cek = self._generate_key(self["enc"], cek)
    if isinstance(key, SYMKey):
        try:
            kek = key.key.encode('utf8')
        except AttributeError:
            kek = key.key
    elif isinstance(key, bytes):
        kek = key
    else:
        kek = intarr2str(key)

    # The iv for this function must be 64 bit
    # Which is certainly different from the one above
    jek = aes_key_wrap(kek, cek, default_backend())

    _enc = self["enc"]
    _auth_data = jwe.b64_encode_header()
    ctxt, tag, cek = self.enc_setup(_enc, _msg, auth_data=_auth_data,
                                    key=cek, iv=iv)
    return jwe.pack(parts=[jek, iv, ctxt, tag])
python
{ "resource": "" }
q16192
ec_construct_public
train
def ec_construct_public(num):
    """
    Given a set of values on public attributes build an elliptic curve
    public key instance.

    :param num: A dictionary with public attributes and their values
    :return: A cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePublicKey
        instance.
    """
    ecpn = ec.EllipticCurvePublicNumbers(num['x'], num['y'],
                                         NIST2SEC[as_unicode(num['crv'])]())
    return ecpn.public_key(default_backend())
python
{ "resource": "" }
q16193
ec_construct_private
train
def ec_construct_private(num):
    """
    Given a set of values on public and private attributes build an
    elliptic curve private key instance.

    :param num: A dictionary with public and private attributes and their values
    :return: A cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey
        instance.
    """
    pub_ecpn = ec.EllipticCurvePublicNumbers(num['x'], num['y'],
                                             NIST2SEC[as_unicode(num['crv'])]())
    priv_ecpn = ec.EllipticCurvePrivateNumbers(num['d'], pub_ecpn)
    return priv_ecpn.private_key(default_backend())
python
{ "resource": "" }
q16194
import_private_key_from_file
train
def import_private_key_from_file(filename, passphrase=None):
    """
    Read a private Elliptic Curve key from a PEM file.

    :param filename: The name of the file
    :param passphrase: A pass phrase to use to unpack the PEM file.
    :return: A cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey
        instance
    """
    with open(filename, "rb") as key_file:
        private_key = serialization.load_pem_private_key(
            key_file.read(),
            password=passphrase,
            backend=default_backend())
    return private_key
python
{ "resource": "" }
q16195
ECKey.serialize
train
def serialize(self, private=False):
    """
    Go from a cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey
    or EllipticCurvePublicKey instance to a JWK representation.

    :param private: Whether we should include the private attributes or not.
    :return: A JWK as a dictionary
    """
    if self.priv_key:
        self._serialize(self.priv_key)
    else:
        self._serialize(self.pub_key)

    res = self.common()
    res.update({
        "crv": self.crv,
        "x": self.x,
        "y": self.y
    })

    if private and self.d:
        res["d"] = self.d

    return res
python
{ "resource": "" }
q16196
ECKey.load_key
train
def load_key(self, key):
    """
    Load an Elliptic curve key.

    :param key: An elliptic curve key instance, private or public.
    :return: Reference to this instance
    """
    self._serialize(key)
    if isinstance(key, ec.EllipticCurvePrivateKey):
        self.priv_key = key
        self.pub_key = key.public_key()
    else:
        self.pub_key = key
    return self
python
{ "resource": "" }
q16197
ec_init
train
def ec_init(spec):
    """
    Initiate a key bundle with an elliptic curve key.

    :param spec: Key specifics of the form::

        {"type": "EC", "crv": "P-256", "use": ["sig"]}

    :return: A KeyBundle instance
    """
    kb = KeyBundle(keytype="EC")
    if 'use' in spec:
        for use in spec["use"]:
            eck = new_ec_key(crv=spec['crv'], use=use)
            kb.append(eck)
    else:
        eck = new_ec_key(crv=spec['crv'])
        kb.append(eck)
    return kb
python
{ "resource": "" }
q16198
dump_jwks
train
def dump_jwks(kbl, target, private=False):
    """
    Write a JWK to a file. Will ignore symmetric keys !!

    :param kbl: List of KeyBundles
    :param target: Name of the file to which everything should be written
    :param private: Should also the private parts be exported
    """
    keys = []
    for kb in kbl:
        keys.extend([k.serialize(private) for k in kb.keys()
                     if k.kty != 'oct' and not k.inactive_since])

    res = {"keys": keys}
    try:
        f = open(target, 'w')
    except IOError:
        (head, tail) = os.path.split(target)
        os.makedirs(head)
        f = open(target, 'w')

    _txt = json.dumps(res)
    f.write(_txt)
    f.close()
python
{ "resource": "" }
q16199
order_key_defs
train
def order_key_defs(key_def):
    """
    Sort a set of key definitions. A key definition that defines more than
    one usage type is split into as many definitions as the number of
    usage types specified, one key definition per usage type.

    :param key_def: A set of key definitions
    :return: The set of definitions as a sorted list
    """
    _int = []
    # First make sure all defs only reference one usage
    for kd in key_def:
        if len(kd['use']) > 1:
            for _use in kd['use']:
                _kd = kd.copy()
                _kd['use'] = _use
                _int.append(_kd)
        else:
            _int.append(kd)

    _int.sort(key=cmp_to_key(sort_func))
    return _int
python
{ "resource": "" }
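A small illustration of the splitting step in order_key_defs above; the key definition is a made-up example and the sorting step is omitted since it depends on the module's own sort_func:

kd = {"type": "EC", "crv": "P-256", "use": ["sig", "enc"]}
split = [dict(kd, use=u) for u in kd["use"]]
# -> [{'type': 'EC', 'crv': 'P-256', 'use': 'sig'},
#     {'type': 'EC', 'crv': 'P-256', 'use': 'enc'}]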