| _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 75-19.8k chars) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q9000
|
_serialize_argument
|
train
|
def _serialize_argument(rargname, value, varprops):
"""Serialize an MRS argument into the SimpleMRS format."""
_argument = '{rargname}: {value}{props}'
if rargname == CONSTARG_ROLE:
value = '"{}"'.format(value)
props = ''
if value in varprops:
props = ' [ {} ]'.format(
' '.join(
[var_sort(value)] +
list(map('{0[0]}: {0[1]}'.format,
[(k.upper(), v) for k, v in varprops[value]]))
)
)
del varprops[value] # only print props once
return _argument.format(
rargname=rargname,
value=str(value),
props=props
)
|
python
|
{
"resource": ""
}
|
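For illustration, a minimal standalone sketch of the SimpleMRS argument format the function above produces; `serialize_argument`, the crude sort extraction, and the 'CARG' constant role are stand-ins for pydelphin's own helpers, not the library API.

def serialize_argument(rargname, value, varprops, constarg_role='CARG'):
    # quote constant arguments, as in the snippet above
    if rargname == constarg_role:
        value = '"{}"'.format(value)
    props = ''
    if value in varprops:
        sort = ''.join(ch for ch in value if not ch.isdigit())  # crude var_sort
        pairs = ['{}: {}'.format(k.upper(), v) for k, v in varprops.pop(value)]
        props = ' [ {} ]'.format(' '.join([sort] + pairs))
    return '{}: {}{}'.format(rargname, value, props)

varprops = {'x4': [('pers', '3'), ('num', 'sg')]}
print(serialize_argument('ARG0', 'x4', varprops))   # ARG0: x4 [ x PERS: 3 NUM: sg ]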
q9001
|
_serialize_ep
|
train
|
def _serialize_ep(ep, varprops, version=_default_version):
"""Serialize an Elementary Predication into the SimpleMRS encoding."""
# ('nodeid', 'pred', 'label', 'args', 'lnk', 'surface', 'base')
args = ep[3]
arglist = ' '.join([_serialize_argument(rarg, args[rarg], varprops)
for rarg in sorted(args, key=rargname_sortkey)])
if version < 1.1 or len(ep) < 6 or ep[5] is None:
surface = ''
else:
surface = ' "%s"' % ep[5]
lnk = None if len(ep) < 5 else ep[4]
pred = ep[1]
predstr = pred.string
return '[ {pred}{lnk}{surface} LBL: {label}{s}{args} ]'.format(
pred=predstr,
lnk=_serialize_lnk(lnk),
surface=surface,
label=str(ep[2]),
s=' ' if arglist else '',
args=arglist
)
|
python
|
{
"resource": ""
}
|
q9002
|
_serialize_lnk
|
train
|
def _serialize_lnk(lnk):
"""Serialize a predication lnk to surface form into the SimpleMRS
encoding."""
s = ""
if lnk is not None:
s = '<'
if lnk.type == Lnk.CHARSPAN:
cfrom, cto = lnk.data
s += ''.join([str(cfrom), ':', str(cto)])
elif lnk.type == Lnk.CHARTSPAN:
cfrom, cto = lnk.data
s += ''.join([str(cfrom), '#', str(cto)])
elif lnk.type == Lnk.TOKENS:
s += ' '.join([str(t) for t in lnk.data])
elif lnk.type == Lnk.EDGE:
s += ''.join(['@', str(lnk.data)])
s += '>'
return s
|
python
|
{
"resource": ""
}
|
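The four branches above yield the following surface forms; a tiny standalone demo with illustrative values and no Lnk objects:

print('<{}:{}>'.format(0, 5))                     # CHARSPAN  -> <0:5>
print('<{}#{}>'.format(0, 2))                     # CHARTSPAN -> <0#2>
print('<{}>'.format(' '.join(map(str, (1, 2)))))  # TOKENS    -> <1 2>
print('<@{}>'.format(42))                         # EDGE      -> <@42>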
q9003
|
_UdfNodeBase.to_dict
|
train
|
def to_dict(self, fields=_all_fields, labels=None):
"""
Encode the node as a dictionary suitable for JSON serialization.
Args:
fields: if given, this is a whitelist of fields to include
on nodes (`daughters` and `form` are always shown)
labels: optional label annotations to embed in the
derivation dict; the value is a list of lists matching
the structure of the derivation (e.g.,
`["S" ["NP" ["NNS" ["Dogs"]]] ["VP" ["VBZ" ["bark"]]]]`)
Returns:
dict: the dictionary representation of the structure
"""
fields = set(fields)
diff = fields.difference(_all_fields)
if isinstance(labels, Sequence):
labels = _map_labels(self, labels)
elif labels is None:
labels = {}
if diff:
raise ValueError(
'Invalid field(s): {}'.format(', '.join(diff))
)
return _to_dict(self, fields, labels)
|
python
|
{
"resource": ""
}
|
q9004
|
UdfNode.is_head
|
train
|
def is_head(self):
"""
Return `True` if the node is a head.
A node is a head if it is marked as a head in the UDX format or
it has no siblings. `False` is returned if the node is known
to not be a head (has a sibling that is a head). Otherwise it
is indeterminate whether the node is a head, and `None` is
returned.
"""
if (self._head or self.is_root() or
len(getattr(self._parent, 'daughters', [None])) == 1):
return True
elif any(dtr._head for dtr in self._parent.daughters):
return False
return None
|
python
|
{
"resource": ""
}
|
q9005
|
Derivation.from_string
|
train
|
def from_string(cls, s):
"""
Instantiate a `Derivation` from a UDF or UDX string representation.
The UDF/UDX representations are as output by a processor like the
`LKB <http://moin.delph-in.net/LkbTop>`_ or
`ACE <http://sweaglesw.org/linguistics/ace/>`_, or from the
:meth:`UdfNode.to_udf` or :meth:`UdfNode.to_udx` methods.
Args:
s (str): UDF or UDX serialization
"""
if not (s.startswith('(') and s.endswith(')')):
raise ValueError(
'Derivations must begin and end with parentheses: ( )'
)
s_ = s[1:] # get rid of initial open-parenthesis
stack = []
deriv = None
try:
matches = cls.udf_re.finditer(s_)
for match in matches:
if match.group('done'):
node = stack.pop()
if len(stack) == 0:
deriv = node
break
else:
stack[-1].daughters.append(node)
elif match.group('form'):
if len(stack) == 0:
raise ValueError('Possible leaf node with no parent.')
gd = match.groupdict()
# ignore LKB-style start/end data if it exists on gd
term = UdfTerminal(
_unquote(gd['form']),
tokens=_udf_tokens(gd.get('tokens')),
parent=stack[-1] if stack else None
)
stack[-1].daughters.append(term)
elif match.group('id'):
gd = match.groupdict()
head = None
entity, _, type = gd['entity'].partition('@')
if entity[0] == '^':
entity = entity[1:]
head = True
if type == '':
type = None
udf = UdfNode(gd['id'], entity, gd['score'],
gd['start'], gd['end'],
head=head, type=type,
parent=stack[-1] if stack else None)
stack.append(udf)
elif match.group('root'):
udf = UdfNode(None, match.group('root'))
stack.append(udf)
except (ValueError, AttributeError):
raise ValueError('Invalid derivation: %s' % s)
if stack or deriv is None:
raise ValueError('Invalid derivation; possibly unbalanced '
'parentheses: %s' % s)
return cls(*deriv, head=deriv._head, type=deriv.type)
|
python
|
{
"resource": ""
}
|
q9006
|
loads
|
train
|
def loads(s, model):
"""
Deserialize PENMAN graphs from a string
Args:
s (str): serialized PENMAN graphs
model: Xmrs subclass instantiated from decoded triples
Returns:
a list of objects (of class *model*)
"""
graphs = penman.loads(s, cls=XMRSCodec)
xs = [model.from_triples(g.triples()) for g in graphs]
return xs
|
python
|
{
"resource": ""
}
|
q9007
|
calc_path_and_create_folders
|
train
|
def calc_path_and_create_folders(folder, import_path):
""" calculate the path and create the needed folders """
file_path = abspath(path_join(folder, import_path[:import_path.rfind(".")].replace(".", folder_seperator) + ".py"))
mkdir_p(dirname(file_path))
return file_path
|
python
|
{
"resource": ""
}
|
q9008
|
_peek
|
train
|
def _peek(tokens, n=0):
"""peek and drop comments"""
return tokens.peek(n=n, skip=_is_comment, drop=True)
|
python
|
{
"resource": ""
}
|
q9009
|
_shift
|
train
|
def _shift(tokens):
"""pop the next token, then peek the gid of the following"""
after = tokens.peek(n=1, skip=_is_comment, drop=True)
tok = tokens._buffer.popleft()
return tok[0], tok[1], tok[2], after[0]
|
python
|
{
"resource": ""
}
|
q9010
|
_accumulate
|
train
|
def _accumulate(lexitems):
"""
Yield lists of tokens based on very simple parsing that checks the
level of nesting within a structure. This is probably much faster
than the LookaheadIterator method, but it is less safe; an unclosed
list or AVM may cause it to build a list including the rest of the
file, or it may return a list that doesn't span a full definition.
As PyDelphin's goals for TDL parsing do not include speed, this
method is not currently used, although it is retained in the source
code as an example if future priorities change.
"""
data = []
stack = []
break_on = 10
in_def = False
for item in lexitems:
gid = item[0]
# only yield comments outside of definitions
if gid in (2, 3):
if len(data) == 0:
yield [item]
else:
continue
elif gid == 20:
assert len(data) == 0
yield [item]
# the following just checks if the previous definition was not
# terminated when the next one is read in
elif gid in (7, 8):
if in_def:
yield data[:-1]
data = data[-1:] + [item]
stack = []
break_on = 10
else:
data.append(item)
in_def = True
else:
data.append(item)
if gid == break_on:
if len(stack) == 0:
yield data
data = []
in_def = False
else:
break_on = stack.pop()
elif gid in (13, 14, 15):
stack.append(break_on)
break_on = gid + 3
if data:
yield data
|
python
|
{
"resource": ""
}
|
q9011
|
_lex
|
train
|
def _lex(stream):
"""
Lex the input stream according to _tdl_lex_re.
Yields
(gid, token, line_number)
"""
lines = enumerate(stream, 1)
line_no = pos = 0
try:
while True:
if pos == 0:
line_no, line = next(lines)
matches = _tdl_lex_re.finditer(line, pos)
pos = 0 # reset; only used for multiline patterns
for m in matches:
gid = m.lastindex
if gid <= 2: # potentially multiline patterns
if gid == 1: # docstring
s, start_line_no, line_no, line, pos = _bounded(
'"""', '"""', line, m.end(), line_no, lines)
elif gid == 2: # comment
s, start_line_no, line_no, line, pos = _bounded(
'#|', '|#', line, m.end(), line_no, lines)
yield (gid, s, line_no)
break
elif gid == 30:
raise TdlParsingError(
('Syntax error:\n {}\n {}^'
.format(line, ' ' * m.start())),
line_number=line_no)
else:
# token = None
# if not (6 < gid < 20):
# token = m.group(gid)
token = m.group(gid)
yield (gid, token, line_no)
except StopIteration:
pass
|
python
|
{
"resource": ""
}
|
q9012
|
format
|
train
|
def format(obj, indent=0):
"""
Serialize TDL objects to strings.
Args:
obj: instance of :class:`Term`, :class:`Conjunction`, or
:class:`TypeDefinition` classes or subclasses
indent (int): number of spaces to indent the formatted object
Returns:
str: serialized form of *obj*
Example:
>>> conj = tdl.Conjunction([
... tdl.TypeIdentifier('lex-item'),
... tdl.AVM([('SYNSEM.LOCAL.CAT.HEAD.MOD',
... tdl.ConsList(end=tdl.EMPTY_LIST_TYPE))])
... ])
>>> t = tdl.TypeDefinition('non-mod-lex-item', conj)
>>> print(format(t))
non-mod-lex-item := lex-item &
[ SYNSEM.LOCAL.CAT.HEAD.MOD < > ].
"""
if isinstance(obj, TypeDefinition):
return _format_typedef(obj, indent)
elif isinstance(obj, Conjunction):
return _format_conjunction(obj, indent)
elif isinstance(obj, Term):
return _format_term(obj, indent)
elif isinstance(obj, _MorphSet):
return _format_morphset(obj, indent)
elif isinstance(obj, _Environment):
return _format_environment(obj, indent)
elif isinstance(obj, FileInclude):
return _format_include(obj, indent)
else:
raise ValueError('cannot format object as TDL: {!r}'.format(obj))
|
python
|
{
"resource": ""
}
|
q9013
|
AVM.normalize
|
train
|
def normalize(self):
"""
Reduce trivial AVM conjunctions to just the AVM.
For example, in `[ ATTR1 [ ATTR2 val ] ]` the value of `ATTR1`
could be a conjunction with the sub-AVM `[ ATTR2 val ]`. This
method removes the conjunction so the sub-AVM nests directly
(equivalent to `[ ATTR1.ATTR2 val ]` in TDL).
"""
for attr in self._avm:
val = self._avm[attr]
if isinstance(val, Conjunction):
val.normalize()
if len(val.terms) == 1 and isinstance(val.terms[0], AVM):
self._avm[attr] = val.terms[0]
elif isinstance(val, AVM):
val.normalize()
|
python
|
{
"resource": ""
}
|
q9014
|
ConsList.values
|
train
|
def values(self):
"""
Return the list of values in the ConsList feature structure.
"""
if self._avm is None:
return []
else:
vals = [val for _, val in _collect_list_items(self)]
# the < a . b > notation puts b on the last REST path,
# which is not returned by _collect_list_items()
if self.terminated and self[self._last_path] is not None:
vals.append(self[self._last_path])
return vals
|
python
|
{
"resource": ""
}
|
q9015
|
ConsList.append
|
train
|
def append(self, value):
"""
Append an item to the end of an open ConsList.
Args:
value (:class:`Conjunction`, :class:`Term`): item to add
Raises:
:class:`TdlError`: when appending to a closed list
"""
if self._avm is not None and not self.terminated:
path = self._last_path
if path:
path += '.'
self[path + LIST_HEAD] = value
self._last_path = path + LIST_TAIL
self[self._last_path] = AVM()
else:
raise TdlError('Cannot append to a closed list.')
|
python
|
{
"resource": ""
}
|
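A standalone sketch (plain nested dicts, not the pydelphin AVM class) of how successive appends walk down the FIRST/REST paths described above:

def append(avm, path, value):
    node = avm
    for feat in path:
        node = node[feat]
    node['FIRST'] = value          # the new item goes on FIRST
    node['REST'] = {}              # the open tail becomes the next level
    return path + ['REST']

avm, path = {}, []
path = append(avm, path, 'a')
path = append(avm, path, 'b')
print(avm)   # {'FIRST': 'a', 'REST': {'FIRST': 'b', 'REST': {}}}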
q9016
|
ConsList.terminate
|
train
|
def terminate(self, end):
"""
Set the value of the tail of the list.
Adding values via :meth:`append` places them on the `FIRST`
feature of some level of the feature structure (e.g.,
`REST.FIRST`), while :meth:`terminate` places them on the
final `REST` feature (e.g., `REST.REST`). If *end* is a
:class:`Conjunction` or :class:`Term`, it is typically a
:class:`Coreference`, otherwise *end* is set to
`tdl.EMPTY_LIST_TYPE` or `tdl.LIST_TYPE`. This method does
not necessarily close the list; if *end* is `tdl.LIST_TYPE`,
the list is left open, otherwise it is closed.
Args:
end (str, :class:`Conjunction`, :class:`Term`): value to
use as the end of the list.
"""
if self.terminated:
raise TdlError('Cannot terminate a closed list.')
if end == LIST_TYPE:
self.terminated = False
elif end == EMPTY_LIST_TYPE:
if self._last_path:
self[self._last_path] = None
else:
self._avm = None
self.terminated = True
elif self._last_path:
self[self._last_path] = end
self.terminated = True
else:
raise TdlError('Empty list must be {} or {}'.format(
LIST_TYPE, EMPTY_LIST_TYPE))
|
python
|
{
"resource": ""
}
|
q9017
|
Conjunction.normalize
|
train
|
def normalize(self):
"""
Rearrange the conjunction to a conventional form.
This puts any coreference(s) first, followed by type terms,
then followed by AVM(s) (including lists). AVMs are
normalized via :meth:`AVM.normalize`.
"""
corefs = []
types = []
avms = []
for term in self._terms:
if isinstance(term, TypeTerm):
types.append(term)
elif isinstance(term, AVM):
term.normalize()
avms.append(term)
elif isinstance(term, Coreference):
corefs.append(term)
else:
raise TdlError('unexpected term {}'.format(term))
self._terms = corefs + types + avms
|
python
|
{
"resource": ""
}
|
q9018
|
Conjunction.add
|
train
|
def add(self, term):
"""
Add a term to the conjunction.
Args:
term (:class:`Term`, :class:`Conjunction`): term to add;
if a :class:`Conjunction`, all of its terms are added
to the current conjunction.
Raises:
:class:`TypeError`: when *term* is an invalid type
"""
if isinstance(term, Conjunction):
for term_ in term.terms:
self.add(term_)
elif isinstance(term, Term):
self._terms.append(term)
else:
raise TypeError('Not a Term or Conjunction')
|
python
|
{
"resource": ""
}
|
q9019
|
Conjunction.types
|
train
|
def types(self):
"""Return the list of type terms in the conjunction."""
return [term for term in self._terms
if isinstance(term, (TypeIdentifier, String, Regex))]
|
python
|
{
"resource": ""
}
|
q9020
|
Conjunction.features
|
train
|
def features(self, expand=False):
"""Return the list of feature-value pairs in the conjunction."""
featvals = []
for term in self._terms:
if isinstance(term, AVM):
featvals.extend(term.features(expand=expand))
return featvals
|
python
|
{
"resource": ""
}
|
q9021
|
Conjunction.string
|
train
|
def string(self):
"""
Return the first string term in the conjunction, or `None`.
"""
for term in self._terms:
if isinstance(term, String):
return str(term)
return None
|
python
|
{
"resource": ""
}
|
q9022
|
TypeDefinition.documentation
|
train
|
def documentation(self, level='first'):
"""
Return the documentation of the type.
By default, this is the first docstring on a top-level term.
By setting *level* to `"top"`, the list of all docstrings on
top-level terms is returned, including the type's `docstring`
value, if not `None`, as the last item. The docstring for the
type itself is available via :attr:`TypeDefinition.docstring`.
Args:
level (str): `"first"` or `"top"`
Returns:
a single docstring or a list of docstrings
"""
docs = (t.docstring for t in list(self.conjunction.terms) + [self]
if t.docstring is not None)
if level.lower() == 'first':
doc = next(docs, None)
elif level.lower() == 'top':
doc = list(docs)
return doc
|
python
|
{
"resource": ""
}
|
q9023
|
TdlDefinition.local_constraints
|
train
|
def local_constraints(self):
"""
Return the constraints defined in the local AVM.
"""
cs = []
for feat, val in self._avm.items():
try:
if val.supertypes and not val._avm:
cs.append((feat, val))
else:
for subfeat, subval in val.features():
cs.append(('{}.{}'.format(feat, subfeat), subval))
except AttributeError:
cs.append((feat, val))
return cs
|
python
|
{
"resource": ""
}
|
q9024
|
TdlConsList.values
|
train
|
def values(self):
"""
Return the list of values.
"""
def collect(d):
if d is None or d.get('FIRST') is None:
return []
vals = [d['FIRST']]
vals.extend(collect(d.get('REST')))
return vals
return collect(self)
|
python
|
{
"resource": ""
}
|
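The same FIRST/REST recursion, run on a plain nested dict for illustration (no TdlConsList instance needed):

def collect(d):
    if d is None or d.get('FIRST') is None:
        return []
    return [d['FIRST']] + collect(d.get('REST'))

cons = {'FIRST': 'a', 'REST': {'FIRST': 'b', 'REST': None}}
print(collect(cons))   # ['a', 'b']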
q9025
|
Variable.from_dict
|
train
|
def from_dict(cls, d):
"""Instantiate a Variable from a dictionary representation."""
return cls(
d['type'], tuple(d['parents']), list(d['properties'].items())
)
|
python
|
{
"resource": ""
}
|
q9026
|
Role.from_dict
|
train
|
def from_dict(cls, d):
"""Instantiate a Role from a dictionary representation."""
return cls(
d['rargname'],
d['value'],
list(d.get('properties', {}).items()),
d.get('optional', False)
)
|
python
|
{
"resource": ""
}
|
q9027
|
Role.to_dict
|
train
|
def to_dict(self):
"""Return a dictionary representation of the Role."""
d = {'rargname': self.rargname, 'value': self.value}
if self.properties:
d['properties'] = self.properties
if self.optional:
d['optional'] = self.optional
return d
|
python
|
{
"resource": ""
}
|
q9028
|
Predicate.from_dict
|
train
|
def from_dict(cls, d):
"""Instantiate a Predicate from a dictionary representation."""
synopses = [tuple(map(Role.from_dict, synopsis))
for synopsis in d.get('synopses', [])]
return cls(d['predicate'], tuple(d['parents']), synopses)
|
python
|
{
"resource": ""
}
|
q9029
|
Predicate.to_dict
|
train
|
def to_dict(self):
"""Return a dictionary representation of the Predicate."""
return {
'predicate': self.predicate,
'parents': list(self.supertypes),
'synopses': [[role.to_dict() for role in synopsis]
for synopsis in self.synopses]
}
|
python
|
{
"resource": ""
}
|
q9030
|
SemI.from_dict
|
train
|
def from_dict(cls, d):
"""Instantiate a SemI from a dictionary representation."""
read = lambda cls: (lambda pair: (pair[0], cls.from_dict(pair[1])))
return cls(
variables=map(read(Variable), d.get('variables', {}).items()),
properties=map(read(Property), d.get('properties', {}).items()),
roles=map(read(Role), d.get('roles', {}).items()),
predicates=map(read(Predicate), d.get('predicates', {}).items())
)
|
python
|
{
"resource": ""
}
|
q9031
|
SemI.to_dict
|
train
|
def to_dict(self):
"""Return a dictionary representation of the SemI."""
make = lambda pair: (pair[0], pair[1].to_dict())
return dict(
variables=dict(make(v) for v in self.variables.items()),
properties=dict(make(p) for p in self.properties.items()),
roles=dict(make(r) for r in self.roles.items()),
predicates=dict(make(p) for p in self.predicates.items())
)
|
python
|
{
"resource": ""
}
|
q9032
|
sort_vid_split
|
train
|
def sort_vid_split(vs):
"""
Split a valid variable string into its variable sort and id.
Examples:
>>> sort_vid_split('h3')
('h', '3')
>>> sort_vid_split('ref-ind12')
('ref-ind', '12')
"""
match = var_re.match(vs)
if match is None:
raise ValueError('Invalid variable string: {}'.format(str(vs)))
else:
return match.groups()
|
python
|
{
"resource": ""
}
|
q9033
|
Lnk.charspan
|
train
|
def charspan(cls, start, end):
"""
Create a Lnk object for a character span.
Args:
start: the initial character position (cfrom)
end: the final character position (cto)
"""
return cls(Lnk.CHARSPAN, (int(start), int(end)))
|
python
|
{
"resource": ""
}
|
q9034
|
Lnk.chartspan
|
train
|
def chartspan(cls, start, end):
"""
Create a Lnk object for a chart span.
Args:
start: the initial chart vertex
end: the final chart vertex
"""
return cls(Lnk.CHARTSPAN, (int(start), int(end)))
|
python
|
{
"resource": ""
}
|
q9035
|
Lnk.tokens
|
train
|
def tokens(cls, tokens):
"""
Create a Lnk object for a token range.
Args:
tokens: a list of token identifiers
"""
return cls(Lnk.TOKENS, tuple(map(int, tokens)))
|
python
|
{
"resource": ""
}
|
q9036
|
_LnkMixin.cfrom
|
train
|
def cfrom(self):
"""
The initial character position in the surface string.
Defaults to -1 if there is no valid cfrom value.
"""
cfrom = -1
try:
if self.lnk.type == Lnk.CHARSPAN:
cfrom = self.lnk.data[0]
except AttributeError:
pass # use default cfrom of -1
return cfrom
|
python
|
{
"resource": ""
}
|
q9037
|
_LnkMixin.cto
|
train
|
def cto(self):
"""
The final character position in the surface string.
Defaults to -1 if there is no valid cto value.
"""
cto = -1
try:
if self.lnk.type == Lnk.CHARSPAN:
cto = self.lnk.data[1]
except AttributeError:
pass # use default cto of -1
return cto
|
python
|
{
"resource": ""
}
|
q9038
|
Pred.surface
|
train
|
def surface(cls, predstr):
"""Instantiate a Pred from its quoted string representation."""
lemma, pos, sense, _ = split_pred_string(predstr)
return cls(Pred.SURFACE, lemma, pos, sense, predstr)
|
python
|
{
"resource": ""
}
|
q9039
|
Pred.abstract
|
train
|
def abstract(cls, predstr):
"""Instantiate a Pred from its symbol string."""
lemma, pos, sense, _ = split_pred_string(predstr)
return cls(Pred.ABSTRACT, lemma, pos, sense, predstr)
|
python
|
{
"resource": ""
}
|
q9040
|
Pred.surface_or_abstract
|
train
|
def surface_or_abstract(cls, predstr):
"""Instantiate a Pred from either its surface or abstract symbol."""
if predstr.strip('"').lstrip("'").startswith('_'):
return cls.surface(predstr)
else:
return cls.abstract(predstr)
|
python
|
{
"resource": ""
}
|
q9041
|
Pred.realpred
|
train
|
def realpred(cls, lemma, pos, sense=None):
"""Instantiate a Pred from its components."""
string_tokens = [lemma]
if pos is not None:
string_tokens.append(pos)
if sense is not None:
sense = str(sense)
string_tokens.append(sense)
predstr = '_'.join([''] + string_tokens + ['rel'])
return cls(Pred.REALPRED, lemma, pos, sense, predstr)
|
python
|
{
"resource": ""
}
|
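A standalone sketch of the predicate string the constructor above assembles; the leading underscore and '_rel' suffix come directly from the code, the lemma/pos/sense values are illustrative.

def realpred_string(lemma, pos, sense=None):
    tokens = [lemma]
    if pos is not None:
        tokens.append(pos)
    if sense is not None:
        tokens.append(str(sense))
    return '_'.join([''] + tokens + ['rel'])

print(realpred_string('dog', 'n', 1))    # _dog_n_1_rel
print(realpred_string('bark', 'v'))      # _bark_v_rel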
q9042
|
Node.properties
|
train
|
def properties(self):
"""
Morphosemantic property mapping.
Unlike :attr:`sortinfo`, this does not include `cvarsort`.
"""
d = dict(self.sortinfo)
if CVARSORT in d:
del d[CVARSORT]
return d
|
python
|
{
"resource": ""
}
|
q9043
|
EntityRepresentation.update_get_params
|
train
|
def update_get_params(self):
"""Update HTTP GET params with the given fields that user wants to fetch."""
if isinstance(self._fields, (tuple, list)): # tuples & lists > x,y,z
self.get_params["fields"] = ",".join([str(_) for _ in self._fields])
elif isinstance(self._fields, str):
self.get_params["fields"] = self._fields
|
python
|
{
"resource": ""
}
|
q9044
|
EntityRepresentation._fetch_meta_data
|
train
|
def _fetch_meta_data(self):
"""Makes an API call to fetch meta data for the given probe and stores the raw data."""
is_success, meta_data = AtlasRequest(
url_path=self.API_META_URL.format(self.id),
key=self.api_key,
server=self.server,
verify=self.verify,
user_agent=self._user_agent
).get(**self.get_params)
self.meta_data = meta_data
if not is_success:
return False
return True
|
python
|
{
"resource": ""
}
|
q9045
|
Probe._populate_data
|
train
|
def _populate_data(self):
"""Assing some probe's raw meta data from API response to instance properties"""
if self.id is None:
self.id = self.meta_data.get("id")
self.is_anchor = self.meta_data.get("is_anchor")
self.country_code = self.meta_data.get("country_code")
self.description = self.meta_data.get("description")
self.is_public = self.meta_data.get("is_public")
self.asn_v4 = self.meta_data.get("asn_v4")
self.asn_v6 = self.meta_data.get("asn_v6")
self.address_v4 = self.meta_data.get("address_v4")
self.address_v6 = self.meta_data.get("address_v6")
self.prefix_v4 = self.meta_data.get("prefix_v4")
self.prefix_v6 = self.meta_data.get("prefix_v6")
self.geometry = self.meta_data.get("geometry")
self.tags = self.meta_data.get("tags")
self.status = self.meta_data.get("status", {}).get("name")
|
python
|
{
"resource": ""
}
|
q9046
|
Measurement._populate_data
|
train
|
def _populate_data(self):
"""Assinging some measurement's raw meta data from API response to instance properties"""
if self.id is None:
self.id = self.meta_data.get("id")
self.stop_time = None
self.creation_time = None
self.start_time = None
self.populate_times()
self.protocol = self.meta_data.get("af")
self.target_ip = self.meta_data.get("target_ip")
self.target_asn = self.meta_data.get("target_asn")
self.target = self.meta_data.get("target")
self.description = self.meta_data.get("description")
self.is_oneoff = self.meta_data.get("is_oneoff")
self.is_public = self.meta_data.get("is_public")
self.interval = self.meta_data.get("interval")
self.resolve_on_probe = self.meta_data.get("resolve_on_probe")
self.status_id = self.meta_data.get("status", {}).get("id")
self.status = self.meta_data.get("status", {}).get("name")
self.type = self.get_type()
self.result_url = self.meta_data.get("result")
|
python
|
{
"resource": ""
}
|
q9047
|
Measurement.get_type
|
train
|
def get_type(self):
"""
Get the type of the measurement, keeping backwards compatibility with
the v2 API output changes.
"""
mtype = None
if "type" not in self.meta_data:
return mtype
mtype = self.meta_data["type"]
if isinstance(mtype, dict):
mtype = self.meta_data.get("type", {}).get("name", "").upper()
elif isinstance(mtype, str):
mtype = mtype
return mtype
|
python
|
{
"resource": ""
}
|
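The two API shapes the method above accommodates, shown with a stripped-down standalone version (field values illustrative):

def get_type(meta_data):
    mtype = meta_data.get("type")
    if isinstance(mtype, dict):                # v2 API: nested object
        mtype = mtype.get("name", "").upper()
    return mtype                               # v1 API: plain string

print(get_type({"type": {"name": "ping"}}))   # 'PING'
print(get_type({"type": "ping"}))             # 'ping'
print(get_type({}))                           # None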
q9048
|
Measurement.populate_times
|
train
|
def populate_times(self):
"""
Populate the various meta data timestamps that come with the
measurement, if they are present.
"""
stop_time = self.meta_data.get("stop_time")
if stop_time:
stop_naive = datetime.utcfromtimestamp(stop_time)
self.stop_time = stop_naive.replace(tzinfo=tzutc())
creation_time = self.meta_data.get("creation_time")
if creation_time:
creation_naive = datetime.utcfromtimestamp(creation_time)
self.creation_time = creation_naive.replace(tzinfo=tzutc())
start_time = self.meta_data.get("start_time")
if start_time:
start_naive = datetime.utcfromtimestamp(start_time)
self.start_time = start_naive.replace(tzinfo=tzutc())
|
python
|
{
"resource": ""
}
|
q9049
|
AtlasChangeSource.set_action
|
train
|
def set_action(self, value):
"""Setter for action attribute"""
if value not in ("remove", "add"):
log = "Sources field 'action' should be 'remove' or 'add'."
raise MalFormattedSource(log)
self._action = value
|
python
|
{
"resource": ""
}
|
q9050
|
AtlasChangeSource.build_api_struct
|
train
|
def build_api_struct(self):
"""
Call the parent's method and add the additional field 'action' that
is required to form the structure the Atlas API accepts.
"""
data = super(AtlasChangeSource, self).build_api_struct()
data.update({"action": self._action})
return data
|
python
|
{
"resource": ""
}
|
q9051
|
AtlasMeasurement.add_option
|
train
|
def add_option(self, **options):
"""
Add an option and its value to the instance as an attribute and store
it in the set of used options.
"""
for option, value in options.items():
setattr(self, option, value)
self._store_option(option)
|
python
|
{
"resource": ""
}
|
q9052
|
AtlasMeasurement.v2_translator
|
train
|
def v2_translator(self, option):
"""
This is a temporary helper that eases the move from the v1 API to v2
without breaking already running scripts, keeping backwards compatibility.
It translates an option name from API v1 to its renamed v2 equivalent.
"""
new_option = option
new_value = getattr(self, option)
renaming_pairs = {
"dontfrag": "dont_fragment",
"maxhops": "max_hops",
"firsthop": "first_hop",
"use_NSID": "set_nsid_bit",
"cd": "set_cd_bit",
"do": "set_do_bit",
"qbuf": "include_qbuf",
"recursion_desired": "set_rd_bit",
"noabuf": "include_abuf"
}
if option in renaming_pairs.keys():
warninglog = (
"DeprecationWarning: {0} option has been deprecated and "
"renamed to {1}."
).format(option, renaming_pairs[option])
print(warninglog)
new_option = renaming_pairs[option]
# noabuf was changed to include_abuf so we need a double-negative
if option == "noabuf":
new_value = not new_value
return new_option, new_value
|
python
|
{
"resource": ""
}
|
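A standalone sketch of the renaming logic above, trimmed to two entries, including the double negation for "noabuf":

RENAMED = {"maxhops": "max_hops", "noabuf": "include_abuf"}   # trimmed map

def translate(option, value):
    new_option = RENAMED.get(option, option)
    if option == "noabuf":            # noabuf -> include_abuf needs negation
        value = not value
    return new_option, value

print(translate("noabuf", True))    # ('include_abuf', False)
print(translate("maxhops", 30))     # ('max_hops', 30)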
q9053
|
AtlasStream.connect
|
train
|
def connect(self):
"""Initiate the channel we want to start streams from."""
self.socketIO = SocketIO(
host=self.iosocket_server,
port=80,
resource=self.iosocket_resource,
proxies=self.proxies,
headers=self.headers,
transports=["websocket"],
Namespace=AtlasNamespace,
)
self.socketIO.on(self.EVENT_NAME_ERROR, self.handle_error)
|
python
|
{
"resource": ""
}
|
q9054
|
AtlasStream.bind_channel
|
train
|
def bind_channel(self, channel, callback):
"""Bind given channel with the given callback"""
# Remove the following list when deprecation time expires
if channel in self.CHANNELS:
warning = (
"The event name '{}' will soon be deprecated. Use "
"the real event name '{}' instead."
).format(channel, self.CHANNELS[channel])
self.handle_error(warning)
channel = self.CHANNELS[channel]
# -------------------------------------------------------
if channel == self.EVENT_NAME_ERROR:
self.error_callback = callback
elif channel == self.EVENT_NAME_RESULTS:
self.socketIO.on(channel, partial(self.unpack_results, callback))
else:
self.socketIO.on(channel, callback)
|
python
|
{
"resource": ""
}
|
q9055
|
AtlasStream.start_stream
|
train
|
def start_stream(self, stream_type, **stream_parameters):
"""Starts new stream for given type with given parameters"""
if stream_type:
self.subscribe(stream_type, **stream_parameters)
else:
self.handle_error("You need to set a stream type")
|
python
|
{
"resource": ""
}
|
q9056
|
AtlasStream.subscribe
|
train
|
def subscribe(self, stream_type, **parameters):
"""Subscribe to stream with give parameters."""
parameters["stream_type"] = stream_type
if (stream_type == "result") and ("buffering" not in parameters):
parameters["buffering"] = True
self.socketIO.emit(self.EVENT_NAME_SUBSCRIBE, parameters)
|
python
|
{
"resource": ""
}
|
q9057
|
AtlasStream.timeout
|
train
|
def timeout(self, seconds=None):
"""
Time out all streams after the given number of seconds, or wait
forever if seconds is None.
"""
if seconds is None:
self.socketIO.wait()
else:
self.socketIO.wait(seconds=seconds)
|
python
|
{
"resource": ""
}
|
q9058
|
RequestGenerator.build_url
|
train
|
def build_url(self):
"""Build the url path based on the filter options."""
if not self.api_filters:
return self.url
# Reduce complex objects to simpler strings
for k, v in self.api_filters.items():
if isinstance(v, datetime): # datetime > UNIX timestamp
self.api_filters[k] = int(calendar.timegm(v.timetuple()))
if isinstance(v, (tuple, list)): # tuples & lists > x,y,z
self.api_filters[k] = ",".join([str(_) for _ in v])
if (
self.id_filter in self.api_filters and
len(str(self.api_filters[self.id_filter])) > self.URL_LENGTH_LIMIT
):
self.build_url_chunks()
return self.split_urls.pop(0)
filters = '&'.join("%s=%s" % (k, v) for (k, v) in self.api_filters.items())
return "%s?%s" % (self.url, filters)
|
python
|
{
"resource": ""
}
|
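How the filter-reduction step above flattens values before the query string is assembled; a standalone sketch with illustrative filter values:

import calendar
from datetime import datetime

api_filters = {"start": datetime(2020, 1, 1), "probe_ids": [1, 2, 3]}
for k, v in api_filters.items():
    if isinstance(v, datetime):                      # datetime -> UNIX timestamp
        api_filters[k] = int(calendar.timegm(v.timetuple()))
    if isinstance(v, (tuple, list)):                 # list -> "x,y,z"
        api_filters[k] = ",".join(str(x) for x in v)

print("&".join("%s=%s" % kv for kv in api_filters.items()))
# start=1577836800&probe_ids=1,2,3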
q9059
|
RequestGenerator.build_url_chunks
|
train
|
def build_url_chunks(self):
"""
If the URL is too long because the id filter is huge, break the ids into
chunks and construct several URLs to call in turn, hiding this complexity
from the user.
"""
CHUNK_SIZE = 500
id_filter = str(self.api_filters.pop(self.id_filter)).split(',')
chuncks = list(self.chunks(id_filter, CHUNK_SIZE))
filters = '&'.join("%s=%s" % (k, v) for (k, v) in self.api_filters.items())
for chunk in chuncks:
if filters:
url = "{0}?{1}&{2}={3}".format(self.url, filters, self.id_filter, ','.join(chunk))
else:
url = "{0}?{1}={2}".format(self.url, self.id_filter, ','.join(chunk))
self.split_urls.append(url)
|
python
|
{
"resource": ""
}
|
q9060
|
RequestGenerator.next_batch
|
train
|
def next_batch(self):
"""
Query the API for the next batch of objects and store the next URL and
the batch of objects.
"""
is_success, results = AtlasRequest(
url_path=self.atlas_url,
user_agent=self._user_agent,
server=self.server,
verify=self.verify,
).get()
if not is_success:
raise APIResponseError(results)
self.total_count = results.get("count")
self.atlas_url = self.build_next_url(results.get("next"))
self.current_batch = results.get("results", [])
|
python
|
{
"resource": ""
}
|
q9061
|
RequestGenerator.build_next_url
|
train
|
def build_next_url(self, url):
"""Builds next url in a format compatible with cousteau. Path + query"""
if not url:
if self.split_urls: # If we had a long request give the next part
self.total_count_flag = False # Reset flag for count
return self.split_urls.pop(0)
else:
return None
parsed_url = urlparse(url)
return "{0}?{1}".format(parsed_url.path, parsed_url.query)
|
python
|
{
"resource": ""
}
|
q9062
|
RequestGenerator.set_total_count
|
train
|
def set_total_count(self, value):
"""Setter for count attribute. Set should append only one count per splitted url."""
if not self.total_count_flag and value:
self._count.append(int(value))
self.total_count_flag = True
|
python
|
{
"resource": ""
}
|
q9063
|
AtlasRequest.get_headers
|
train
|
def get_headers(self):
"""Return header for the HTTP request."""
headers = {
"User-Agent": self.http_agent,
"Content-Type": "application/json",
"Accept": "application/json"
}
if self.headers:
headers.update(self.headers)
return headers
|
python
|
{
"resource": ""
}
|
q9064
|
AtlasRequest.http_method
|
train
|
def http_method(self, method):
"""
Execute the given HTTP method and return whether it succeeded, together
with the response: a Python object parsed from JSON on success, or the
response text (or exception args) otherwise.
"""
self.build_url()
try:
response = self.get_http_method(method)
is_success = response.ok
try:
response_message = response.json()
except ValueError:
response_message = response.text
except requests.exceptions.RequestException as exc:
is_success = False
response_message = exc.args
return is_success, response_message
|
python
|
{
"resource": ""
}
|
q9065
|
AtlasRequest.get_http_method
|
train
|
def get_http_method(self, method):
"""Gets the http method that will be called from the requests library"""
return self.http_methods[method](self.url, **self.http_method_args)
|
python
|
{
"resource": ""
}
|
q9066
|
AtlasRequest.get
|
train
|
def get(self, **url_params):
"""
Makes the HTTP GET to the url.
"""
if url_params:
self.http_method_args["params"].update(url_params)
return self.http_method("GET")
|
python
|
{
"resource": ""
}
|
q9067
|
AtlasRequest.post
|
train
|
def post(self):
"""
Makes the HTTP POST to the url sending post_data.
"""
self._construct_post_data()
post_args = {"json": self.post_data}
self.http_method_args.update(post_args)
return self.http_method("POST")
|
python
|
{
"resource": ""
}
|
q9068
|
AtlasRequest.clean_time
|
train
|
def clean_time(self, time):
"""
Transform the time field into a datetime object, if one is present.
"""
if isinstance(time, int):
time = datetime.utcfromtimestamp(time)
elif isinstance(time, str):
time = parser.parse(time)
return time
|
python
|
{
"resource": ""
}
|
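The two accepted input shapes for the time field above, assuming python-dateutil is installed (as the snippet's `parser` import implies); values are illustrative:

from datetime import datetime
from dateutil import parser

print(datetime.utcfromtimestamp(1577836800))    # 2020-01-01 00:00:00
print(parser.parse("2020-01-01T00:00:00Z"))     # 2020-01-01 00:00:00+00:00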
q9069
|
AtlasCreateRequest._construct_post_data
|
train
|
def _construct_post_data(self):
"""
Construct the data structure required by the Atlas API, based on the
measurements, sources, and times the user has specified.
"""
definitions = [msm.build_api_struct() for msm in self.measurements]
probes = [source.build_api_struct() for source in self.sources]
self.post_data = {
"definitions": definitions,
"probes": probes,
"is_oneoff": self.is_oneoff
}
if self.is_oneoff:
self.post_data.update({"is_oneoff": self.is_oneoff})
if self.start_time:
self.post_data.update(
{"start_time": int(calendar.timegm(self.start_time.timetuple()))}
)
if self.stop_time:
self.post_data.update(
{"stop_time": int(calendar.timegm(self.stop_time.timetuple()))}
)
if self.bill_to:
self.post_data.update({"bill_to": self.bill_to})
|
python
|
{
"resource": ""
}
|
q9070
|
AtlasResultsRequest.clean_probes
|
train
|
def clean_probes(self, probe_ids):
"""
Check the format of the probe ids and transform them into something the
API understands.
"""
if isinstance(probe_ids, (tuple, list)): # tuples & lists > x,y,z
probe_ids = ",".join([str(_) for _ in probe_ids])
return probe_ids
|
python
|
{
"resource": ""
}
|
q9071
|
AtlasResultsRequest.update_http_method_params
|
train
|
def update_http_method_params(self):
"""
Update HTTP url parameters based on msm_id and query filters if
there are any.
"""
url_params = {}
if self.start:
url_params.update(
{"start": int(calendar.timegm(self.start.timetuple()))}
)
if self.stop:
url_params.update(
{"stop": int(calendar.timegm(self.stop.timetuple()))}
)
if self.probe_ids:
url_params.update({"probe_ids": self.probe_ids})
self.http_method_args["params"].update(url_params)
|
python
|
{
"resource": ""
}
|
q9072
|
SearchInterface.lookup
|
train
|
def lookup(self, id, service=None, media=None, extended=None, **kwargs):
"""Lookup items by their Trakt, IMDB, TMDB, TVDB, or TVRage ID.
**Note:** If you lookup an identifier without a :code:`media` type specified it
might return multiple items if the :code:`service` is not globally unique.
:param id: Identifier value to lookup
:type id: :class:`~python:str` or :class:`~python:int`
:param service: Identifier service
**Possible values:**
- :code:`trakt`
- :code:`imdb`
- :code:`tmdb`
- :code:`tvdb`
- :code:`tvrage`
:type service: :class:`~python:str`
:param media: Desired media type (or :code:`None` to return all matching items)
**Possible values:**
- :code:`movie`
- :code:`show`
- :code:`episode`
- :code:`person`
- :code:`list`
:type media: :class:`~python:str` or :class:`~python:list` of :class:`~python:str`
:param extended: Level of information to include in response
**Possible values:**
- :code:`None`: Minimal (e.g. title, year, ids) **(default)**
- :code:`full`: Complete
:type extended: :class:`~python:str`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Results
:rtype: :class:`trakt.objects.media.Media` or :class:`~python:list` of :class:`trakt.objects.media.Media`
"""
# Expand tuple `id`
if type(id) is tuple:
if len(id) != 2:
raise ValueError()
id, service = id
# Validate parameters
if not service:
raise ValueError('Invalid value provided for the "service" parameter')
# Build query
query = {}
if isinstance(media, six.string_types):
query['type'] = media
elif isinstance(media, list):
query['type'] = ','.join(media)
if extended:
query['extended'] = extended
# Send request
response = self.http.get(
params=[service, id],
query=query
)
# Parse response
items = self.get_data(response, **kwargs)
if isinstance(items, requests.Response):
return items
if not items:
return None
count = len(items)
if count > 1:
return SearchMapper.process_many(self.client, items)
elif count == 1:
return SearchMapper.process(self.client, items[0])
return None
|
python
|
{
"resource": ""
}
|
q9073
|
SearchInterface.query
|
train
|
def query(self, query, media=None, year=None, fields=None, extended=None, **kwargs):
"""Search by titles, descriptions, translated titles, aliases, and people.
**Note:** Results are ordered by the most relevant score.
:param query: Search title or description
:type query: :class:`~python:str`
:param media: Desired media type (or :code:`None` to return all matching items)
**Possible values:**
- :code:`movie`
- :code:`show`
- :code:`episode`
- :code:`person`
- :code:`list`
:type media: :class:`~python:str` or :class:`~python:list` of :class:`~python:str`
:param year: Desired media year (or :code:`None` to return all matching items)
:type year: :class:`~python:str` or :class:`~python:int`
:param fields: Fields to search for :code:`query` (or :code:`None` to search all fields)
:type fields: :class:`~python:str` or :class:`~python:list`
:param extended: Level of information to include in response
**Possible values:**
- :code:`None`: Minimal (e.g. title, year, ids) **(default)**
- :code:`full`: Complete
:type extended: :class:`~python:str`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Results
:rtype: :class:`~python:list` of :class:`trakt.objects.media.Media`
"""
# Validate parameters
if not media:
warnings.warn(
"\"media\" parameter is now required on the Trakt['search'].query() method",
DeprecationWarning, stacklevel=2
)
if fields and not media:
raise ValueError('"fields" can only be used when the "media" parameter is defined')
# Build query
query = {
'query': query
}
if year:
query['year'] = year
if fields:
query['fields'] = fields
if extended:
query['extended'] = extended
# Serialize media items
if isinstance(media, list):
media = ','.join(media)
# Send request
response = self.http.get(
params=[media],
query=query
)
# Parse response
items = self.get_data(response, **kwargs)
if isinstance(items, requests.Response):
return items
if items is not None:
return SearchMapper.process_many(self.client, items)
return None
|
python
|
{
"resource": ""
}
|
q9074
|
Season.to_identifier
|
train
|
def to_identifier(self):
"""Return the season identifier which is compatible with requests that require season definitions.
:return: Season identifier/definition
:rtype: :class:`~python:dict`
"""
return {
'number': self.pk,
'episodes': [
episode.to_dict()
for episode in self.episodes.values()
]
}
|
python
|
{
"resource": ""
}
|
q9075
|
Season.to_dict
|
train
|
def to_dict(self):
"""Dump season to a dictionary.
:return: Season dictionary
:rtype: :class:`~python:dict`
"""
result = self.to_identifier()
result.update({
'ids': dict([
(key, value) for (key, value) in self.keys[1:] # NOTE: keys[0] is the season identifier
])
})
if self.rating:
result['rating'] = self.rating.value
result['rated_at'] = to_iso8601_datetime(self.rating.timestamp)
result['in_watchlist'] = self.in_watchlist if self.in_watchlist is not None else 0
# Extended Info
if self.first_aired:
result['first_aired'] = to_iso8601_datetime(self.first_aired)
if self.episode_count:
result['episode_count'] = self.episode_count
if self.aired_episodes:
result['aired_episodes'] = self.aired_episodes
return result
|
python
|
{
"resource": ""
}
|
q9076
|
Episode.to_dict
|
train
|
def to_dict(self):
"""Dump episode to a dictionary.
:return: Episode dictionary
:rtype: :class:`~python:dict`
"""
result = self.to_identifier()
result.update({
'title': self.title,
'watched': 1 if self.is_watched else 0,
'collected': 1 if self.is_collected else 0,
'plays': self.plays if self.plays is not None else 0,
'in_watchlist': self.in_watchlist if self.in_watchlist is not None else 0,
'progress': self.progress,
'last_watched_at': to_iso8601_datetime(self.last_watched_at),
'collected_at': to_iso8601_datetime(self.collected_at),
'paused_at': to_iso8601_datetime(self.paused_at),
'ids': dict([
(key, value) for (key, value) in self.keys[1:] # NOTE: keys[0] is the (<season>, <episode>) identifier
])
})
if self.rating:
result['rating'] = self.rating.value
result['rated_at'] = to_iso8601_datetime(self.rating.timestamp)
# Extended Info
if self.first_aired:
result['first_aired'] = to_iso8601_datetime(self.first_aired)
if self.updated_at:
result['updated_at'] = to_iso8601_datetime(self.updated_at)
if self.overview:
result['overview'] = self.overview
if self.available_translations:
result['available_translations'] = self.available_translations
return result
|
python
|
{
"resource": ""
}
|
q9077
|
Application.on_aborted
|
train
|
def on_aborted(self):
"""Device authentication aborted.
Triggered when device authentication was aborted (either with `DeviceOAuthPoller.stop()`
or via the "poll" event)
"""
print('Authentication aborted')
# Authentication aborted
self.is_authenticating.acquire()
self.is_authenticating.notify_all()
self.is_authenticating.release()
|
python
|
{
"resource": ""
}
|
q9078
|
Application.on_authenticated
|
train
|
def on_authenticated(self, authorization):
"""Device authenticated.
:param authorization: Authentication token details
:type authorization: dict
"""
# Acquire condition
self.is_authenticating.acquire()
# Store authorization for future calls
self.authorization = authorization
print('Authentication successful - authorization: %r' % self.authorization)
# Authentication complete
self.is_authenticating.notify_all()
self.is_authenticating.release()
|
python
|
{
"resource": ""
}
|
q9079
|
Application.on_expired
|
train
|
def on_expired(self):
"""Device authentication expired."""
print('Authentication expired')
# Authentication expired
self.is_authenticating.acquire()
self.is_authenticating.notify_all()
self.is_authenticating.release()
|
python
|
{
"resource": ""
}
|
q9080
|
DeviceOAuthInterface.poll
|
train
|
def poll(self, device_code, expires_in, interval, **kwargs):
"""Construct the device authentication poller.
:param device_code: Device authentication code
:type device_code: str
:param expires_in: Device authentication code expiry (in seconds)
:type expires_in: int
:param interval: Device authentication poll interval
:type interval: int
:rtype: DeviceOAuthPoller
"""
return DeviceOAuthPoller(self.client, device_code, expires_in, interval)
|
python
|
{
"resource": ""
}
|
q9081
|
Movie.to_dict
|
train
|
def to_dict(self):
"""Dump movie to a dictionary.
:return: Movie dictionary
:rtype: :class:`~python:dict`
"""
result = self.to_identifier()
result.update({
'watched': 1 if self.is_watched else 0,
'collected': 1 if self.is_collected else 0,
'plays': self.plays if self.plays is not None else 0,
'in_watchlist': self.in_watchlist if self.in_watchlist is not None else 0,
'progress': self.progress,
'last_watched_at': to_iso8601_datetime(self.last_watched_at),
'collected_at': to_iso8601_datetime(self.collected_at),
'paused_at': to_iso8601_datetime(self.paused_at)
})
if self.rating:
result['rating'] = self.rating.value
result['rated_at'] = to_iso8601_datetime(self.rating.timestamp)
# Extended Info
if self.released:
result['released'] = to_iso8601_date(self.released)
if self.updated_at:
result['updated_at'] = to_iso8601_datetime(self.updated_at)
if self.overview:
result['overview'] = self.overview
if self.tagline:
result['tagline'] = self.tagline
if self.runtime:
result['runtime'] = self.runtime
if self.certification:
result['certification'] = self.certification
if self.homepage:
result['homepage'] = self.homepage
if self.trailer:
result['trailer'] = self.trailer
if self.language:
result['language'] = self.language
if self.available_translations:
result['available_translations'] = self.available_translations
if self.genres:
result['genres'] = self.genres
return result
|
python
|
{
"resource": ""
}
|
q9082
|
Progress.to_dict
|
train
|
def to_dict(self):
"""Dump progress to a dictionary.
:return: Progress dictionary
:rtype: :class:`~python:dict`
"""
result = super(Progress, self).to_dict()
label = LABELS['last_progress_change'][self.progress_type]
result[label] = to_iso8601_datetime(self.last_progress_change)
if self.progress_type == 'watched':
result['reset_at'] = self.reset_at
result['seasons'] = [
season.to_dict()
for season in self.seasons.values()
]
if self.hidden_seasons:
result['hidden_seasons'] = [
popitems(season.to_dict(), ['number', 'ids'])
for season in self.hidden_seasons.values()
]
if self.next_episode:
result['next_episode'] = popitems(self.next_episode.to_dict(), ['season', 'number', 'title', 'ids'])
result['next_episode']['season'] = self.next_episode.keys[0][0]
if self.last_episode:
result['last_episode'] = popitems(self.last_episode.to_dict(), ['season', 'number', 'title', 'ids'])
result['last_episode']['season'] = self.last_episode.keys[0][0]
return result
|
python
|
{
"resource": ""
}
|
q9083
|
ScrobbleInterface.action
|
train
|
def action(self, action, movie=None, show=None, episode=None, progress=0.0, **kwargs):
"""Perform scrobble action.
:param action: Action to perform (either :code:`start`, :code:`pause` or :code:`stop`)
:type action: :class:`~python:str`
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'start',
'progress': 1.25,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict`
"""
if movie and (show or episode):
raise ValueError('Only one media type should be provided')
if not movie and not episode:
raise ValueError('Missing media item')
data = {
'progress': progress,
'app_version': kwargs.pop('app_version', self.client.version),
'app_date': kwargs.pop('app_date', None)
}
if movie:
# TODO validate
data['movie'] = movie
elif episode:
if show:
data['show'] = show
# TODO validate
data['episode'] = episode
response = self.http.post(
action,
data=data,
**popitems(kwargs, [
'authenticated',
'validate_token'
])
)
return self.get_data(response, **kwargs)
|
python
|
{
"resource": ""
}
|
q9084
|
ScrobbleInterface.start
|
train
|
def start(self, movie=None, show=None, episode=None, progress=0.0, **kwargs):
"""Send the scrobble "start" action.
Use this method when the video initially starts playing or is un-paused. This will
remove any playback progress if it exists.
**Note:** A watching status will auto expire after the remaining runtime has elapsed.
There is no need to re-send every 15 minutes.
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'start',
'progress': 1.25,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict`
"""
return self.action(
'start',
movie, show, episode,
progress,
**kwargs
)
|
python
|
{
"resource": ""
}
|
q9085
|
ScrobbleInterface.pause
|
train
|
def pause(self, movie=None, show=None, episode=None, progress=0.0, **kwargs):
"""Send the scrobble "pause' action.
Use this method when the video is paused. The playback progress will be saved and
:code:`Trakt['sync/playback'].get()` can be used to resume the video from this exact
position. Un-pause a video by calling the :code:`Trakt['scrobble'].start()` method again.
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'pause',
'progress': 75,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict`
"""
return self.action(
'pause',
movie, show, episode,
progress,
**kwargs
)
|
python
|
{
"resource": ""
}
|
q9086
|
ScrobbleInterface.stop
|
train
|
def stop(self, movie=None, show=None, episode=None, progress=0.0, **kwargs):
"""Send the scrobble "stop" action.
Use this method when the video is stopped or finishes playing on its own. If the
progress is above 80%, the video will be scrobbled and the :code:`action` will be set
to **scrobble**.
If the progress is less than 80%, it will be treated as a *pause* and the :code:`action`
will be set to **pause**. The playback progress will be saved and :code:`Trakt['sync/playback'].get()`
can be used to resume the video from this exact position.
**Note:** If you prefer to use a threshold higher than 80%, you should use :code:`Trakt['scrobble'].pause()`
yourself so it doesn't create duplicate scrobbles.
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'scrobble',
'progress': 99.9,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict`
"""
return self.action(
'stop',
movie, show, episode,
progress,
**kwargs
)
|
python
|
{
"resource": ""
}
|
q9087
|
CustomList.delete
|
train
|
def delete(self, **kwargs):
"""Delete the list.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
return self._client['users/*/lists/*'].delete(self.username, self.id, **kwargs)
|
python
|
{
"resource": ""
}
|
q9088
|
CustomList.update
|
train
|
def update(self, **kwargs):
"""Update the list with the current object attributes.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
item = self._client['users/*/lists/*'].update(self.username, self.id, return_type='data', **kwargs)
if not item:
return False
self._update(item)
return True
|
python
|
{
"resource": ""
}
|
q9089
|
CustomList.remove
|
train
|
def remove(self, items, **kwargs):
"""Remove specified items from the list.
:param items: Items that should be removed from the list
:type items: :class:`~python:list`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response
:rtype: :class:`~python:dict`
"""
return self._client['users/*/lists/*'].remove(self.username, self.id, items, **kwargs)
|
python
|
{
"resource": ""
}
|
q9090
|
CustomList.like
|
train
|
def like(self, **kwargs):
"""Like the list.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
return self._client['users/*/lists/*'].like(self.username, self.id, **kwargs)
|
python
|
{
"resource": ""
}
|
q9091
|
CustomList.unlike
|
train
|
def unlike(self, **kwargs):
"""Un-like the list.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
return self._client['users/*/lists/*'].unlike(self.username, self.id, **kwargs)
|
python
|
{
"resource": ""
}
|
q9092
|
Base.get
|
train
|
def get(self, source, media, collection=None, start_date=None, days=None, query=None, years=None, genres=None,
languages=None, countries=None, runtimes=None, ratings=None, certifications=None, networks=None,
status=None, **kwargs):
"""Retrieve calendar items.
The `all` calendar displays info for all shows airing during the specified period. The `my` calendar displays
episodes for all shows that have been watched, collected, or watchlisted.
:param source: Calendar source (`all` or `my`)
:type source: str
:param media: Media type (`dvd`, `movies` or `shows`)
:type media: str
:param collection: Collection type (`new`, `premieres`)
:type collection: str or None
:param start_date: Start date (defaults to today)
:type start_date: datetime or None
:param days: Number of days to display (defaults to `7`)
:type days: int or None
:param query: Search title or description.
:type query: str or None
:param years: Year or range of years (e.g. `2014`, or `2014-2016`)
:type years: int or str or tuple or None
:param genres: Genre slugs (e.g. `action`)
:type genres: str or list of str or None
:param languages: Language codes (e.g. `en`)
:type languages: str or list of str or None
:param countries: Country codes (e.g. `us`)
:type countries: str or list of str or None
:param runtimes: Runtime range in minutes (e.g. `30-90`)
:type runtimes: str or tuple or None
:param ratings: Rating range between `0` and `100` (e.g. `75-100`)
:type ratings: str or tuple or None
:param certifications: US Content Certification (e.g. `pg-13`, `tv-pg`)
:type certifications: str or list of str or None
:param networks: (TV) Network name (e.g. `HBO`)
:type networks: str or list of str or None
:param status: (TV) Show status (e.g. `returning series`, `in production`, `ended`)
:type status: str or list of str or None
:return: Items
:rtype: list of trakt.objects.video.Video
"""
if source not in ['all', 'my']:
raise ValueError('Unknown collection type: %s' % (source,))
if media not in ['dvd', 'movies', 'shows']:
raise ValueError('Unknown media type: %s' % (media,))
# Default `start_date` to today when only `days` is provided
if start_date is None and days:
start_date = datetime.utcnow()
# Request calendar collection
response = self.http.get(
'/calendars/%s/%s%s' % (
source, media,
('/' + collection) if collection else ''
),
params=[
start_date.strftime('%Y-%m-%d') if start_date else None,
days
],
query={
'query': query,
'years': years,
'genres': genres,
'languages': languages,
'countries': countries,
'runtimes': runtimes,
'ratings': ratings,
'certifications': certifications,
# TV
'networks': networks,
'status': status
},
**popitems(kwargs, [
'authenticated',
'validate_token'
])
)
# Parse response
items = self.get_data(response, **kwargs)
if isinstance(items, requests.Response):
return items
# Map items
if media == 'shows':
return SummaryMapper.episodes(
self.client, items,
parse_show=True
)
return SummaryMapper.movies(self.client, items)
|
python
|
{
"resource": ""
}
|
q9093
|
Show.episodes
|
train
|
def episodes(self):
"""Return a flat episode iterator.
:returns: Iterator :code:`((season_num, episode_num), Episode)`
:rtype: iterator
"""
for sk, season in iteritems(self.seasons):
# Yield each episode in season
for ek, episode in iteritems(season.episodes):
yield (sk, ek), episode
|
python
|
{
"resource": ""
}
|
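A standalone sketch of the flattening generator above, with plain dicts standing in for the Show and Season objects (titles illustrative):

seasons = {1: {"episodes": {1: "Pilot", 2: "Cat's in the Bag..."}},
           2: {"episodes": {1: "Seven Thirty-Seven"}}}

def episodes(seasons):
    for sk, season in seasons.items():
        for ek, episode in season["episodes"].items():
            yield (sk, ek), episode

print(list(episodes(seasons)))
# [((1, 1), 'Pilot'), ((1, 2), "Cat's in the Bag..."), ((2, 1), 'Seven Thirty-Seven')]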
q9094
|
Show.to_dict
|
train
|
def to_dict(self):
"""Dump show to a dictionary.
:return: Show dictionary
:rtype: :class:`~python:dict`
"""
result = self.to_identifier()
result['seasons'] = [
season.to_dict()
for season in self.seasons.values()
]
result['in_watchlist'] = self.in_watchlist if self.in_watchlist is not None else 0
if self.rating:
result['rating'] = self.rating.value
result['rated_at'] = to_iso8601_datetime(self.rating.timestamp)
# Extended Info
if self.first_aired:
result['first_aired'] = to_iso8601_datetime(self.first_aired)
if self.updated_at:
result['updated_at'] = to_iso8601_datetime(self.updated_at)
if self.overview:
result['overview'] = self.overview
if self.airs:
result['airs'] = self.airs
if self.runtime:
result['runtime'] = self.runtime
if self.certification:
result['certification'] = self.certification
if self.network:
result['network'] = self.network
if self.country:
result['country'] = self.country
if self.status:
result['status'] = self.status
if self.homepage:
result['homepage'] = self.homepage
if self.language:
result['language'] = self.language
if self.available_translations:
result['available_translations'] = self.available_translations
if self.genres:
result['genres'] = self.genres
if self.aired_episodes:
result['aired_episodes'] = self.aired_episodes
return result
|
python
|
{
"resource": ""
}
|
q9095
|
TraktRequest.construct_url
|
train
|
def construct_url(self):
"""Construct a full trakt request URI, with `params` and `query`."""
path = [self.path]
path.extend(self.params)
# Build URL
url = self.client.base_url + '/'.join(
str(value) for value in path
if value
)
# Append query parameters (if defined)
query = self.encode_query(self.query)
if query:
url += '?' + query
return url
|
python
|
{
"resource": ""
}
|
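A standalone sketch of the URL assembly above; base_url, path, params, and query here are placeholder values, not real client state:

from urllib.parse import urlencode

base_url = "https://api.trakt.tv/"
path, params = "search", ["imdb", "tt2015381"]

url = base_url + "/".join(str(v) for v in [path] + params if v)
query = urlencode({"type": "movie"})
if query:
    url += "?" + query

print(url)   # https://api.trakt.tv/search/imdb/tt2015381?type=movie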
q9096
|
Search.search_officers
|
train
|
def search_officers(self, term, disqualified=False, **kwargs):
"""Search for officers by name.
Args:
term (str): Officer name to search on.
disqualified (Optional[bool]): True to search for disqualified
officers
kwargs (dict): additional keywords passed into
requests.session.get params keyword.
"""
search_type = ('officers' if not disqualified else
'disqualified-officers')
params = kwargs
params['q'] = term
baseuri = self._BASE_URI + 'search/{}'.format(search_type)
res = self.session.get(baseuri, params=params)
self.handle_http_error(res)
return res
|
python
|
{
"resource": ""
}
|
q9097
|
Search.address
|
train
|
def address(self, num):
"""Search for company addresses by company number.
Args:
num (str): Company number to search on.
"""
url_root = "company/{}/registered-office-address"
baseuri = self._BASE_URI + url_root.format(num)
res = self.session.get(baseuri)
self.handle_http_error(res)
return res
|
python
|
{
"resource": ""
}
|
q9098
|
Search.profile
|
train
|
def profile(self, num):
"""Search for company profile by company number.
Args:
num (str): Company number to search on.
"""
baseuri = self._BASE_URI + "company/{}".format(num)
res = self.session.get(baseuri)
self.handle_http_error(res)
return res
|
python
|
{
"resource": ""
}
|
q9099
|
Search.filing_history
|
train
|
def filing_history(self, num, transaction=None, **kwargs):
"""Search for a company's filling history by company number.
Args:
num (str): Company number to search on.
transaction (Optional[str]): Filing record number.
kwargs (dict): additional keywords passed into
requests.session.get params keyword.
"""
baseuri = self._BASE_URI + "company/{}/filing-history".format(num)
if transaction is not None:
baseuri += "/{}".format(transaction)
res = self.session.get(baseuri, params=kwargs)
self.handle_http_error(res)
return res
|
python
|
{
"resource": ""
}
|