_id
stringlengths 2
7
| title
stringlengths 1
88
| partition
stringclasses 3
values | text
stringlengths 75
19.8k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q7500
|
CSSParser.parse_attribute_selector
|
train
|
def parse_attribute_selector(self, sel, m, has_selector, quirks):
    """
    Create attribute selector from the returned regex match.

    Builds a `SelectorAttribute` from the `[ns|attr op value case]` parts of the
    match, appends it to `sel` (nested under `:not()` for `!=`), and returns the
    updated `has_selector` flag (always `True`).
    """
    inverse = False
    op = m.group('cmp')
    case = util.lower(m.group('case')) if m.group('case') else None
    parts = [css_unescape(a) for a in m.group('ns_attr').split('|')]
    ns = ''
    is_type = False
    pattern2 = None
    if len(parts) > 1:
        ns = parts[0]
        attr = parts[1]
    else:
        attr = parts[0]
    if case:
        # Explicit `i`/`s` case flag wins.
        flags = re.I if case == 'i' else 0
    elif util.lower(attr) == 'type':
        # `type` matches case-insensitively in HTML; remember so we can also
        # provide a case-sensitive pattern for XML documents.
        flags = re.I
        is_type = True
    else:
        flags = 0

    if op:
        if m.group('value').startswith(('"', "'")) and not quirks:
            value = css_unescape(m.group('value')[1:-1], True)
        else:
            value = css_unescape(m.group('value'))
    else:
        value = None
    if not op:
        # Attribute name
        pattern = None
    elif op.startswith('^'):
        # Value start with
        pattern = re.compile(r'^%s.*' % re.escape(value), flags)
    elif op.startswith('$'):
        # Value ends with
        pattern = re.compile(r'.*?%s$' % re.escape(value), flags)
    elif op.startswith('*'):
        # Value contains
        pattern = re.compile(r'.*?%s.*' % re.escape(value), flags)
    elif op.startswith('~'):
        # Value contains word within space separated list
        # `~=` should match nothing if it is empty or contains whitespace,
        # so if either of these cases is present, use `[^\s\S]` which cannot be matched.
        value = r'[^\s\S]' if not value or RE_WS.search(value) else re.escape(value)
        pattern = re.compile(r'.*?(?:(?<=^)|(?<=[ \t\r\n\f]))%s(?=(?:[ \t\r\n\f]|$)).*' % value, flags)
    elif op.startswith('|'):
        # Value starts with word in dash separated list
        pattern = re.compile(r'^%s(?:-.*)?$' % re.escape(value), flags)
    elif op.startswith('!'):
        # Equivalent to `:not([attr=value])`: an *exact* match that gets
        # inverted below. (Previously this reused the `|=` dash-list pattern,
        # which would also negate `value-foo` style values — a bug.)
        pattern = re.compile(r'^%s$' % re.escape(value), flags)
        inverse = True
    else:
        # Value matches
        pattern = re.compile(r'^%s$' % re.escape(value), flags)
    if is_type and pattern:
        # Case-sensitive twin of `pattern`, used when matching XML.
        pattern2 = re.compile(pattern.pattern)

    # Append the attribute selector
    sel_attr = ct.SelectorAttribute(attr, ns, pattern, pattern2)
    if inverse:
        # If we are using `!=`, we need to nest the pattern under a `:not()`.
        sub_sel = _Selector()
        sub_sel.attributes.append(sel_attr)
        not_list = ct.SelectorList([sub_sel.freeze()], True, False)
        sel.selectors.append(not_list)
    else:
        sel.attributes.append(sel_attr)
    has_selector = True
    return has_selector
|
python
|
{
"resource": ""
}
|
q7501
|
CSSParser.parse_tag_pattern
|
train
|
def parse_tag_pattern(self, sel, m, has_selector):
    """Extract the optional namespace prefix and tag name and store them on `sel`."""
    pieces = [css_unescape(part) for part in m.group(0).split('|')]
    if len(pieces) == 1:
        prefix = None
        name = pieces[0]
    else:
        prefix, name = pieces[0], pieces[1]
    sel.tag = ct.SelectorTag(name, prefix)
    return True
|
python
|
{
"resource": ""
}
|
q7502
|
CSSParser.parse_pseudo_class_custom
|
train
|
def parse_pseudo_class_custom(self, sel, m, has_selector):
    """
    Parse custom pseudo class alias.

    Compile custom selectors as we need them. When compiling a custom selector,
    set it to `None` in the dictionary so we can avoid an infinite loop.

    Raises `SelectorSyntaxError` for an unknown alias or a self-referencing
    alias that is currently mid-compile.
    """
    pseudo = util.lower(css_unescape(m.group('name')))
    selector = self.custom.get(pseudo)
    if selector is None:
        # Either never registered, or set to `None` because it is currently
        # being compiled (a recursive reference). Both are errors.
        # (Fixed typo in the message: "postion" -> "position".)
        raise SelectorSyntaxError(
            "Undefined custom selector '{}' found at position {}".format(pseudo, m.end(0)),
            self.pattern,
            m.end(0)
        )

    if not isinstance(selector, ct.SelectorList):
        # Still raw selector text: mark as in-progress, compile, then cache.
        self.custom[pseudo] = None
        selector = CSSParser(
            selector, custom=self.custom, flags=self.flags
        ).process_selectors(flags=FLG_PSEUDO)
        self.custom[pseudo] = selector

    sel.selectors.append(selector)
    has_selector = True
    return has_selector
|
python
|
{
"resource": ""
}
|
q7503
|
CSSParser.parse_pseudo_nth
|
train
|
def parse_pseudo_nth(self, sel, m, has_selector, iselector):
    """
    Parse `nth` pseudo.

    Handles `:nth-child`, `:nth-last-child`, `:nth-of-type`, and
    `:nth-last-of-type`, converting the `an+b` expression (or the `even`/`odd`
    keywords) into a `SelectorNth` appended to `sel.nth`.
    """
    mdict = m.groupdict()
    # Which named group matched tells us child vs type flavor.
    if mdict.get('pseudo_nth_child'):
        postfix = '_child'
    else:
        postfix = '_type'
    mdict['name'] = util.lower(css_unescape(mdict['name']))
    content = util.lower(mdict.get('nth' + postfix))
    if content == 'even':
        # 2n
        s1 = 2
        s2 = 0
        var = True
    elif content == 'odd':
        # 2n+1
        s1 = 2
        s2 = 1
        var = True
    else:
        # General `an+b`: build `s1` (the `a` part, with sign) and `s2`
        # (the `b` part, with sign) as strings, then convert to int.
        # `var` records whether the `n` variable is present.
        nth_parts = RE_NTH.match(content)
        s1 = '-' if nth_parts.group('s1') and nth_parts.group('s1') == '-' else ''
        a = nth_parts.group('a')
        var = a.endswith('n')
        if a.startswith('n'):
            # Bare `n` means a coefficient of 1.
            s1 += '1'
        elif var:
            # Strip the trailing `n` to get the coefficient digits.
            s1 += a[:-1]
        else:
            s1 += a
        s2 = '-' if nth_parts.group('s2') and nth_parts.group('s2') == '-' else ''
        if nth_parts.group('b'):
            s2 += nth_parts.group('b')
        else:
            # No `b` term at all: the sign (if any) is discarded too.
            s2 = '0'
        s1 = int(s1, 10)
        s2 = int(s2, 10)

    pseudo_sel = mdict['name']
    if postfix == '_child':
        if m.group('of'):
            # Parse the rest of `of S`.
            nth_sel = self.parse_selectors(iselector, m.end(0), FLG_PSEUDO | FLG_OPEN)
        else:
            # Use default `*|*` for `of S`.
            nth_sel = CSS_NTH_OF_S_DEFAULT
        if pseudo_sel == ':nth-child':
            sel.nth.append(ct.SelectorNth(s1, var, s2, False, False, nth_sel))
        elif pseudo_sel == ':nth-last-child':
            sel.nth.append(ct.SelectorNth(s1, var, s2, False, True, nth_sel))
    else:
        if pseudo_sel == ':nth-of-type':
            sel.nth.append(ct.SelectorNth(s1, var, s2, True, False, ct.SelectorList()))
        elif pseudo_sel == ':nth-last-of-type':
            sel.nth.append(ct.SelectorNth(s1, var, s2, True, True, ct.SelectorList()))
    has_selector = True
    return has_selector
|
python
|
{
"resource": ""
}
|
q7504
|
CSSParser.parse_pseudo_open
|
train
|
def parse_pseudo_open(self, sel, name, has_selector, iselector, index):
    """Parse a functional pseudo-class (one followed by an opening parenthesis)."""
    flags = FLG_PSEUDO | FLG_OPEN
    if name == ':not':
        flags |= FLG_NOT
    elif name == ':has':
        flags |= FLG_RELATIVE
    sel.selectors.append(self.parse_selectors(iselector, index, flags))
    return True
|
python
|
{
"resource": ""
}
|
q7505
|
CSSParser.parse_class_id
|
train
|
def parse_class_id(self, sel, m, has_selector):
    """Parse HTML classes and ids."""
    token = m.group(0)
    name = css_unescape(token[1:])
    # A leading `.` marks a class; otherwise it is a `#` id.
    if token[0] == '.':
        sel.classes.append(name)
    else:
        sel.ids.append(name)
    return True
|
python
|
{
"resource": ""
}
|
q7506
|
CSSParser.parse_pseudo_contains
|
train
|
def parse_pseudo_contains(self, sel, m, has_selector):
    """Parse `:contains()` values into a `SelectorContains` entry on `sel`."""
    text_patterns = []
    for token in RE_VALUES.finditer(m.group('values')):
        if token.group('split'):
            continue
        raw = token.group('value')
        # Quoted values get the quotes stripped and string-style unescaping.
        if raw.startswith(("'", '"')):
            text_patterns.append(css_unescape(raw[1:-1], True))
        else:
            text_patterns.append(css_unescape(raw))
    sel.contains.append(ct.SelectorContains(tuple(text_patterns)))
    return True
|
python
|
{
"resource": ""
}
|
q7507
|
CSSParser.parse_pseudo_lang
|
train
|
def parse_pseudo_lang(self, sel, m, has_selector):
    """
    Parse `:lang()` values into compiled language-range patterns.

    Each comma-separated value is turned into a case-insensitive regular
    expression implementing wildcard (`*`) language-range matching, and the
    resulting pattern list is stored on `sel.lang`.
    """
    values = m.group('values')
    patterns = []
    for token in RE_VALUES.finditer(values):
        if token.group('split'):
            continue
        value = token.group('value')
        if value.startswith(('"', "'")):
            parts = css_unescape(value[1:-1], True).split('-')
        else:
            parts = css_unescape(value).split('-')

        new_parts = []
        first = True
        for part in parts:
            # NOTE: the lookahead needs a regex word boundary, so these must be
            # raw strings — a plain '\b' would embed a literal backspace
            # character (\x08) and the `x` private-use subtag would no longer
            # be excluded. (Bug fix: previously non-raw strings were used.)
            if part == '*' and first:
                # Wildcard in the primary subtag: any subtag except `x`.
                new_parts.append(r'(?!x\b)[a-z0-9]+?')
            elif part != '*':
                # Literal subtag; when not primary, allow any number of
                # intermediate subtags (except `x`) before it.
                new_parts.append(('' if first else r'(-(?!x\b)[a-z0-9]+)*?\-') + re.escape(part))
            if first:
                first = False
        patterns.append(re.compile(r'^{}(?:-.*)?$'.format(''.join(new_parts)), re.I))
    sel.lang.append(ct.SelectorLang(patterns))
    has_selector = True
    return has_selector
|
python
|
{
"resource": ""
}
|
q7508
|
CSSParser.parse_pseudo_dir
|
train
|
def parse_pseudo_dir(self, sel, m, has_selector):
    """Parse `:dir()` and set the corresponding directionality flag on `sel`."""
    direction = util.lower(m.group('dir'))
    sel.flags |= ct.SEL_DIR_LTR if direction == 'ltr' else ct.SEL_DIR_RTL
    return True
|
python
|
{
"resource": ""
}
|
q7509
|
CSSParser.selector_iter
|
train
|
def selector_iter(self, pattern):
    """
    Iterate selector tokens.

    Generator yielding `(token_name, match)` pairs. At each position the
    enabled token regexes are tried in order and the first match wins; if no
    token matches, a `SelectorSyntaxError` is raised describing the failure.
    """
    # Ignore whitespace and comments at start and end of pattern
    m = RE_WS_BEGIN.search(pattern)
    index = m.end(0) if m else 0
    m = RE_WS_END.search(pattern)
    end = (m.start(0) - 1) if m else (len(pattern) - 1)
    if self.debug:  # pragma: no cover
        if self.quirks:
            print('## QUIRKS MODE: Throwing out the spec!')
        print('## PARSING: {!r}'.format(pattern))
    while index <= end:
        m = None
        for v in self.css_tokens:
            # Tokens can be enabled/disabled by the current parse flags.
            if not v.enabled(self.flags):  # pragma: no cover
                continue
            m = v.match(pattern, index)
            if m:
                name = v.get_name()
                if self.debug:  # pragma: no cover
                    print("TOKEN: '{}' --> {!r} at position {}".format(name, m.group(0), m.start(0)))
                # Advance past the consumed text before yielding.
                index = m.end(0)
                yield name, m
                break
        if m is None:
            c = pattern[index]
            # If the character represents the start of one of the known selector types,
            # throw an exception mentioning that the known selector type is in error;
            # otherwise, report the invalid character.
            if c == '[':
                msg = "Malformed attribute selector at position {}".format(index)
            elif c == '.':
                msg = "Malformed class selector at position {}".format(index)
            elif c == '#':
                msg = "Malformed id selector at position {}".format(index)
            elif c == ':':
                msg = "Malformed pseudo-class selector at position {}".format(index)
            else:
                msg = "Invalid character {!r} position {}".format(c, index)
            raise SelectorSyntaxError(msg, self.pattern, index)
    if self.debug:  # pragma: no cover
        print('## END PARSING')
|
python
|
{
"resource": ""
}
|
q7510
|
CSSParser.process_selectors
|
train
|
def process_selectors(self, index=0, flags=0):
    """
    Process selectors.

    We do our own selectors as BeautifulSoup4 has some annoying quirks,
    and we don't really need to do nth selectors or siblings or
    descendants etc.
    """
    token_stream = self.selector_iter(self.pattern)
    return self.parse_selectors(token_stream, index, flags)
|
python
|
{
"resource": ""
}
|
q7511
|
find_label
|
train
|
def find_label(label, label_color, label_description):
    """Return a `LabelEdit` for `label` if it appears in `label_list`, else `None`."""
    target = label.lower()
    for name, (color, description) in label_list.items():
        # A tuple key means the label is being renamed: (old, new).
        if isinstance(name, tuple):
            old_name, new_name = name[0], name[1]
        else:
            old_name = new_name = name
        if target == old_name.lower():
            return LabelEdit(old_name, new_name, color, description)
    return None
|
python
|
{
"resource": ""
}
|
q7512
|
update_labels
|
train
|
def update_labels(repo):
    """
    Update labels.

    Synchronize the repository's labels with `label_list`:
    edit labels that match an entry, delete (or skip) labels with no entry
    depending on `DELETE_UNSPECIFIED`, then create any listed labels that
    were not seen. Progress is printed for every action.
    """
    updated = set()
    # Pass 1: reconcile labels that already exist on the repository.
    for label in repo.get_labels():
        edit = find_label(label.name, label.color, label.description)
        if edit is not None:
            print(' Updating {}: #{} "{}"'.format(edit.new, edit.color, edit.description))
            label.edit(edit.new, edit.color, edit.description)
            updated.add(edit.old)
            updated.add(edit.new)
        else:
            if DELETE_UNSPECIFIED:
                print(' Deleting {}: #{} "{}"'.format(label.name, label.color, label.description))
                label.delete()
            else:
                print(' Skipping {}: #{} "{}"'.format(label.name, label.color, label.description))
                updated.add(label.name)
    # Pass 2: create any configured labels that were not touched above.
    for name, values in label_list.items():
        color, description = values
        if isinstance(name, tuple):
            # Rename entries are keyed (old, new); only the new name can be created.
            new_name = name[1]
        else:
            new_name = name
        if new_name not in updated:
            print(' Creating {}: #{} "{}"'.format(new_name, color, description))
            repo.create_label(new_name, color, description)
|
python
|
{
"resource": ""
}
|
q7513
|
get_auth
|
train
|
def get_auth():
    """Prompt for credentials and return an authenticated `Github` client."""
    import getpass
    username = input("User Name: ")  # noqa
    password = getpass.getpass('Password: ')
    return Github(username, password)
|
python
|
{
"resource": ""
}
|
q7514
|
parse_version
|
train
|
def parse_version(ver, pre=False):
    """
    Parse version into a comparable Version tuple.

    Parameters:
        ver: version string; must match `RE_VER`.
        pre: legacy parameter kept for backward compatibility — its value is
             never read (it is unconditionally reassigned below).

    Returns a `Version(major, minor, micro, release, pre, post, dev)` tuple.
    """
    m = RE_VER.match(ver)

    # Handle major, minor, micro
    major = int(m.group('major'))
    minor = int(m.group('minor')) if m.group('minor') else 0
    micro = int(m.group('micro')) if m.group('micro') else 0

    # Handle pre releases
    if m.group('type'):
        release = PRE_REL_MAP[m.group('type')]
        pre = int(m.group('pre'))
    else:
        release = "final"
        pre = 0

    # Handle development releases.
    # (Removed a dead assignment that set `dev` from the raw match group only
    # to immediately overwrite it in both branches below.)
    if m.group('dev'):
        dev = int(m.group('dev'))
        release = '.dev-' + release if pre else '.dev'
    else:
        dev = 0

    # Handle post
    post = int(m.group('post')) if m.group('post') else 0

    return Version(major, minor, micro, release, pre, post, dev)
|
python
|
{
"resource": ""
}
|
q7515
|
Version._get_canonical
|
train
|
def _get_canonical(self):
"""Get the canonical output string."""
# Assemble major, minor, micro version and append `pre`, `post`, or `dev` if needed..
if self.micro == 0:
ver = "{}.{}".format(self.major, self.minor)
else:
ver = "{}.{}.{}".format(self.major, self.minor, self.micro)
if self._is_pre():
ver += '{}{}'.format(REL_MAP[self.release], self.pre)
if self._is_post():
ver += ".post{}".format(self.post)
if self._is_dev():
ver += ".dev{}".format(self.dev)
return ver
|
python
|
{
"resource": ""
}
|
q7516
|
Document.assert_valid_input
|
train
|
def assert_valid_input(cls, tag):
    """
    Check if valid input tag or document.

    Raises `TypeError` when `tag` is not recognized by `cls.is_tag`.
    """
    # Fail on unexpected types. (Fixed typo in the message: "recieved".)
    if not cls.is_tag(tag):
        raise TypeError("Expected a BeautifulSoup 'Tag', but instead received type {}".format(type(tag)))
|
python
|
{
"resource": ""
}
|
q7517
|
Document.is_special_string
|
train
|
def is_special_string(obj):
    """Check whether `obj` is one of BeautifulSoup's non-content string types."""
    import bs4
    special_types = (bs4.Comment, bs4.Declaration, bs4.CData, bs4.ProcessingInstruction)
    return isinstance(obj, special_types)
|
python
|
{
"resource": ""
}
|
q7518
|
Document.is_iframe
|
train
|
def is_iframe(self, el):
    """Check if element is an `iframe`."""
    # XML trees keep tag-name case; HTML names are normalized to lowercase.
    if self.is_xml_tree(el):
        name = el.name
    else:
        name = util.lower(el.name)
    return name == 'iframe' and self.is_html_tag(el)
|
python
|
{
"resource": ""
}
|
q7519
|
Document.is_root
|
train
|
def is_root(self, el):
    """
    Return whether element is a root element.

    True when `el` is the precomputed tree root, or when it is the document
    element directly under an `iframe` while matching HTML.
    """
    matched = self.root and self.root is el
    if matched:
        return True
    parent = self.get_parent(el)
    return parent is not None and self.is_html and self.is_iframe(parent)
|
python
|
{
"resource": ""
}
|
q7520
|
Document.get_contents
|
train
|
def get_contents(self, el, no_iframe=False):
    """Yield the element's direct children, unless the iframe restriction applies."""
    if no_iframe and self.is_iframe(el):
        return
    yield from el.contents
|
python
|
{
"resource": ""
}
|
q7521
|
Document.get_descendants
|
train
|
def get_descendants(self, el, tags=True, no_iframe=False):
    """
    Get descendants.

    Yields descendants of `el` in document order; with `tags=True` only tag
    nodes are yielded. With `no_iframe=True`, an `iframe` itself is yielded
    but its subtree is skipped by fast-forwarding to the next node after it.
    """
    if not no_iframe or not self.is_iframe(el):
        # When set, `next_good` is the node we must reach before resuming
        # normal yielding (used to skip an iframe's subtree).
        next_good = None
        for child in el.descendants:
            if next_good is not None:
                if child is not next_good:
                    continue
                next_good = None
            is_tag = self.is_tag(child)
            if no_iframe and is_tag and self.is_iframe(child):
                # Determine where the iframe's subtree ends.
                if child.next_sibling is not None:
                    next_good = child.next_sibling
                else:
                    # No sibling: descend to the iframe's last descendant and
                    # resume at whatever element follows it.
                    last_child = child
                    while self.is_tag(last_child) and last_child.contents:
                        last_child = last_child.contents[-1]
                    next_good = last_child.next_element
                yield child
                if next_good is None:
                    # Nothing follows the iframe; traversal is done.
                    break
                # Coverage isn't seeing this even though it's executed
                continue  # pragma: no cover
            if not tags or is_tag:
                yield child
|
python
|
{
"resource": ""
}
|
q7522
|
Document.get_parent
|
train
|
def get_parent(self, el, no_iframe=False):
    """Return the element's parent, treating `iframe` as a boundary when requested."""
    candidate = el.parent
    if no_iframe and candidate is not None and self.is_iframe(candidate):
        return None
    return candidate
|
python
|
{
"resource": ""
}
|
q7523
|
Document.get_next_tag
|
train
|
def get_next_tag(cls, el):
    """Return the nearest following sibling that is a tag, or `None`."""
    node = el.next_sibling
    # Step over non-tag siblings (text nodes, comments, etc.).
    while not cls.is_tag(node) and node is not None:
        node = node.next_sibling
    return node
|
python
|
{
"resource": ""
}
|
q7524
|
Document.get_previous_tag
|
train
|
def get_previous_tag(cls, el):
    """Return the nearest preceding sibling that is a tag, or `None`."""
    node = el.previous_sibling
    # Step over non-tag siblings (text nodes, comments, etc.).
    while not cls.is_tag(node) and node is not None:
        node = node.previous_sibling
    return node
|
python
|
{
"resource": ""
}
|
q7525
|
Document.get_attribute_by_name
|
train
|
def get_attribute_by_name(el, name, default=None):
    """Look up an attribute by name; case-insensitive for non-XML documents."""
    if el._is_xml:
        # XML attribute names are case-sensitive: direct dictionary lookup.
        return el.attrs.get(name, default)
    # HTML: compare case-insensitively against each attribute key.
    for k, v in el.attrs.items():
        if util.lower(k) == name:
            return v
    return default
|
python
|
{
"resource": ""
}
|
q7526
|
Document.get_text
|
train
|
def get_text(self, el, no_iframe=False):
    """Concatenate all content strings found among the element's descendants."""
    return ''.join(
        node for node in self.get_descendants(el, tags=False, no_iframe=no_iframe)
        if self.is_content_string(node)
    )
|
python
|
{
"resource": ""
}
|
q7527
|
Inputs.validate_day
|
train
|
def validate_day(year, month, day):
    """Validate that `day` is legal for the given month/year (Gregorian leap rules)."""
    if month == FEB:
        leap = ((year % 4 == 0) and (year % 100 != 0)) or (year % 400 == 0)
        limit = FEB_LEAP_MONTH if leap else FEB_MONTH
    elif month in MONTHS_30:
        limit = SHORT_MONTH
    else:
        limit = LONG_MONTH
    return 1 <= day <= limit
|
python
|
{
"resource": ""
}
|
q7528
|
Inputs.validate_week
|
train
|
def validate_week(year, week):
    """Validate week."""
    # The ISO week number of December 31 gives the year's last week number,
    # except when Dec 31 already falls in week 1 of the following year —
    # then fall back to 53 as the upper bound.
    last_week = datetime.strptime("{}-{}-{}".format(12, 31, year), "%m-%d-%Y").isocalendar()[1]
    if last_week == 1:
        last_week = 53
    return 1 <= week <= last_week
|
python
|
{
"resource": ""
}
|
q7529
|
Inputs.parse_value
|
train
|
def parse_value(cls, itype, value):
    """
    Parse the input value for the given form-control type.

    Returns a tuple of the validated components (or a float for numeric
    types), or `None` when the value fails to parse or validate.
    """
    result = None
    if itype == "date":
        match = RE_DATE.match(value)
        if match:
            yr = int(match.group('year'), 10)
            mo = int(match.group('month'), 10)
            dy = int(match.group('day'), 10)
            if cls.validate_year(yr) and cls.validate_month(mo) and cls.validate_day(yr, mo, dy):
                result = (yr, mo, dy)
    elif itype == "month":
        match = RE_MONTH.match(value)
        if match:
            yr = int(match.group('year'), 10)
            mo = int(match.group('month'), 10)
            if cls.validate_year(yr) and cls.validate_month(mo):
                result = (yr, mo)
    elif itype == "week":
        match = RE_WEEK.match(value)
        if match:
            yr = int(match.group('year'), 10)
            wk = int(match.group('week'), 10)
            if cls.validate_year(yr) and cls.validate_week(yr, wk):
                result = (yr, wk)
    elif itype == "time":
        match = RE_TIME.match(value)
        if match:
            hr = int(match.group('hour'), 10)
            mn = int(match.group('minutes'), 10)
            if cls.validate_hour(hr) and cls.validate_minutes(mn):
                result = (hr, mn)
    elif itype == "datetime-local":
        match = RE_DATETIME.match(value)
        if match:
            yr = int(match.group('year'), 10)
            mo = int(match.group('month'), 10)
            dy = int(match.group('day'), 10)
            hr = int(match.group('hour'), 10)
            mn = int(match.group('minutes'), 10)
            if (
                cls.validate_year(yr) and cls.validate_month(mo) and cls.validate_day(yr, mo, dy) and
                cls.validate_hour(hr) and cls.validate_minutes(mn)
            ):
                result = (yr, mo, dy, hr, mn)
    elif itype in ("number", "range"):
        match = RE_NUM.match(value)
        if match:
            result = float(match.group('value'))
    return result
|
python
|
{
"resource": ""
}
|
q7530
|
CSSMatch.get_tag_ns
|
train
|
def get_tag_ns(self, el):
    """Return the element's namespace ('' if none); assume XHTML when namespaces are unsupported."""
    if not self.supports_namespaces():
        return NS_XHTML
    ns = el.namespace
    return ns if ns else ''
|
python
|
{
"resource": ""
}
|
q7531
|
CSSMatch.get_tag
|
train
|
def get_tag(self, el):
    """Return the element's tag name, lower-cased for non-XML documents."""
    name = self.get_tag_name(el)
    if name is None or self.is_xml:
        return name
    return util.lower(name)
|
python
|
{
"resource": ""
}
|
q7532
|
CSSMatch.get_prefix
|
train
|
def get_prefix(self, el):
    """Return the element's namespace prefix, lower-cased for non-XML documents."""
    prefix = self.get_prefix_name(el)
    if prefix is None or self.is_xml:
        return prefix
    return util.lower(prefix)
|
python
|
{
"resource": ""
}
|
q7533
|
CSSMatch.find_bidi
|
train
|
def find_bidi(self, el):
    """
    Get directionality from element text.

    Depth-first scan of the element's children: returns `ct.SEL_DIR_LTR` or
    `ct.SEL_DIR_RTL` based on the first strongly-directional character found,
    or `None` when no direction can be determined.
    """
    for node in self.get_children(el, tags=False):
        # Analyze child text nodes
        if self.is_tag(node):
            # Avoid analyzing certain elements specified in the specification.
            direction = DIR_MAP.get(util.lower(self.get_attribute_by_name(node, 'dir', '')), None)
            if (
                self.get_tag(node) in ('bdi', 'script', 'style', 'textarea', 'iframe') or
                not self.is_html_tag(node) or
                direction is not None
            ):
                continue  # pragma: no cover
            # Check directionality of this node's text
            value = self.find_bidi(node)
            if value is not None:
                return value
            # Direction could not be determined
            continue  # pragma: no cover
        # Skip `doctype` comments, etc.
        if self.is_special_string(node):
            continue
        # Analyze text nodes for directionality.
        for c in node:
            bidi = unicodedata.bidirectional(c)
            # 'L' is strongly left-to-right; 'AL'/'R' are strongly right-to-left.
            if bidi in ('AL', 'R', 'L'):
                return ct.SEL_DIR_LTR if bidi == 'L' else ct.SEL_DIR_RTL
    return None
|
python
|
{
"resource": ""
}
|
q7534
|
CSSMatch.match_attribute_name
|
train
|
def match_attribute_name(self, el, attr, prefix):
    """
    Match attribute name and return value if it exists.

    Returns the attribute's value, or `None` when no attribute matches.
    Name comparison is case-insensitive for HTML and case-sensitive for XML;
    `prefix` (when given) must resolve to a declared namespace, with `*`
    matching any namespace.
    """
    value = None
    if self.supports_namespaces():
        value = None
        # If we have not defined namespaces, we can't very well find them, so don't bother trying.
        if prefix:
            ns = self.namespaces.get(prefix)
            if ns is None and prefix != '*':
                return None
        else:
            ns = None

        for k, v in self.iter_attributes(el):

            # Get attribute parts
            namespace, name = self.split_namespace(el, k)

            # Can't match a prefix attribute as we haven't specified one to match
            # Try to match it normally as a whole `p:a` as selector may be trying `p\:a`.
            if ns is None:
                if (self.is_xml and attr == k) or (not self.is_xml and util.lower(attr) == util.lower(k)):
                    value = v
                    break
                # Coverage is not finding this even though it is executed.
                # Adding a print statement before this (and erasing coverage) causes coverage to find the line.
                # Ignore the false positive message.
                continue  # pragma: no cover

            # We can't match our desired prefix attribute as the attribute doesn't have a prefix
            if namespace is None or ns != namespace and prefix != '*':
                continue

            # The attribute doesn't match.
            if (util.lower(attr) != util.lower(name)) if not self.is_xml else (attr != name):
                continue

            value = v
            break
    else:
        # No namespace support: plain case-insensitive name comparison.
        for k, v in self.iter_attributes(el):
            if util.lower(attr) != util.lower(k):
                continue
            value = v
            break
    return value
|
python
|
{
"resource": ""
}
|
q7535
|
CSSMatch.match_namespace
|
train
|
def match_namespace(self, el, tag):
    """
    Match the namespace of the element.

    Rules, applied in order:
    - no prefix on the selector: must match the default namespace if one was declared;
    - explicit empty prefix (`|tag`): the element must have no namespace;
    - any other prefix except `*`: must resolve to a declared namespace equal
      to the element's.
    """
    match = True
    namespace = self.get_tag_ns(el)
    default_namespace = self.namespaces.get('')
    # `''` when no prefix was given; `None` when the prefix is undeclared.
    tag_ns = '' if tag.prefix is None else self.namespaces.get(tag.prefix, None)
    # We must match the default namespace if one is not provided
    if tag.prefix is None and (default_namespace is not None and namespace != default_namespace):
        match = False
    # If we specified `|tag`, we must not have a namespace.
    elif (tag.prefix is not None and tag.prefix == '' and namespace):
        match = False
    # Verify prefix matches
    elif (
        tag.prefix and
        tag.prefix != '*' and (tag_ns is None or namespace != tag_ns)
    ):
        match = False
    return match
|
python
|
{
"resource": ""
}
|
q7536
|
CSSMatch.match_attributes
|
train
|
def match_attributes(self, el, attributes):
    """Match attributes."""
    if not attributes:
        return True
    for a in attributes:
        value = self.match_attribute_name(el, a.attribute, a.prefix)
        # XML documents may carry a separate, case-sensitive `type` pattern.
        pattern = a.xml_type_pattern if self.is_xml and a.xml_type_pattern else a.pattern
        if isinstance(value, list):
            # Multi-valued attributes (e.g. class lists) are compared space-joined.
            value = ' '.join(value)
        if value is None:
            return False
        if pattern is not None and pattern.match(value) is None:
            return False
    return True
|
python
|
{
"resource": ""
}
|
q7537
|
CSSMatch.match_tagname
|
train
|
def match_tagname(self, el, tag):
    """Match tag name."""
    name = tag.name
    if name is not None and not self.is_xml:
        name = util.lower(name)
    # `None` (no tag specified) and `*` match any element.
    return name is None or name in (self.get_tag(el), '*')
|
python
|
{
"resource": ""
}
|
q7538
|
CSSMatch.match_tag
|
train
|
def match_tag(self, el, tag):
    """Match the tag."""
    if tag is None:
        return True
    # Both checks are always evaluated, mirroring the original behavior.
    ns_ok = self.match_namespace(el, tag)
    name_ok = self.match_tagname(el, tag)
    return ns_ok and name_ok
|
python
|
{
"resource": ""
}
|
q7539
|
CSSMatch.match_past_relations
|
train
|
def match_past_relations(self, el, relation):
    """Match past relationship."""
    rel_type = relation[0].rel_type
    found = False
    if rel_type == REL_PARENT:
        # Descendant combinator: any ancestor may match.
        ancestor = self.get_parent(el, no_iframe=self.iframe_restrict)
        while not found and ancestor:
            found = self.match_selectors(ancestor, relation)
            ancestor = self.get_parent(ancestor, no_iframe=self.iframe_restrict)
    elif rel_type == REL_CLOSE_PARENT:
        # Child combinator: only the direct parent may match.
        ancestor = self.get_parent(el, no_iframe=self.iframe_restrict)
        if ancestor:
            found = self.match_selectors(ancestor, relation)
    elif rel_type == REL_SIBLING:
        # General sibling combinator: any preceding sibling tag may match.
        sibling = self.get_previous_tag(el)
        while not found and sibling:
            found = self.match_selectors(sibling, relation)
            sibling = self.get_previous_tag(sibling)
    elif rel_type == REL_CLOSE_SIBLING:
        # Adjacent sibling combinator: only the immediately preceding tag.
        sibling = self.get_previous_tag(el)
        if sibling and self.is_tag(sibling):
            found = self.match_selectors(sibling, relation)
    return found
|
python
|
{
"resource": ""
}
|
q7540
|
CSSMatch.match_future_child
|
train
|
def match_future_child(self, parent, relation, recursive=False):
    """Match future child."""
    # Search direct children, or all descendants when `recursive` is set.
    walk = self.get_descendants if recursive else self.get_children
    for node in walk(parent, no_iframe=self.iframe_restrict):
        if self.match_selectors(node, relation):
            return True
    return False
|
python
|
{
"resource": ""
}
|
q7541
|
CSSMatch.match_future_relations
|
train
|
def match_future_relations(self, el, relation):
    """Match future relationship."""
    rel_type = relation[0].rel_type
    if rel_type == REL_HAS_PARENT:
        # `:has(... el)` with a descendant combinator: search all descendants.
        return self.match_future_child(el, relation, True)
    if rel_type == REL_HAS_CLOSE_PARENT:
        # Child combinator: search only direct children.
        return self.match_future_child(el, relation)
    found = False
    if rel_type == REL_HAS_SIBLING:
        # General sibling: any following sibling tag may match.
        sibling = self.get_next_tag(el)
        while not found and sibling:
            found = self.match_selectors(sibling, relation)
            sibling = self.get_next_tag(sibling)
    elif rel_type == REL_HAS_CLOSE_SIBLING:
        # Adjacent sibling: only the immediately following tag.
        sibling = self.get_next_tag(el)
        if sibling and self.is_tag(sibling):
            found = self.match_selectors(sibling, relation)
    return found
|
python
|
{
"resource": ""
}
|
q7542
|
CSSMatch.match_relations
|
train
|
def match_relations(self, el, relation):
    """Dispatch to future (`:has()`-style) or past relation matching."""
    # Future relation types are encoded with a leading `:`.
    if relation[0].rel_type.startswith(':'):
        return self.match_future_relations(el, relation)
    return self.match_past_relations(el, relation)
|
python
|
{
"resource": ""
}
|
q7543
|
CSSMatch.match_id
|
train
|
def match_id(self, el, ids):
    """Match element's ID."""
    # Every requested id must equal the element's `id` attribute.
    for i in ids:
        if i != self.get_attribute_by_name(el, 'id', ''):
            return False
    return True
|
python
|
{
"resource": ""
}
|
q7544
|
CSSMatch.match_classes
|
train
|
def match_classes(self, el, classes):
    """Match element's classes."""
    current_classes = self.get_classes(el)
    # All requested classes must be present on the element.
    return all(c in current_classes for c in classes)
|
python
|
{
"resource": ""
}
|
q7545
|
CSSMatch.match_nth_tag_type
|
train
|
def match_nth_tag_type(self, el, child):
    """Check whether `child` shares `el`'s tag name and namespace (for `of-type` matching)."""
    # Short-circuits: namespace is only compared when the names already match.
    return (
        self.get_tag(child) == self.get_tag(el) and
        self.get_tag_ns(child) == self.get_tag_ns(el)
    )
|
python
|
{
"resource": ""
}
|
q7546
|
CSSMatch.match_nth
|
train
|
def match_nth(self, el, nth):
    """
    Match `nth` elements.

    For each `SelectorNth` in `nth`, walk the parent's children computing the
    `an+b` index sequence and verify that `el` occupies one of the computed
    positions. All entries must match.
    """
    matched = True

    for n in nth:
        matched = False
        if n.selectors and not self.match_selectors(el, n.selectors):
            break
        parent = self.get_parent(el)
        if parent is None:
            # No parent: synthesize one so the child walk still works.
            parent = self.create_fake_parent(el)
        last = n.last
        last_index = len(parent) - 1
        # Walk backwards from the end for the `-last-` variants.
        index = last_index if last else 0
        relative_index = 0
        a = n.a
        b = n.b
        var = n.n
        count = 0
        count_incr = 1
        factor = -1 if last else 1
        idx = last_idx = a * count + b if var else a

        # We can only adjust bounds within a variable index
        if var:
            # Abort if our nth index is out of bounds and only getting further out of bounds as we increment.
            # Otherwise, increment to try to get in bounds.
            adjust = None
            while idx < 1 or idx > last_index:
                if idx < 0:
                    diff_low = 0 - idx
                    if adjust is not None and adjust == 1:
                        break
                    adjust = -1
                    count += count_incr
                    idx = last_idx = a * count + b if var else a
                    diff = 0 - idx
                    if diff >= diff_low:
                        break
                    # NOTE(review): unlike the high branch below, `diff_low` is
                    # not refreshed here; harmless since it is recomputed at
                    # branch entry on the next iteration — confirm intent.
                else:
                    diff_high = idx - last_index
                    if adjust is not None and adjust == -1:
                        break
                    adjust = 1
                    count += count_incr
                    idx = last_idx = a * count + b if var else a
                    diff = idx - last_index
                    if diff >= diff_high:
                        break
                    diff_high = diff

            # If a < 0, our count is working backwards, so floor the index by increasing the count.
            # Find the count that yields the lowest, in bound value and use that.
            # Lastly reverse count increment so that we'll increase our index.
            lowest = count
            if a < 0:
                while idx >= 1:
                    lowest = count
                    count += count_incr
                    idx = last_idx = a * count + b if var else a
                count_incr = -1
            count = lowest
            idx = last_idx = a * count + b if var else a

        # Evaluate elements while our calculated nth index is still in range
        while 1 <= idx <= last_index + 1:
            child = None
            # Evaluate while our child index is still in range.
            for child in self.get_children(parent, start=index, reverse=factor < 0, tags=False):
                index += factor
                if not self.is_tag(child):
                    continue
                # Handle `of S` in `nth-child`
                if n.selectors and not self.match_selectors(child, n.selectors):
                    continue
                # Handle `of-type`
                if n.of_type and not self.match_nth_tag_type(el, child):
                    continue
                relative_index += 1
                if relative_index == idx:
                    if child is el:
                        matched = True
                    else:
                        break
                if child is el:
                    break
            if child is el:
                break
            last_idx = idx
            count += count_incr
            if count < 0:
                # Count is counting down and has now ventured into invalid territory.
                break
            idx = a * count + b if var else a
            if last_idx == idx:
                # Constant index (no `n` term): no more positions to try.
                break
        if not matched:
            break
    return matched
|
python
|
{
"resource": ""
}
|
q7547
|
CSSMatch.match_subselectors
|
train
|
def match_subselectors(self, el, selectors):
    """Match selectors."""
    result = True
    # Note: every selector list is evaluated, even after a failure,
    # mirroring the original behavior.
    for sub in selectors:
        result = self.match_selectors(el, sub) and result
    return result
|
python
|
{
"resource": ""
}
|
q7548
|
CSSMatch.match_contains
|
train
|
def match_contains(self, el, contains):
    """Match element if it contains text."""
    match = True
    content = None
    for contain_list in contains:
        if content is None:
            # Extract the element's text lazily, only once there is something to check.
            content = self.get_text(el, no_iframe=self.is_html)
        # Each contains-list needs at least one of its strings present.
        if not any(text in content for text in contain_list.text):
            match = False
    return match
|
python
|
{
"resource": ""
}
|
q7549
|
CSSMatch.match_lang
|
train
|
def match_lang(self, el, langs):
    """
    Match languages.

    Determines the element's language by walking ancestors for a `lang`
    (HTML) or `xml:lang` (XML) attribute, falling back to a cached or freshly
    scanned `<meta http-equiv="content-language">`, then compares the result
    against every pattern group in `langs` (all groups must match).
    """
    match = False
    has_ns = self.supports_namespaces()
    root = self.root
    has_html_namespace = self.has_html_namespace

    # Walk parents looking for `lang` (HTML) or `xml:lang` XML property.
    parent = el
    found_lang = None
    last = None
    while not found_lang:
        has_html_ns = self.has_html_ns(parent)
        for k, v in self.iter_attributes(parent):
            attr_ns, attr = self.split_namespace(parent, k)
            # Accept plain `lang` in HTML contexts, or namespaced `xml:lang`
            # in XML contexts.
            if (
                ((not has_ns or has_html_ns) and (util.lower(k) if not self.is_xml else k) == 'lang') or
                (
                    has_ns and not has_html_ns and attr_ns == NS_XML and
                    (util.lower(attr) if not self.is_xml and attr is not None else attr) == 'lang'
                )
            ):
                found_lang = v
                break
        last = parent
        parent = self.get_parent(parent, no_iframe=self.is_html)

        if parent is None:
            # Reached the top: remember the actual root for the meta scan.
            root = last
            has_html_namespace = self.has_html_ns(root)
            parent = last
            break

    # Use cached meta language.
    if not found_lang and self.cached_meta_lang:
        for cache in self.cached_meta_lang:
            if root is cache[0]:
                found_lang = cache[1]

    # If we couldn't find a language, and the document is HTML, look to meta to determine language.
    # (A cached `False` marks a previous unsuccessful scan and skips this block.)
    if found_lang is None and (not self.is_xml or (has_html_namespace and root.name == 'html')):
        # Find head
        found = False
        for tag in ('html', 'head'):
            found = False
            for child in self.get_children(parent, no_iframe=self.is_html):
                if self.get_tag(child) == tag and self.is_html_tag(child):
                    found = True
                    parent = child
                    break
            if not found:  # pragma: no cover
                break

        # Search meta tags
        if found:
            for child in parent:
                if self.is_tag(child) and self.get_tag(child) == 'meta' and self.is_html_tag(parent):
                    c_lang = False
                    content = None
                    for k, v in self.iter_attributes(child):
                        if util.lower(k) == 'http-equiv' and util.lower(v) == 'content-language':
                            c_lang = True
                        if util.lower(k) == 'content':
                            content = v
                        if c_lang and content:
                            found_lang = content
                            self.cached_meta_lang.append((root, found_lang))
                            break
                if found_lang:
                    break
            if not found_lang:
                # Cache the failure so future lookups skip the scan.
                self.cached_meta_lang.append((root, False))

    # If we determined a language, compare.
    if found_lang:
        for patterns in langs:
            match = False
            for pattern in patterns:
                if pattern.match(found_lang):
                    match = True
            if not match:
                break

    return match
|
python
|
{
"resource": ""
}
|
q7550
|
CSSMatch.match_dir
|
train
|
def match_dir(self, el, directionality):
    """
    Check directionality (`:dir(ltr)` / `:dir(rtl)`).

    `directionality` is a bitmask of `ct.SEL_DIR_LTR` / `ct.SEL_DIR_RTL`.
    Walks up the tree recursively when the element's direction is `auto`
    or unassigned, mirroring the HTML directionality algorithm.
    """
    # If we have to match both left and right, we can't match either.
    if directionality & ct.SEL_DIR_LTR and directionality & ct.SEL_DIR_RTL:
        return False
    if el is None or not self.is_html_tag(el):
        return False
    # Element has defined direction of left to right or right to left
    # (DIR_MAP presumably maps 'ltr'/'rtl'/'auto' to flags, with 0 for auto).
    direction = DIR_MAP.get(util.lower(self.get_attribute_by_name(el, 'dir', '')), None)
    if direction not in (None, 0):
        return direction == directionality
    # Element is the document element (the root) and no direction assigned, assume left to right.
    is_root = self.is_root(el)
    if is_root and direction is None:
        return ct.SEL_DIR_LTR == directionality
    # If `input[type=telephone]` and no direction is assigned, assume left to right.
    name = self.get_tag(el)
    is_input = name == 'input'
    is_textarea = name == 'textarea'
    is_bdi = name == 'bdi'
    itype = util.lower(self.get_attribute_by_name(el, 'type', '')) if is_input else ''
    if is_input and itype == 'tel' and direction is None:
        return ct.SEL_DIR_LTR == directionality
    # Auto handling for text inputs: inspect the control's text content for
    # the first strongly-directional character.
    if ((is_input and itype in ('text', 'search', 'tel', 'url', 'email')) or is_textarea) and direction == 0:
        if is_textarea:
            # A textarea's value is its text content.
            value = []
            for node in self.get_contents(el, no_iframe=True):
                if self.is_content_string(node):
                    value.append(node)
            value = ''.join(value)
        else:
            value = self.get_attribute_by_name(el, 'value', '')
        if value:
            for c in value:
                bidi = unicodedata.bidirectional(c)
                if bidi in ('AL', 'R', 'L'):
                    # First strong character decides the direction.
                    direction = ct.SEL_DIR_LTR if bidi == 'L' else ct.SEL_DIR_RTL
                    return direction == directionality
            # Assume left to right
            return ct.SEL_DIR_LTR == directionality
        elif is_root:
            return ct.SEL_DIR_LTR == directionality
        return self.match_dir(self.get_parent(el, no_iframe=True), directionality)
    # Auto handling for `bdi` and other non text inputs.
    if (is_bdi and direction is None) or direction == 0:
        direction = self.find_bidi(el)
        if direction is not None:
            return direction == directionality
        elif is_root:
            return ct.SEL_DIR_LTR == directionality
        return self.match_dir(self.get_parent(el, no_iframe=True), directionality)
    # Match parents direction
    return self.match_dir(self.get_parent(el, no_iframe=True), directionality)
|
python
|
{
"resource": ""
}
|
q7551
|
CSSMatch.match_range
|
train
|
def match_range(self, el, condition):
    """
    Match `:in-range` / `:out-of-range`.

    Behavior is modeled after browsers: we only ever establish that a value
    is *out of* range; anything else — including a missing value — counts as
    in range. (Arguably a missing value should be neither, but browsers
    disagree.)
    """
    input_type = self.get_attribute_by_name(el, 'type').lower()

    low = self.get_attribute_by_name(el, 'min', None)
    if low is not None:
        low = Inputs.parse_value(input_type, low)
    high = self.get_attribute_by_name(el, 'max', None)
    if high is not None:
        high = Inputs.parse_value(input_type, high)

    # With no usable bound there is no range to evaluate against.
    if low is None and high is None:
        return False

    current = self.get_attribute_by_name(el, 'value', None)
    if current is not None:
        current = Inputs.parse_value(input_type, current)

    outside = False
    if current is not None:
        if (input_type == "time" and low is not None and high is not None and low > high):
            # Time is periodic, so min > max describes a reversed,
            # discontinuous range that wraps around midnight.
            outside = current < low and current > high
        elif input_type in ("date", "datetime-local", "month", "week", "number", "range", "time"):
            if low is not None and current < low:
                outside = True
            elif high is not None and current > high:
                outside = True

    return not outside if condition & ct.SEL_IN_RANGE else outside
|
python
|
{
"resource": ""
}
|
q7552
|
CSSMatch.match_defined
|
train
|
def match_defined(self, el):
    """
    Match `:defined`.

    `:defined` relates to custom elements in a browser:

    - If the document is XML (not XHTML), all tags match.
    - Tags that are not custom (no hyphen in the name) are considered defined.
    - A prefixed tag (with or without a namespace) will not match.

    This of course requires the parser to provide proper prefix and namespace
    info; if it doesn't, there is nothing we can do.
    """
    tag_name = self.get_tag(el)
    if '-' not in tag_name:
        return True
    if ':' in tag_name:
        return True
    return self.get_prefix(el) is not None
|
python
|
{
"resource": ""
}
|
q7553
|
CSSMatch.match_selectors
|
train
|
def match_selectors(self, el, selectors):
    """
    Check if element matches one of the selectors.

    Each compound selector in the list is evaluated as a chain of guards;
    the first selector whose guards all pass produces a match. When the
    list is a negation list (`is_not`), each per-selector result is
    inverted.
    """
    match = False
    is_not = selectors.is_not
    is_html = selectors.is_html
    # Internal selector lists that use the HTML flag, will automatically get the `html` namespace.
    if is_html:
        # Save current state so it can be restored after matching.
        namespaces = self.namespaces
        iframe_restrict = self.iframe_restrict
        self.namespaces = {'html': NS_XHTML}
        self.iframe_restrict = True
    if not is_html or self.is_html:
        for selector in selectors:
            # Start from the inverted default so a `:not()` list that hits
            # `continue` on a failed guard still reports a match.
            match = is_not
            # We have a un-matchable situation (like `:focus` as you can focus an element in this environment)
            if isinstance(selector, ct.SelectorNull):
                continue
            # Verify tag matches
            if not self.match_tag(el, selector.tag):
                continue
            # Verify tag is defined
            if selector.flags & ct.SEL_DEFINED and not self.match_defined(el):
                continue
            # Verify element is root
            if selector.flags & ct.SEL_ROOT and not self.match_root(el):
                continue
            # Verify element is scope
            if selector.flags & ct.SEL_SCOPE and not self.match_scope(el):
                continue
            # Verify `nth` matches
            if not self.match_nth(el, selector.nth):
                continue
            if selector.flags & ct.SEL_EMPTY and not self.match_empty(el):
                continue
            # Verify id matches
            if selector.ids and not self.match_id(el, selector.ids):
                continue
            # Verify classes match
            if selector.classes and not self.match_classes(el, selector.classes):
                continue
            # Verify attribute(s) match
            if not self.match_attributes(el, selector.attributes):
                continue
            # Verify ranges
            if selector.flags & RANGES and not self.match_range(el, selector.flags & RANGES):
                continue
            # Verify language patterns
            if selector.lang and not self.match_lang(el, selector.lang):
                continue
            # Verify pseudo selector patterns
            if selector.selectors and not self.match_subselectors(el, selector.selectors):
                continue
            # Verify relationship selectors
            if selector.relation and not self.match_relations(el, selector.relation):
                continue
            # Validate that the current default selector match corresponds to the first submit button in the form
            if selector.flags & ct.SEL_DEFAULT and not self.match_default(el):
                continue
            # Validate that the unset radio button is among radio buttons with the same name in a form that are
            # also not set.
            if selector.flags & ct.SEL_INDETERMINATE and not self.match_indeterminate(el):
                continue
            # Validate element directionality
            if selector.flags & DIR_FLAGS and not self.match_dir(el, selector.flags & DIR_FLAGS):
                continue
            # Validate that the tag contains the specified text.
            if not self.match_contains(el, selector.contains):
                continue
            match = not is_not
            break
    # Restore actual namespaces being used for external selector lists
    if is_html:
        self.namespaces = namespaces
        self.iframe_restrict = iframe_restrict
    return match
|
python
|
{
"resource": ""
}
|
q7554
|
CSSMatch.select
|
train
|
def select(self, limit=0):
    """Yield each descendant of the target tag that matches, stopping after `limit` matches (0 means unlimited)."""
    remaining = limit if limit >= 1 else None
    for descendant in self.get_descendants(self.tag):
        if not self.match(descendant):
            continue
        yield descendant
        if remaining is not None:
            remaining -= 1
            if remaining < 1:
                break
|
python
|
{
"resource": ""
}
|
q7555
|
CSSMatch.filter
|
train
|
def filter(self):  # noqa A001
    """Filter the target tag's direct contents, keeping only matching tags (strings are dropped)."""
    kept = []
    for node in self.get_contents(self.tag):
        if self.is_navigable_string(node):
            continue
        if self.match(node):
            kept.append(node)
    return kept
|
python
|
{
"resource": ""
}
|
q7556
|
SoupSieve.icomments
|
train
|
def icomments(self, tag, limit=0):
    """Iterate comments only."""
    matcher = CommentsMatch(tag)
    for node in matcher.get_comments(limit):
        yield node
|
python
|
{
"resource": ""
}
|
q7557
|
uord
|
train
|
def uord(c):
    """Get the Unicode ordinal, combining a surrogate pair when two code units are given."""
    if len(c) != 2:
        return ord(c)
    # Surrogate pair (narrow builds): fold high and low surrogates into
    # a single supplementary-plane code point.
    high = ord(c[0])  # pragma: no cover
    low = ord(c[1])  # pragma: no cover
    return 0x10000 + (high - 0xD800) * 0x400 + (low - 0xDC00)  # pragma: no cover
|
python
|
{
"resource": ""
}
|
q7558
|
warn_deprecated
|
train
|
def warn_deprecated(message, stacklevel=2):  # pragma: no cover
    """Emit a `DeprecationWarning` with the given message, attributed `stacklevel` frames up."""
    warnings.warn(message, category=DeprecationWarning, stacklevel=stacklevel)
|
python
|
{
"resource": ""
}
|
q7559
|
get_pattern_context
|
train
|
def get_pattern_context(pattern, index):
    """
    Get the pattern context for error/warning display.

    Renders `pattern` line by line, marking the line containing `index`
    with a `--> ` prefix and a `^` caret column indicator.

    Returns:
        Tuple of (rendered text, 1-based line of `index`, 1-based column).
    """
    last = 0
    current_line = 1
    col = 1
    text = []
    line = 1
    # Split pattern by newline and handle the text before the newline.
    # NOTE(review): assumes RE_PATTERN_LINE_SPLIT matches each newline
    # sequence (and an empty final match) — confirm against its definition.
    for m in RE_PATTERN_LINE_SPLIT.finditer(pattern):
        linetext = pattern[last:m.start(0)]
        if not len(m.group(0)) and not len(text):
            # Single-line pattern: no indent prefix, caret shifts back one.
            indent = ''
            offset = -1
            col = index - last + 1
        elif last <= index < m.end(0):
            # This is the line containing `index`: arrow prefix, and the
            # caret offset compensates for the 4-character prefix.
            indent = '--> '
            offset = (-1 if index > m.start(0) else 0) + 3
            col = index - last + 1
        else:
            indent = '    '
            offset = None
        if len(text):
            # Regardless of whether we are presented with `\r\n`, `\r`, or `\n`,
            # we will render the output with just `\n`. We will still log the column
            # correctly though.
            text.append('\n')
        text.append('{}{}'.format(indent, linetext))
        if offset is not None:
            text.append('\n')
            text.append(' ' * (col + offset) + '^')
            line = current_line
        current_line += 1
        last = m.end(0)
    return ''.join(text), line, col
|
python
|
{
"resource": ""
}
|
q7560
|
warn_quirks
|
train
|
def warn_quirks(message, recommend, pattern, index):
    """Warn about a quirks-mode selector pattern.

    Emits a `QuirksWarning` attributed to the user's call site — the last
    stack frame before execution enters this module or `bs4` — so the
    warning points at the offending selector usage rather than at library
    internals.

    Args:
        message: Description of the quirk that was tolerated.
        recommend: Suggested spec-conforming alternative.
        pattern: The CSS selector pattern being parsed.
        index: Index into `pattern` where the quirk occurred.
    """
    import traceback
    import bs4  # noqa: F401
    # Acquire source code line context
    paths = (MODULE, sys.modules['bs4'].__path__[0])
    tb = traceback.extract_stack()
    previous = None
    filename = None
    lineno = None
    for entry in tb:
        # Stop at the first frame inside soupsieve/bs4; `previous` then
        # holds the user's frame.
        if (PY35 and entry.filename.startswith(paths)) or (not PY35 and entry[0].startswith(paths)):
            break
        previous = entry
    if previous:
        filename = previous.filename if PY35 else previous[0]
        lineno = previous.lineno if PY35 else previous[1]
    # Format pattern to show line and column position
    context, line = get_pattern_context(pattern, index)[0:2]
    # Display warning (typo fix: "confrom" -> "conform")
    warnings.warn_explicit(
        "\nCSS selector pattern:\n" +
        " {}\n".format(message) +
        " This behavior is only allowed temporarily for Beautiful Soup's transition to Soup Sieve.\n" +
        " In order to conform to the CSS spec, {}\n".format(recommend) +
        " It is strongly recommended the selector be altered to conform to the CSS spec " +
        "as an exception will be raised for this case in the future.\n" +
        "pattern line {}:\n{}".format(line, context),
        QuirksWarning,
        filename,
        lineno
    )
|
python
|
{
"resource": ""
}
|
q7561
|
compile
|
train
|
def compile(pattern, namespaces=None, flags=0, **kwargs):  # noqa: A001
    """Compile a CSS selector pattern into a `SoupSieve` object (pass-through if already compiled)."""
    if namespaces is not None:
        namespaces = ct.Namespaces(**namespaces)

    custom = kwargs.get('custom')
    if custom is not None:
        custom = ct.CustomSelectors(**custom)

    if isinstance(pattern, SoupSieve):
        # A precompiled selector list cannot accept new settings.
        for provided, arg_name in ((flags, 'flags'),
                                   (namespaces is not None, 'namespaces'),
                                   (custom is not None, 'custom')):
            if provided:
                raise ValueError("Cannot process '{}' argument on a compiled selector list".format(arg_name))
        return pattern

    return cp._cached_css_compile(pattern, namespaces, custom, flags)
|
python
|
{
"resource": ""
}
|
q7562
|
match
|
train
|
def match(select, tag, namespaces=None, flags=0, **kwargs):
    """Return whether `tag` matches the given selector."""
    compiled = compile(select, namespaces, flags, **kwargs)
    return compiled.match(tag)
|
python
|
{
"resource": ""
}
|
q7563
|
filter
|
train
|
def filter(select, iterable, namespaces=None, flags=0, **kwargs):  # noqa: A001
    """Filter a list of nodes with the given selector."""
    compiled = compile(select, namespaces, flags, **kwargs)
    return compiled.filter(iterable)
|
python
|
{
"resource": ""
}
|
q7564
|
select
|
train
|
def select(select, tag, namespaces=None, limit=0, flags=0, **kwargs):
    """Select the tags under `tag` that match the given selector."""
    compiled = compile(select, namespaces, flags, **kwargs)
    return compiled.select(tag, limit)
|
python
|
{
"resource": ""
}
|
q7565
|
ListableApiResource.all
|
train
|
def all(cls, connection=None, **params):
    """
    Returns first page if no params passed in as a list.
    """
    path = cls._get_all_path()
    response = cls._make_request('GET', path, connection, params=params)
    return cls._create_object(response, connection=connection)
|
python
|
{
"resource": ""
}
|
q7566
|
ListableApiResource.iterall
|
train
|
def iterall(cls, connection=None, **kwargs):
    """
    Returns a autopaging generator that yields each object returned one by one.

    If neither `limit` nor `page` is supplied, pages through the whole
    collection (250 records per request) until an empty page is returned;
    otherwise a single request is issued with the caller's parameters.
    """
    try:
        limit = kwargs['limit']
    except KeyError:
        limit = None
    try:
        page = kwargs['page']
    except KeyError:
        page = None
    def _all_responses():
        # NOTE: this `page` intentionally shadows the outer `page`; the
        # autopaging path always starts from the first page.
        page = 1 # one based
        params = kwargs.copy()
        while True:
            # A fixed page size of 250 is requested here; a caller-supplied
            # `limit` only selects the single-request code path below.
            params.update(page=page, limit=250)
            rsp = cls._make_request('GET', cls._get_all_path(), connection, params=params)
            if rsp:
                yield rsp
                page += 1
            else:
                yield [] # needed for case where there is no objects
                break
    if not (limit or page):
        for rsp in _all_responses():
            for obj in rsp:
                yield cls._create_object(obj, connection=connection)
    else:
        response = cls._make_request('GET', cls._get_all_path(), connection, params=kwargs)
        for obj in cls._create_object(response, connection=connection):
            yield obj
|
python
|
{
"resource": ""
}
|
q7567
|
Connection.update
|
train
|
def update(self, resource, rid, updates):
    """
    Updates the resource with id 'rid' with the given updates dictionary.
    """
    path = resource
    if path[-1] != '/':
        path += '/'
    path += str(rid)
    return self.put(path, data=updates)
|
python
|
{
"resource": ""
}
|
q7568
|
Connection.delete
|
train
|
def delete(self, resource, rid=None):  # note that rid can't be 0 - problem?
    """
    Deletes the resource with given id 'rid', or all resources of given type if rid is not supplied.
    """
    target = resource
    if rid:
        if target[-1] != '/':
            target += '/'
        target += str(rid)
    response = self._run_method('DELETE', target)
    return self._handle_response(target, response, suppress_empty=True)
|
python
|
{
"resource": ""
}
|
q7569
|
Connection.put
|
train
|
def put(self, url, data):
    """
    Make a PUT request to save data.

    data should be a dictionary.
    """
    result = self._run_method('PUT', url, data=data)
    # Log the raw body for debugging before parsing.
    log.debug("OUTPUT: %s" % result.content)
    return self._handle_response(url, result)
|
python
|
{
"resource": ""
}
|
q7570
|
Connection.post
|
train
|
def post(self, url, data, headers=None):
    """
    POST request for creating new objects.

    Args:
        url: endpoint path or full URL.
        data: dictionary payload.
        headers: optional dict of extra headers (defaults to none).

    Bug fix: the previous signature used a mutable default (`headers={}`),
    which is shared across calls; `None` with a fallback is the safe,
    backward-compatible equivalent.
    """
    response = self._run_method('POST', url, data=data,
                                headers=headers if headers is not None else {})
    return self._handle_response(url, response)
|
python
|
{
"resource": ""
}
|
q7571
|
Connection._handle_response
|
train
|
def _handle_response(self, url, res, suppress_empty=True):
    """
    Returns parsed JSON or raises an exception appropriately.

    Args:
        url: request URL, used only in error messages.
        res: a response object exposing `status_code`, `reason`, `content`,
            and `json()`.
        suppress_empty: when False, a 204 No Content response raises
            `EmptyResponseWarning` instead of returning an empty dict.

    Raises:
        ServerException: for 5xx responses.
        RateLimitingException: for 429 responses.
        ClientRequestException: for other 4xx responses.
        RedirectionException: for 3xx responses.
    """
    # Keep the raw response for callers that need headers etc.
    self._last_response = res
    result = {}
    if res.status_code in (200, 201, 202):
        try:
            result = res.json()
        except Exception as e: # json might be invalid, or store might be down
            # NOTE(review): `e.message` only exists on Python 2 exceptions;
            # this line would raise AttributeError on Python 3 — confirm the
            # supported runtime before changing.
            e.message += " (_handle_response failed to decode JSON: " + str(res.content) + ")"
            raise # TODO better exception
    elif res.status_code == 204 and not suppress_empty:
        raise EmptyResponseWarning("%d %s @ %s: %s" % (res.status_code, res.reason, url, res.content), res)
    elif res.status_code >= 500:
        raise ServerException("%d %s @ %s: %s" % (res.status_code, res.reason, url, res.content), res)
    elif res.status_code == 429:
        # Checked before the generic 4xx branch so 429 gets its own type.
        raise RateLimitingException("%d %s @ %s: %s" % (res.status_code, res.reason, url, res.content), res)
    elif res.status_code >= 400:
        raise ClientRequestException("%d %s @ %s: %s" % (res.status_code, res.reason, url, res.content), res)
    elif res.status_code >= 300:
        raise RedirectionException("%d %s @ %s: %s" % (res.status_code, res.reason, url, res.content), res)
    return result
|
python
|
{
"resource": ""
}
|
q7572
|
OAuthConnection.fetch_token
|
train
|
def fetch_token(self, client_secret, code, context, scope, redirect_uri,
                token_url='https://login.bigcommerce.com/oauth2/token'):
    """
    Fetches a token from given token_url, using given parameters, and sets up session headers for
    future requests.

    redirect_uri should be the same as your callback URL.
    code, context, and scope should be passed as parameters to your callback URL on app installation.

    Raises HttpException on failure (same as Connection methods).
    """
    payload = {
        'client_id': self.client_id,
        'client_secret': client_secret,
        'code': code,
        'context': context,
        'scope': scope,
        'grant_type': 'authorization_code',
        'redirect_uri': redirect_uri,
    }
    token = self.post(token_url, payload,
                      headers={'Content-Type': 'application/x-www-form-urlencoded'})
    # Authenticate all subsequent requests on this session.
    self._session.headers.update(self._oauth_headers(self.client_id, token['access_token']))
    return token
|
python
|
{
"resource": ""
}
|
q7573
|
OAuthConnection._handle_response
|
train
|
def _handle_response(self, url, res, suppress_empty=True):
    """
    Adds rate limiting information on to the response object.

    Delegates parsing and error handling to `Connection._handle_response`,
    then records the `X-Rate-Limit-*` headers in `self.rate_limit`. If rate
    limit management is configured and the remaining quota has fallen to the
    configured threshold, optionally sleeps until the window resets and/or
    invokes a user-supplied callback.
    """
    result = Connection._handle_response(self, url, res, suppress_empty)
    if 'X-Rate-Limit-Time-Reset-Ms' in res.headers:
        self.rate_limit = dict(ms_until_reset=int(res.headers['X-Rate-Limit-Time-Reset-Ms']),
                               window_size_ms=int(res.headers['X-Rate-Limit-Time-Window-Ms']),
                               requests_remaining=int(res.headers['X-Rate-Limit-Requests-Left']),
                               requests_quota=int(res.headers['X-Rate-Limit-Requests-Quota']))
        if self.rate_limiting_management:
            if self.rate_limiting_management['min_requests_remaining'] >= self.rate_limit['requests_remaining']:
                if self.rate_limiting_management['wait']:
                    # Block for the remainder of the window, rounded up to
                    # whole seconds.
                    sleep(ceil(float(self.rate_limit['ms_until_reset']) / 1000))
                if self.rate_limiting_management.get('callback_function'):
                    callback = self.rate_limiting_management['callback_function']
                    args_dict = self.rate_limiting_management.get('callback_args')
                    if args_dict:
                        callback(args_dict)
                    else:
                        callback()
    return result
|
python
|
{
"resource": ""
}
|
q7574
|
escape_html
|
train
|
def escape_html(text, escape_slash=False):
    """
    Binding for Hoedown's HTML escaping function.

    The implementation is inspired by the OWASP XSS Prevention
    recommendations:

    .. code-block:: none

        &  --> &amp;
        <  --> &lt;
        >  --> &gt;
        "  --> &quot;
        '  --> &#x27;
        /  --> &#x2F; when escape_slash is set to True

    .. versionadded:: 2.1.0
    """
    encoded = text.encode('utf-8')
    buf = lib.hoedown_buffer_new(OUNIT)
    lib.hoedown_escape_html(buf, encoded, len(encoded), int(escape_slash))
    try:
        return to_string(buf)
    finally:
        # Always release the C buffer, even if decoding fails.
        lib.hoedown_buffer_free(buf)
|
python
|
{
"resource": ""
}
|
q7575
|
html
|
train
|
def html(text, extensions=0, render_flags=0):
    """
    Convert markdown text to HTML.

    ``extensions`` can be a list or tuple of extensions (e.g.
    ``('fenced-code', 'footnotes', 'strikethrough')``) or an integer
    (e.g. ``EXT_FENCED_CODE | EXT_FOOTNOTES | EXT_STRIKETHROUGH``).

    ``render_flags`` can be a list or tuple of flags (e.g.
    ``('skip-html', 'hard-wrap')``) or an integer
    (e.g. ``HTML_SKIP_HTML | HTML_HARD_WRAP``).
    """
    ext_flags = args_to_int(extension_map, extensions)
    html_flags = args_to_int(html_flag_map, render_flags)

    in_buf = lib.hoedown_buffer_new(IUNIT)
    out_buf = lib.hoedown_buffer_new(OUNIT)
    renderer = lib.hoedown_html_renderer_new(html_flags, 0)
    document = lib.hoedown_document_new(renderer, ext_flags, 16)

    lib.hoedown_buffer_puts(in_buf, text.encode('utf-8'))
    lib.hoedown_document_render(document, out_buf, in_buf.data, in_buf.size)

    # Release everything except the output buffer before decoding.
    lib.hoedown_buffer_free(in_buf)
    lib.hoedown_document_free(document)
    lib.hoedown_html_renderer_free(renderer)
    try:
        return to_string(out_buf)
    finally:
        lib.hoedown_buffer_free(out_buf)
|
python
|
{
"resource": ""
}
|
q7576
|
smartypants
|
train
|
def smartypants(text):
    """
    Transforms sequences of characters into HTML entities.

    =================================  =====================  =========
    Markdown                           HTML                   Result
    =================================  =====================  =========
    ``'s`` (s, t, m, d, re, ll, ve)    &rsquo;s               's
    ``"Quotes"``                       &ldquo;Quotes&rdquo;   "Quotes"
    ``---``                            &mdash;                --
    ``--``                             &ndash;                -
    ``...``                            &hellip;               ...
    ``. . .``                          &hellip;               ...
    ``(c)``                            &copy;                 (c)
    ``(r)``                            &reg;                  (r)
    ``(tm)``                           &trade;                (tm)
    ``3/4``                            &frac34;               3/4
    ``1/2``                            &frac12;               1/2
    ``1/4``                            &frac14;               1/4
    =================================  =====================  =========
    """
    encoded = text.encode('utf-8')
    buf = lib.hoedown_buffer_new(OUNIT)
    lib.hoedown_html_smartypants(buf, encoded, len(encoded))
    try:
        return to_string(buf)
    finally:
        lib.hoedown_buffer_free(buf)
|
python
|
{
"resource": ""
}
|
q7577
|
SaferHtmlRenderer.autolink
|
train
|
def autolink(self, raw_url, is_email):
    """
    Filters links generated by the ``autolink`` extension.
    """
    if not self.check_url(raw_url):
        # Unsafe URL: render the original markdown, escaped, instead of a link.
        return escape_html('<%s>' % raw_url)
    target = ('mailto:' if is_email else '') + raw_url
    href = escape_html(self.rewrite_url(target))
    return '<a href="%s">%s</a>' % (href, escape_html(raw_url))
|
python
|
{
"resource": ""
}
|
q7578
|
SaferHtmlRenderer.image
|
train
|
def image(self, raw_url, title='', alt=''):
    """
    Filters the ``src`` attribute of an image.

    Note that filtering the source URL of an ``<img>`` tag is only a very
    basic protection, and it's mostly useless in modern browsers (they block
    JavaScript in there by default). An example of attack that filtering
    does not thwart is phishing based on HTTP Auth, see `this issue
    <https://github.com/liberapay/liberapay.com/issues/504>`_ for details.

    To mitigate this issue you should only allow images from trusted services,
    for example your own image store, or a proxy (see :meth:`rewrite_url`).
    """
    if self.check_url(raw_url, is_image_src=True):
        url = self.rewrite_url(raw_url, is_image_src=True)
        maybe_alt = ' alt="%s"' % escape_html(alt) if alt else ''
        maybe_title = ' title="%s"' % escape_html(title) if title else ''
        url = escape_html(url)
        return '<img src="%s"%s%s />' % (url, maybe_alt, maybe_title)
    else:
        # Bug fix: the fallback previously formatted into an empty string
        # (`"" % (alt, raw_url)`), which raises TypeError. Render the
        # original markdown image syntax, escaped, like `link()` does.
        return escape_html("![%s](%s)" % (alt, raw_url))
|
python
|
{
"resource": ""
}
|
q7579
|
SaferHtmlRenderer.link
|
train
|
def link(self, content, raw_url, title=''):
    """
    Filters links.
    """
    if not self.check_url(raw_url):
        # Unsafe URL: fall back to escaped markdown source.
        return escape_html("[%s](%s)" % (content, raw_url))
    attrs = ' title="%s"' % escape_html(title) if title else ''
    href = escape_html(self.rewrite_url(raw_url))
    return '<a href="%s"%s>%s</a>' % (href, attrs, content)
|
python
|
{
"resource": ""
}
|
q7580
|
SaferHtmlRenderer.check_url
|
train
|
def check_url(self, url, is_image_src=False):
    """
    This method is used to check a URL.

    Returns :obj:`True` if the URL is "safe", :obj:`False` otherwise.

    The default implementation only allows HTTP and HTTPS links. That means
    no ``mailto:``, no ``xmpp:``, no ``ftp:``, etc.

    This method exists specifically to allow easy customization of link
    filtering through subclassing, so don't hesitate to write your own.

    If you're thinking of implementing a blacklist approach, see
    "`Which URL schemes are dangerous (XSS exploitable)?
    <http://security.stackexchange.com/q/148428/37409>`_".
    """
    return self._allowed_url_re.match(url) is not None
|
python
|
{
"resource": ""
}
|
q7581
|
SaferHtmlRenderer.rewrite_url
|
train
|
def rewrite_url(self, url, is_image_src=False):
    """
    This method is called to rewrite URLs.

    It uses either ``self.link_rewrite`` or ``self.img_src_rewrite``
    depending on the value of ``is_image_src``. The URL is returned
    unchanged if the corresponding attribute is :obj:`None`.
    """
    template = self.img_src_rewrite if is_image_src else self.link_rewrite
    if not template:
        return url
    return template.format(url=urlquote(url))
|
python
|
{
"resource": ""
}
|
q7582
|
args_to_int
|
train
|
def args_to_int(mapping, argument):
    """
    Convert a list of option name strings to an int bitmask using a mapping.

    Integers pass through unchanged (deprecated, except for 0); unknown
    names are silently ignored.
    """
    if isinstance(argument, int):
        if argument != 0:
            deprecation('passing extensions and flags as constants is deprecated')
        return argument
    if isinstance(argument, (tuple, list)):
        bitmask = 0
        for name in set(argument):
            if name in mapping:
                bitmask |= mapping[name]
        return bitmask
    raise TypeError('argument must be a list of strings or an int')
|
python
|
{
"resource": ""
}
|
q7583
|
_pack3
|
train
|
def _pack3(obj, fp, **options):
    """
    Serialize a Python object into MessagePack bytes.

    Args:
        obj: a Python object
        fp: a .write()-supporting file-like object

    Kwargs:
        ext_handlers (dict): dictionary of Ext handlers, mapping a custom type
                             to a callable that packs an instance of the type
                             into an Ext object
        force_float_precision (str): "single" to force packing floats as
                                     IEEE-754 single-precision floats,
                                     "double" to force packing floats as
                                     IEEE-754 double-precision floats.

    Returns:
        None.

    Raises:
        UnsupportedType(PackException):
            Object type not supported for packing.

    Example:
    >>> f = open('test.bin', 'wb')
    >>> umsgpack.pack({u"compact": True, u"schema": 0}, f)
    >>>
    """
    global compatibility
    ext_handlers = options.get("ext_handlers")
    # Dispatch order matters: exact-class ext handlers take precedence;
    # bool must be tested before int (bool is an int subclass); and in
    # compatibility mode str/bytes are packed as old-spec raw before the
    # modern str/bin branches can see them.
    if obj is None:
        _pack_nil(obj, fp, options)
    elif ext_handlers and obj.__class__ in ext_handlers:
        _pack_ext(ext_handlers[obj.__class__](obj), fp, options)
    elif isinstance(obj, bool):
        _pack_boolean(obj, fp, options)
    elif isinstance(obj, int):
        _pack_integer(obj, fp, options)
    elif isinstance(obj, float):
        _pack_float(obj, fp, options)
    elif compatibility and isinstance(obj, str):
        _pack_oldspec_raw(obj.encode('utf-8'), fp, options)
    elif compatibility and isinstance(obj, bytes):
        _pack_oldspec_raw(obj, fp, options)
    elif isinstance(obj, str):
        _pack_string(obj, fp, options)
    elif isinstance(obj, bytes):
        _pack_binary(obj, fp, options)
    elif isinstance(obj, (list, tuple)):
        _pack_array(obj, fp, options)
    elif isinstance(obj, dict):
        _pack_map(obj, fp, options)
    elif isinstance(obj, datetime.datetime):
        _pack_ext_timestamp(obj, fp, options)
    elif isinstance(obj, Ext):
        _pack_ext(obj, fp, options)
    elif ext_handlers:
        # Linear search for superclass
        t = next((t for t in ext_handlers.keys() if isinstance(obj, t)), None)
        if t:
            _pack_ext(ext_handlers[t](obj), fp, options)
        else:
            raise UnsupportedTypeException(
                "unsupported type: %s" % str(type(obj)))
    else:
        raise UnsupportedTypeException(
            "unsupported type: %s" % str(type(obj)))
|
python
|
{
"resource": ""
}
|
q7584
|
_get_task_target
|
train
|
def _get_task_target():
    """Get the default target for a pipeline task.

    Current version id format is: user_defined_version.minor_version_number
    Current module id is just the module's name. It could be "default"

    Returns:
        A complete target name is of format version.module. If module is the
        default module, just version. None if target can not be determined.
    """
    # Break circular dependency.
    # pylint: disable=g-import-not-at-top
    import pipeline
    if pipeline._TEST_MODE:
        return None

    # Further protect against test cases that doesn't set env vars
    # propertly.
    required = ("CURRENT_VERSION_ID", "CURRENT_MODULE_ID")
    if any(var not in os.environ for var in required):
        logging.warning("Running Pipeline in non TEST_MODE but important "
                        "env vars are not set.")
        return None

    major_version = os.environ["CURRENT_VERSION_ID"].split(".")[0]
    module_id = os.environ["CURRENT_MODULE_ID"]
    return "%s.%s" % (major_version, module_id)
|
python
|
{
"resource": ""
}
|
q7585
|
is_generator_function
|
train
|
def is_generator_function(obj):
    """Return true if the object is a user-defined generator function.

    Generator function objects provides same attributes as functions.
    See isfunction.__doc__ for attributes listing.

    Adapted from Python 2.6.

    Args:
        obj: an object to test.

    Returns:
        true if the object is generator function.
    """
    CO_GENERATOR = 0x20
    if not (inspect.isfunction(obj) or inspect.ismethod(obj)):
        return False
    # Python 2: the code object lives on `func_code`.
    return bool(obj.func_code.co_flags & CO_GENERATOR)
|
python
|
{
"resource": ""
}
|
q7586
|
_register_json_primitive
|
train
|
def _register_json_primitive(object_type, encoder, decoder):
    """Extend what Pipeline can serialize.

    Registration is first-wins: a type that already has an encoder is left
    untouched.

    Args:
        object_type: type of the object.
        encoder: a function that takes in an object and returns a dict of
            json primitives.
        decoder: inverse function of encoder.
    """
    global _TYPE_TO_ENCODER
    global _TYPE_NAME_TO_DECODER
    if object_type in _TYPE_TO_ENCODER:
        return
    _TYPE_TO_ENCODER[object_type] = encoder
    _TYPE_NAME_TO_DECODER[object_type.__name__] = decoder
|
python
|
{
"resource": ""
}
|
q7587
|
_JsonDecodeKey
|
train
|
def _JsonDecodeKey(d):
    """Json decode a ndb.Key object from its encoded dict form."""
    encoded = d['key_string']
    # A list/tuple holds the flattened key path; otherwise it's urlsafe form.
    if isinstance(encoded, (list, tuple)):
        return ndb.Key(flat=encoded)
    return ndb.Key(urlsafe=encoded)
|
python
|
{
"resource": ""
}
|
q7588
|
JsonDecoder._dict_to_obj
|
train
|
def _dict_to_obj(self, d):
    """Converts a dictionary of json object to a Python object.

    Args:
        d: a dict; if it carries the `JsonEncoder.TYPE_ID` marker it is
            decoded with the decoder registered for that type name,
            otherwise it is returned unchanged.

    Returns:
        The decoded Python object, or `d` itself when no marker is present.

    Raises:
        TypeError: if the marker names a type with no registered decoder.
    """
    if JsonEncoder.TYPE_ID not in d:
        return d
    type_name = d.pop(JsonEncoder.TYPE_ID)
    if type_name in _TYPE_NAME_TO_DECODER:
        decoder = _TYPE_NAME_TO_DECODER[type_name]
        return decoder(d)
    else:
        # Bug fix: the message was previously passed logging-style as a
        # second argument, so the %s placeholder was never formatted.
        raise TypeError("Invalid type %s." % type_name)
|
python
|
{
"resource": ""
}
|
q7589
|
_write_json_blob
|
train
|
def _write_json_blob(encoded_value, pipeline_id=None):
    """Writes a JSON encoded value to a Cloud Storage File.

    This function will store the blob in a GCS file in the default bucket under
    the appengine_pipeline directory. Optionally using another directory level
    specified by pipeline_id

    Args:
        encoded_value: The encoded JSON string.
        pipeline_id: A pipeline id to segment files in Cloud Storage, if none,
            the file will be created under appengine_pipeline

    Returns:
        The blobstore.BlobKey for the file that was created.

    Raises:
        Exception: if the application has no default GCS bucket configured.
    """
    default_bucket = app_identity.get_default_gcs_bucket_name()
    if default_bucket is None:
        raise Exception(
            "No default cloud storage bucket has been set for this application. "
            "This app was likely created before v1.9.0, please see: "
            "https://cloud.google.com/appengine/docs/php/googlestorage/setup")
    # Random file name avoids collisions between concurrent writers.
    path_components = ['/', default_bucket, "appengine_pipeline"]
    if pipeline_id:
        path_components.append(pipeline_id)
    path_components.append(uuid.uuid4().hex)
    # Use posixpath to get a / even if we're running on windows somehow
    file_name = posixpath.join(*path_components)
    with cloudstorage.open(file_name, 'w', content_type='application/json') as f:
        # Write in bounded chunks to stay under per-write size limits.
        for start_index in xrange(0, len(encoded_value), _MAX_JSON_SIZE):
            end_index = start_index + _MAX_JSON_SIZE
            f.write(encoded_value[start_index:end_index])
    # Expose the GCS object through the blobstore API.
    key_str = blobstore.create_gs_key("/gs" + file_name)
    logging.debug("Created blob for filename = %s gs_key = %s", file_name, key_str)
    return blobstore.BlobKey(key_str)
|
python
|
{
"resource": ""
}
|
q7590
|
_dereference_args
|
train
|
def _dereference_args(pipeline_name, args, kwargs):
    """Dereference a Pipeline's arguments that are slots, validating them.

    Each argument value passed in is assumed to be a dictionary with the format:
        {'type': 'value', 'value': 'serializable'}  # A resolved value.
        {'type': 'slot', 'slot_key': 'str() on a db.Key'}  # A pending Slot.

    Args:
        pipeline_name: The name of the pipeline class; used for debugging.
        args: Iterable of positional arguments.
        kwargs: Dictionary of keyword arguments.

    Returns:
        Tuple (args, kwargs) where:
            Args: A list of positional arguments values that are all dereferenced.
            Kwargs: A list of keyword arguments values that are all dereferenced.

    Raises:
        SlotNotFilledError if any of the supplied 'slot_key' records are not
            present in the Datastore or have not yet been filled.
        UnexpectedPipelineError if an unknown parameter type was passed.
    """
    # First pass: collect every referenced slot key so all slot records can
    # be fetched in a single batched datastore get.
    lookup_slots = set()
    for arg in itertools.chain(args, kwargs.itervalues()):
        if arg['type'] == 'slot':
            lookup_slots.add(db.Key(arg['slot_key']))
    # Fetch and validate: every referenced slot must exist and be filled.
    slot_dict = {}
    for key, slot_record in zip(lookup_slots, db.get(lookup_slots)):
        if slot_record is None or slot_record.status != _SlotRecord.FILLED:
            raise SlotNotFilledError(
                'Slot "%s" missing its value. From %s(*args=%s, **kwargs=%s)' %
                (key, pipeline_name, _short_repr(args), _short_repr(kwargs)))
        slot_dict[key] = slot_record.value
    # Second pass: substitute slot values / literal values in order.
    arg_list = []
    for current_arg in args:
        if current_arg['type'] == 'slot':
            arg_list.append(slot_dict[db.Key(current_arg['slot_key'])])
        elif current_arg['type'] == 'value':
            arg_list.append(current_arg['value'])
        else:
            raise UnexpectedPipelineError('Unknown parameter type: %r' % current_arg)
    kwarg_dict = {}
    for key, current_arg in kwargs.iteritems():
        if current_arg['type'] == 'slot':
            kwarg_dict[key] = slot_dict[db.Key(current_arg['slot_key'])]
        elif current_arg['type'] == 'value':
            kwarg_dict[key] = current_arg['value']
        else:
            raise UnexpectedPipelineError('Unknown parameter type: %r' % current_arg)
    return (arg_list, kwarg_dict)
|
python
|
{
"resource": ""
}
|
q7591
|
_generate_args
|
train
|
def _generate_args(pipeline, future, queue_name, base_path):
  """Generate the params used to describe a Pipeline's depedencies.

  The arguments passed to this method may be normal values, Slot instances
  (for named outputs), or PipelineFuture instances (for referring to the
  default output slot).

  Args:
    pipeline: The Pipeline instance to generate args for.
    future: The PipelineFuture for the Pipeline these arguments correspond to.
    queue_name: The queue to run the pipeline on.
    base_path: Relative URL for pipeline URL handlers.

  Returns:
    Tuple (dependent_slots, output_slot_keys, params_text, params_blob) where:
      dependent_slots: List of db.Key instances of _SlotRecords on which
        this pipeline will need to block before execution (passed to
        create a _BarrierRecord for running the pipeline).
      output_slot_keys: List of db.Key instances of _SlotRecords that will
        be filled by this pipeline during its execution (passed to create
        a _BarrierRecord for finalizing the pipeline).
      params_text: JSON dictionary of pipeline parameters to be serialized and
        saved in a corresponding _PipelineRecord. Will be None if the params are
        too big and must be saved in a blob instead.
      params_blob: JSON dictionary of pipeline parameters to be serialized and
        saved in a Blob file, and then attached to a _PipelineRecord. Will be
        None if the params data size was small enough to fit in the entity.
  """
  # Serializable description of the pipeline invocation; filled in below.
  params = {
      'args': [],
      'kwargs': {},
      'after_all': [],
      'output_slots': {},
      'class_path': pipeline._class_path,
      'queue_name': queue_name,
      'base_path': base_path,
      'backoff_seconds': pipeline.backoff_seconds,
      'backoff_factor': pipeline.backoff_factor,
      'max_attempts': pipeline.max_attempts,
      'task_retry': pipeline.task_retry,
      'target': pipeline.target,
  }
  # Keys of all slots this pipeline must wait on before it can run.
  dependent_slots = set()

  # Encode positional arguments, replacing Slot/PipelineFuture references
  # with {'type': 'slot'} markers that are dereferenced at run time.
  arg_list = params['args']
  for current_arg in pipeline.args:
    if isinstance(current_arg, PipelineFuture):
      # A bare future stands for its default output slot.
      current_arg = current_arg.default
    if isinstance(current_arg, Slot):
      arg_list.append({'type': 'slot', 'slot_key': str(current_arg.key)})
      dependent_slots.add(current_arg.key)
    else:
      arg_list.append({'type': 'value', 'value': current_arg})

  # Encode keyword arguments the same way.
  kwarg_dict = params['kwargs']
  for name, current_arg in pipeline.kwargs.iteritems():
    if isinstance(current_arg, PipelineFuture):
      current_arg = current_arg.default
    if isinstance(current_arg, Slot):
      kwarg_dict[name] = {'type': 'slot', 'slot_key': str(current_arg.key)}
      dependent_slots.add(current_arg.key)
    else:
      kwarg_dict[name] = {'type': 'value', 'value': current_arg}

  # Ordering barriers: block on the default output slot of every pipeline
  # this one was declared to run after.
  after_all = params['after_all']
  for other_future in future._after_all_pipelines:
    slot_key = other_future._output_dict['default'].key
    after_all.append(str(slot_key))
    dependent_slots.add(slot_key)

  # Record every output slot this pipeline is responsible for filling.
  output_slots = params['output_slots']
  output_slot_keys = set()
  for name, slot in future._output_dict.iteritems():
    output_slot_keys.add(slot.key)
    output_slots[name] = str(slot.key)

  # Store the params inline when small enough; otherwise spill them to a
  # blob to stay under the Datastore entity size limit.
  params_encoded = json.dumps(params, cls=mr_util.JsonEncoder)
  params_text = None
  params_blob = None
  if len(params_encoded) > _MAX_JSON_SIZE:
    params_blob = _write_json_blob(params_encoded, pipeline.pipeline_id)
  else:
    params_text = params_encoded

  return dependent_slots, output_slot_keys, params_text, params_blob
|
python
|
{
"resource": ""
}
|
q7592
|
_get_timestamp_ms
|
train
|
def _get_timestamp_ms(when):
"""Converts a datetime.datetime to integer milliseconds since the epoch.
Requires special handling to preserve microseconds.
Args:
when: A datetime.datetime instance.
Returns:
Integer time since the epoch in milliseconds. If the supplied 'when' is
None, the return value will be None.
"""
if when is None:
return None
ms_since_epoch = float(time.mktime(when.utctimetuple()) * 1000.0)
ms_since_epoch += when.microsecond / 1000.0
return int(ms_since_epoch)
|
python
|
{
"resource": ""
}
|
q7593
|
_get_internal_slot
|
train
|
def _get_internal_slot(slot_key=None,
                       filler_pipeline_key=None,
                       slot_dict=None):
  """Gets information about a _SlotRecord for display in UI.

  Args:
    slot_key: The db.Key of the slot to fetch.
    filler_pipeline_key: In the case the slot has not yet been filled, assume
      that the given db.Key (for a _PipelineRecord) will be the filler of
      the slot in the future.
    slot_dict: The slot JSON dictionary.

  Returns:
    Dictionary with the keys:
      status: Slot status: 'filled' or 'waiting'
      fillTimeMs: Time in milliseconds since the epoch of when it was filled.
      value: The current value of the slot, which is a slot's JSON dictionary.
      fillerPipelineId: The pipeline ID of what stage has or should fill
        this slot.

  Raises:
    PipelineStatusError if any input is bad.
  """
  lookup = slot_dict if slot_dict is not None else {}
  record = lookup.get(slot_key)
  if record is None:
    raise PipelineStatusError(
        'Could not find data for output slot key "%s".' % slot_key)

  if record.status == _SlotRecord.FILLED:
    result = {
        'status': 'filled',
        'fillTimeMs': _get_timestamp_ms(record.fill_time),
        'value': record.value,
    }
    # A filled slot knows its actual filler; prefer it over the caller's
    # assumed filler_pipeline_key.
    filler_pipeline_key = (
        _SlotRecord.filler.get_value_for_datastore(record))
  else:
    result = {'status': 'waiting'}

  if filler_pipeline_key:
    result['fillerPipelineId'] = filler_pipeline_key.name()
  return result
|
python
|
{
"resource": ""
}
|
q7594
|
get_status_tree
|
train
|
def get_status_tree(root_pipeline_id):
  """Gets the full status tree of a pipeline.

  Args:
    root_pipeline_id: The pipeline ID to get status for.

  Returns:
    Dictionary with the keys:
      rootPipelineId: The ID of the root pipeline.
      slots: Mapping of slot IDs to result of from _get_internal_slot.
      pipelines: Mapping of pipeline IDs to result of _get_internal_status.

  Raises:
    PipelineStatusError if any input is bad.
  """
  root_pipeline_key = db.Key.from_path(_PipelineRecord.kind(), root_pipeline_id)
  root_pipeline_record = db.get(root_pipeline_key)
  if root_pipeline_record is None:
    raise PipelineStatusError(
        'Could not find pipeline ID "%s"' % root_pipeline_id)

  # If the supplied root_pipeline_id is not actually the root pipeline that's
  # okay. We'll find the real root and override the value they passed in.
  actual_root_key = _PipelineRecord.root_pipeline.get_value_for_datastore(
      root_pipeline_record)
  if actual_root_key != root_pipeline_key:
    root_pipeline_key = actual_root_key
    root_pipeline_id = root_pipeline_key.id_or_name()
    root_pipeline_record = db.get(root_pipeline_key)
    if not root_pipeline_record:
      raise PipelineStatusError(
          'Could not find pipeline ID "%s"' % root_pipeline_id)

  # Run all queries asynchronously.
  queries = {}
  for model in (_PipelineRecord, _SlotRecord, _BarrierRecord, _StatusRecord):
    queries[model] = model.all().filter(
        'root_pipeline =', root_pipeline_key).run(batch_size=1000)

  # Iterating each async query result below blocks until it completes;
  # index every fetched entity by its key for fast lookup.
  found_pipeline_dict = dict(
      (stage.key(), stage) for stage in queries[_PipelineRecord])
  found_slot_dict = dict(
      (slot.key(), slot) for slot in queries[_SlotRecord])
  found_barrier_dict = dict(
      (barrier.key(), barrier) for barrier in queries[_BarrierRecord])
  found_status_dict = dict(
      (status.key(), status) for status in queries[_StatusRecord])

  # Breadth-first traversal of _PipelineRecord instances by following
  # _PipelineRecord.fanned_out property values.
  valid_pipeline_keys = set([root_pipeline_key])
  slot_filler_dict = {}  # slot_key to pipeline_key
  expand_stack = [root_pipeline_record]
  while expand_stack:
    old_stack = expand_stack
    expand_stack = []
    for pipeline_record in old_stack:
      for child_pipeline_key in pipeline_record.fanned_out:
        # This will let us prune off those pipelines which were allocated in
        # the Datastore but were never run due to mid-flight task failures.
        child_pipeline_record = found_pipeline_dict.get(child_pipeline_key)
        if child_pipeline_record is None:
          raise PipelineStatusError(
              'Pipeline ID "%s" points to child ID "%s" which does not exist.'
              % (pipeline_record.key().name(), child_pipeline_key.name()))
        expand_stack.append(child_pipeline_record)
        valid_pipeline_keys.add(child_pipeline_key)

        # Figure out the deepest pipeline that's responsible for outputting to
        # a particular _SlotRecord, so we can report which pipeline *should*
        # be the filler.
        child_outputs = child_pipeline_record.params['output_slots']
        for output_slot_key in child_outputs.itervalues():
          slot_filler_dict[db.Key(output_slot_key)] = child_pipeline_key

  # Assemble the response: per-pipeline status for every reachable pipeline,
  # then per-slot status for every slot a reachable pipeline should fill.
  output = {
      'rootPipelineId': root_pipeline_id,
      'slots': {},
      'pipelines': {},
  }

  for pipeline_key in found_pipeline_dict.keys():
    # Skip entities that were allocated but never reached by the traversal.
    if pipeline_key not in valid_pipeline_keys:
      continue
    output['pipelines'][pipeline_key.name()] = _get_internal_status(
        pipeline_key=pipeline_key,
        pipeline_dict=found_pipeline_dict,
        slot_dict=found_slot_dict,
        barrier_dict=found_barrier_dict,
        status_dict=found_status_dict)

  for slot_key, filler_pipeline_key in slot_filler_dict.iteritems():
    output['slots'][str(slot_key)] = _get_internal_slot(
        slot_key=slot_key,
        filler_pipeline_key=filler_pipeline_key,
        slot_dict=found_slot_dict)

  return output
|
python
|
{
"resource": ""
}
|
q7595
|
get_pipeline_names
|
train
|
def get_pipeline_names():
  """Returns the class paths of all Pipelines defined in alphabetical order."""
  # Deduplicate with a set comprehension; classes without a class_path
  # (e.g. abstract/unregistered ones) are skipped.
  unique_paths = {cls.class_path for cls in _PipelineMeta._all_classes
                  if cls.class_path is not None}
  return sorted(unique_paths)
|
python
|
{
"resource": ""
}
|
q7596
|
get_root_list
|
train
|
def get_root_list(class_path=None, cursor=None, count=50):
  """Gets a list root Pipelines.

  Args:
    class_path: Optional. If supplied, only return root Pipelines with the
      given class_path. By default all root pipelines are returned.
    cursor: Optional. When supplied, the cursor returned from the last call to
      get_root_list which indicates where to pick up.
    count: How many pipeline returns to return.

  Returns:
    Dictionary with the keys:
      pipelines: The list of Pipeline records in the same format as
        returned by get_status_tree, but with only the roots listed.
      cursor: Cursor to pass back to this function to resume the query. Will
        only be present if there is another page of results.

  Raises:
    PipelineStatusError if any input is bad.
  """
  query = _PipelineRecord.all(cursor=cursor)
  if class_path:
    query.filter('class_path =', class_path)
  query.filter('is_root_pipeline =', True)
  query.order('-start_time')

  root_list = query.fetch(count)

  # For each root pipeline, fetch its default output slot, its finalization
  # barrier, and its latest status message, all in a single batch get.
  fetch_list = []
  for pipeline_record in root_list:
    fetch_list.append(db.Key(pipeline_record.params['output_slots']['default']))
    fetch_list.append(db.Key.from_path(
        _BarrierRecord.kind(), _BarrierRecord.FINALIZE,
        parent=pipeline_record.key()))
    fetch_list.append(db.Key.from_path(
        _StatusRecord.kind(), pipeline_record.key().name()))

  pipeline_dict = dict((stage.key(), stage) for stage in root_list)
  slot_dict = {}
  barrier_dict = {}
  status_dict = {}
  # Sort the batch-get results into per-kind lookup dictionaries; missing
  # entities come back as None and are simply not indexed.
  for entity in db.get(fetch_list):
    if isinstance(entity, _BarrierRecord):
      barrier_dict[entity.key()] = entity
    elif isinstance(entity, _SlotRecord):
      slot_dict[entity.key()] = entity
    elif isinstance(entity, _StatusRecord):
      status_dict[entity.key()] = entity

  results = []
  for pipeline_record in root_list:
    try:
      output = _get_internal_status(
          pipeline_record.key(),
          pipeline_dict=pipeline_dict,
          slot_dict=slot_dict,
          barrier_dict=barrier_dict,
          status_dict=status_dict)
      output['pipelineId'] = pipeline_record.key().name()
      results.append(output)
    except PipelineStatusError, e:
      # Best effort: a single broken pipeline record should not hide the
      # rest of the listing; surface its error as the status text.
      output = {'status': e.message}
      output['classPath'] = ''
      output['pipelineId'] = pipeline_record.key().name()
      results.append(output)

  result_dict = {}
  # Probe for one more result past the current page; only include the
  # cursor in the response when another page actually exists.
  cursor = query.cursor()
  query.with_cursor(cursor)
  if query.get(keys_only=True):
    result_dict.update(cursor=cursor)
  result_dict.update(pipelines=results)
  return result_dict
|
python
|
{
"resource": ""
}
|
q7597
|
Slot.value
|
train
|
def value(self):
  """Returns the current value of this slot.

  Returns:
    The value of the slot (a serializable Python type).

  Raises:
    SlotNotFilledError if the value hasn't been filled yet.
  """
  # Guard clause: accessing the value of an unfilled slot is an error.
  if self.filled:
    return self._value
  raise SlotNotFilledError('Slot with name "%s", key "%s" not yet filled.'
                           % (self.name, self.key))
|
python
|
{
"resource": ""
}
|
q7598
|
Slot.filler
|
train
|
def filler(self):
  """Returns the pipeline ID that filled this slot's value.

  Returns:
    A string that is the pipeline ID.

  Raises:
    SlotNotFilledError if the value hasn't been filled yet.
  """
  # Guard clause: the filler is only known once the slot has been filled.
  if self.filled:
    return self._filler_pipeline_key.name()
  raise SlotNotFilledError('Slot with name "%s", key "%s" not yet filled.'
                           % (self.name, self.key))
|
python
|
{
"resource": ""
}
|
q7599
|
Slot.fill_datetime
|
train
|
def fill_datetime(self):
  """Returns when the slot was filled.

  Returns:
    A datetime.datetime.

  Raises:
    SlotNotFilledError if the value hasn't been filled yet.
  """
  # Guard clause: the fill time only exists once the slot has been filled.
  if self.filled:
    return self._fill_datetime
  raise SlotNotFilledError('Slot with name "%s", key "%s" not yet filled.'
                           % (self.name, self.key))
|
python
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.