| partition (string, 3 classes) | func_name (string, 1-134 chars) | docstring (string, 1-46.9k chars) | path (string, 4-223 chars) | original_string (string, 75-104k chars) | code (string, 75-104k chars) | docstring_tokens (list, 1-1.97k items) | repo (string, 7-55 chars) | language (string, 1 class) | url (string, 87-315 chars) | code_tokens (list, 19-28.4k items) | sha (string, 40 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|
test | Descriptor.err_msg | Return an error message for use in exceptions thrown by
subclasses. | descriptors/Descriptor.py | def err_msg(self, instance, value):
"""Return an error message for use in exceptions thrown by
subclasses.
"""
if not hasattr(self, "name"):
# err_msg will be called by the composed descriptor
return ""
return (
"Attempted to set the {f_type} attribute {inst}.{attr} to the "
"{val_type} value {val}, which does not satisfy the condition "
"{f_type}.".format(
f_type=self.field_type,
inst=instance.__class__.__name__,
attr=self.name,
val_type=value.__class__.__name__,
val=value)) | def err_msg(self, instance, value):
"""Return an error message for use in exceptions thrown by
subclasses.
"""
if not hasattr(self, "name"):
# err_msg will be called by the composed descriptor
return ""
return (
"Attempted to set the {f_type} attribute {inst}.{attr} to the "
"{val_type} value {val}, which does not satisfy the condition "
"{f_type}.".format(
f_type=self.field_type,
inst=instance.__class__.__name__,
attr=self.name,
val_type=value.__class__.__name__,
val=value)) | [
"Return",
"an",
"error",
"message",
"for",
"use",
"in",
"exceptions",
"thrown",
"by",
"subclasses",
"."
] | bheinzerling/descriptors | python | https://github.com/bheinzerling/descriptors/blob/04fff864649fba9bd6a2d8f8b649cf30994e0e46/descriptors/Descriptor.py#L83-L99 | [
"def",
"err_msg",
"(",
"self",
",",
"instance",
",",
"value",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"name\"",
")",
":",
"# err_msg will be called by the composed descriptor",
"return",
"\"\"",
"return",
"(",
"\"Attempted to set the {f_type} attribute {inst}.{attr} to the \"",
"\"{val_type} value {val}, which does not satisfy the condition \"",
"\"{f_type}.\"",
".",
"format",
"(",
"f_type",
"=",
"self",
".",
"field_type",
",",
"inst",
"=",
"instance",
".",
"__class__",
".",
"__name__",
",",
"attr",
"=",
"self",
".",
"name",
",",
"val_type",
"=",
"value",
".",
"__class__",
".",
"__name__",
",",
"val",
"=",
"value",
")",
")"
] | 04fff864649fba9bd6a2d8f8b649cf30994e0e46 |
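A minimal sketch of how a subclass might use `err_msg`. The `Positive` descriptor, the `field_type` string, and the manual `name` assignment are illustrative assumptions; in the library, `name` appears to be attached by the composing descriptor machinery.

```python
class Positive(Descriptor):
    field_type = "positive"

    def __set__(self, instance, value):
        if value <= 0:
            raise ValueError(self.err_msg(instance, value))
        instance.__dict__[self.name] = value


class Account(object):
    balance = Positive()


# the library attaches `name` during composition; done by hand here
Account.__dict__["balance"].name = "balance"

acct = Account()
try:
    acct.balance = -5
except ValueError as exc:
    print(exc)
# Attempted to set the positive attribute Account.balance to the int
# value -5, which does not satisfy the condition positive.
```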
test | Descriptor.exc_thrown_by_descriptor | Return True if the last exception was thrown by a
Descriptor instance. | descriptors/Descriptor.py | def exc_thrown_by_descriptor():
"""Return True if the last exception was thrown by a
Descriptor instance.
"""
traceback = sys.exc_info()[2]
tb_locals = traceback.tb_frame.f_locals
# relying on naming convention to get the object that threw
# the exception
if "self" in tb_locals:
if not isinstance(tb_locals["self"], Descriptor):
return False
return True
return False | def exc_thrown_by_descriptor():
"""Return True if the last exception was thrown by a
Descriptor instance.
"""
traceback = sys.exc_info()[2]
tb_locals = traceback.tb_frame.f_locals
# relying on naming convention to get the object that threw
# the exception
if "self" in tb_locals:
if not isinstance(tb_locals["self"], Descriptor):
return False
return True
return False | [
"Return",
"True",
"if",
"the",
"last",
"exception",
"was",
"thrown",
"by",
"a",
"Descriptor",
"instance",
"."
] | bheinzerling/descriptors | python | https://github.com/bheinzerling/descriptors/blob/04fff864649fba9bd6a2d8f8b649cf30994e0e46/descriptors/Descriptor.py#L171-L184 | [
"def",
"exc_thrown_by_descriptor",
"(",
")",
":",
"traceback",
"=",
"sys",
".",
"exc_info",
"(",
")",
"[",
"2",
"]",
"tb_locals",
"=",
"traceback",
".",
"tb_frame",
".",
"f_locals",
"# relying on naming convention to get the object that threw",
"# the exception",
"if",
"\"self\"",
"in",
"tb_locals",
":",
"if",
"not",
"isinstance",
"(",
"tb_locals",
"[",
"\"self\"",
"]",
",",
"Descriptor",
")",
":",
"return",
"False",
"return",
"True",
"return",
"False"
] | 04fff864649fba9bd6a2d8f8b649cf30994e0e46 |
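The helper above identifies the raiser purely by the naming convention that methods bind `self`. Below is a rough demonstration of the same `f_locals` inspection, reusing the `Descriptor` base from this module; note that this sketch walks the whole traceback chain, whereas the row's code inspects only the first entry.

```python
import sys


class IntOnly(Descriptor):
    def __set__(self, instance, value):
        int(value)  # raises ValueError for non-numeric strings


class Model(object):
    x = IntOnly()


m = Model()
try:
    m.x = "abc"
except ValueError:
    tb = sys.exc_info()[2]
    while tb.tb_next is not None:  # walk to the innermost Python frame
        tb = tb.tb_next
    raiser = tb.tb_frame.f_locals.get("self")
    print(isinstance(raiser, Descriptor))  # True: raised inside IntOnly
```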
test | Series._set_data | This method will be called to set Series data | flot/__init__.py | def _set_data(self):
"""
This method will be called to set Series data
"""
if getattr(self, 'data', False) and not getattr(self, '_x', False) and not getattr(self, '_y', False):
_x = XVariable()
_y = YVariable()
_x.contribute_to_class(self, 'X', self.data)
_y.contribute_to_class(self, 'Y', self.data)
self['data'] = zip(self._x.points, self._y.points)
else:
for axis in ('_x', '_y'):
axis_obj = getattr(self, axis, False)
if not axis_obj:
raise exception.MissingAxisException("%s missing" % axis)
if not getattr(axis_obj, 'points', False):
raise exception.MissingDataException()
self['data'] = zip(self._x.points, self._y.points) | def _set_data(self):
"""
This method will be called to set Series data
"""
if getattr(self, 'data', False) and not getattr(self, '_x', False) and not getattr(self, '_y', False):
_x = XVariable()
_y = YVariable()
_x.contribute_to_class(self, 'X', self.data)
_y.contribute_to_class(self, 'Y', self.data)
self['data'] = zip(self._x.points, self._y.points)
else:
for axis in ('_x', '_y'):
axis_obj = getattr(self, axis, False)
if not axis_obj:
raise exception.MissingAxisException("%s missing" % axis)
if not getattr(axis_obj, 'points', False):
raise exception.MissingDataException()
self['data'] = zip(self._x.points, self._y.points) | [
"This",
"method",
"will",
"be",
"called",
"to",
"set",
"Series",
"data"
] | andrefsp/pyflot | python | https://github.com/andrefsp/pyflot/blob/f2dde10709aeed39074fcce8172184b5cd8bfd66/flot/__init__.py#L168-L186 | [
"def",
"_set_data",
"(",
"self",
")",
":",
"if",
"getattr",
"(",
"self",
",",
"'data'",
",",
"False",
")",
"and",
"not",
"getattr",
"(",
"self",
",",
"'_x'",
",",
"False",
")",
"and",
"not",
"getattr",
"(",
"self",
",",
"'_y'",
",",
"False",
")",
":",
"_x",
"=",
"XVariable",
"(",
")",
"_y",
"=",
"YVariable",
"(",
")",
"_x",
".",
"contribute_to_class",
"(",
"self",
",",
"'X'",
",",
"self",
".",
"data",
")",
"_y",
".",
"contribute_to_class",
"(",
"self",
",",
"'Y'",
",",
"self",
".",
"data",
")",
"self",
"[",
"'data'",
"]",
"=",
"zip",
"(",
"self",
".",
"_x",
".",
"points",
",",
"self",
".",
"_y",
".",
"points",
")",
"else",
":",
"for",
"axis",
"in",
"(",
"'_x'",
",",
"'_y'",
")",
":",
"axis_obj",
"=",
"getattr",
"(",
"self",
",",
"axis",
",",
"False",
")",
"if",
"not",
"axis_obj",
":",
"raise",
"exception",
".",
"MissingAxisException",
"(",
"\"%s missing\"",
"%",
"axis",
")",
"if",
"not",
"getattr",
"(",
"axis_obj",
",",
"'points'",
",",
"False",
")",
":",
"raise",
"exception",
".",
"MissingDataException",
"(",
")",
"self",
"[",
"'data'",
"]",
"=",
"zip",
"(",
"self",
".",
"_x",
".",
"points",
",",
"self",
".",
"_y",
".",
"points",
")"
] | f2dde10709aeed39074fcce8172184b5cd8bfd66 |
test | Graph._get_axis_mode | will get the axis mode for the current series | flot/__init__.py | def _get_axis_mode(self, axis):
"will get the axis mode for the current series"
if all([isinstance(getattr(s, axis), TimeVariable) for s in self._series]):
return 'time'
return None | def _get_axis_mode(self, axis):
"will get the axis mode for the current series"
if all([isinstance(getattr(s, axis), TimeVariable) for s in self._series]):
return 'time'
return None | [
"will",
"get",
"the",
"axis",
"mode",
"for",
"the",
"current",
"series"
] | andrefsp/pyflot | python | https://github.com/andrefsp/pyflot/blob/f2dde10709aeed39074fcce8172184b5cd8bfd66/flot/__init__.py#L266-L270 | [
"def",
"_get_axis_mode",
"(",
"self",
",",
"axis",
")",
":",
"if",
"all",
"(",
"[",
"isinstance",
"(",
"getattr",
"(",
"s",
",",
"axis",
")",
",",
"TimeVariable",
")",
"for",
"s",
"in",
"self",
".",
"_series",
"]",
")",
":",
"return",
"'time'",
"return",
"None"
] | f2dde10709aeed39074fcce8172184b5cd8bfd66 |
test | Graph._set_options | sets the graph plotting options | flot/__init__.py | def _set_options(self):
"sets the graph plotting options"
# this is awful
# FIXME: Axis options should be passed completely by a GraphOption
if 'xaxis' in self._options.keys():
self._options['xaxis'].update(
{'mode' : self._get_axis_mode(XAxis._var_name)})
if 'yaxis' in self._options.keys():
self._options['yaxis'].update(
{'mode' : self._get_axis_mode(YAxis._var_name)}) | def _set_options(self):
"sets the graph ploting options"
# this is aweful
# FIXME: Axis options should be passed completly by a GraphOption
if 'xaxis' in self._options.keys():
self._options['xaxis'].update(
{'mode' : self._get_axis_mode(XAxis._var_name)})
if 'yaxis' in self._options.keys():
self._options['yaxis'].update(
{'mode' : self._get_axis_mode(YAxis._var_name)}) | [
"sets",
"the",
"graph",
"ploting",
"options"
] | andrefsp/pyflot | python | https://github.com/andrefsp/pyflot/blob/f2dde10709aeed39074fcce8172184b5cd8bfd66/flot/__init__.py#L272-L281 | [
"def",
"_set_options",
"(",
"self",
")",
":",
"# this is aweful",
"# FIXME: Axis options should be passed completly by a GraphOption",
"if",
"'xaxis'",
"in",
"self",
".",
"_options",
".",
"keys",
"(",
")",
":",
"self",
".",
"_options",
"[",
"'xaxis'",
"]",
".",
"update",
"(",
"{",
"'mode'",
":",
"self",
".",
"_get_axis_mode",
"(",
"XAxis",
".",
"_var_name",
")",
"}",
")",
"if",
"'yaxis'",
"in",
"self",
".",
"_options",
".",
"keys",
"(",
")",
":",
"self",
".",
"_options",
"[",
"'yaxis'",
"]",
".",
"update",
"(",
"{",
"'mode'",
":",
"self",
".",
"_get_axis_mode",
"(",
"YAxis",
".",
"_var_name",
")",
"}",
")"
] | f2dde10709aeed39074fcce8172184b5cd8bfd66 |
test | create_init | Create an __init__ method that sets all the attributes
necessary for the function the Descriptor invokes to check the
value. | descriptors/massproduced.py | def create_init(attrs):
"""Create an __init__ method that sets all the attributes
necessary for the function the Descriptor invokes to check the
value.
"""
args = ", ".join(attrs)
vals = ", ".join(['getattr(self, "{}")'.format(attr) for attr in attrs])
attr_lines = "\n ".join(
["self.{attr} = {attr}".format(attr=attr) for attr in attrs])
init_code = """def _init(self, {args}):
super(self.__class__, self).__init__()
{attr_lines}
self.field_type += "({{}})".format(
", ".join([str(val) for val in [{vals}]]))
""".format(args=args, attr_lines=attr_lines, vals=vals)
exec(init_code, globals())
return _init | def create_init(attrs):
"""Create an __init__ method that sets all the attributes
necessary for the function the Descriptor invokes to check the
value.
"""
args = ", ".join(attrs)
vals = ", ".join(['getattr(self, "{}")'.format(attr) for attr in attrs])
attr_lines = "\n ".join(
["self.{attr} = {attr}".format(attr=attr) for attr in attrs])
init_code = """def _init(self, {args}):
super(self.__class__, self).__init__()
{attr_lines}
self.field_type += "({{}})".format(
", ".join([str(val) for val in [{vals}]]))
""".format(args=args, attr_lines=attr_lines, vals=vals)
exec(init_code, globals())
return _init | [
"Create",
"an",
"__init__",
"method",
"that",
"sets",
"all",
"the",
"attributes",
"necessary",
"for",
"the",
"function",
"the",
"Descriptor",
"invokes",
"to",
"check",
"the",
"value",
"."
] | bheinzerling/descriptors | python | https://github.com/bheinzerling/descriptors/blob/04fff864649fba9bd6a2d8f8b649cf30994e0e46/descriptors/massproduced.py#L51-L68 | [
"def",
"create_init",
"(",
"attrs",
")",
":",
"args",
"=",
"\", \"",
".",
"join",
"(",
"attrs",
")",
"vals",
"=",
"\", \"",
".",
"join",
"(",
"[",
"'getattr(self, \"{}\")'",
".",
"format",
"(",
"attr",
")",
"for",
"attr",
"in",
"attrs",
"]",
")",
"attr_lines",
"=",
"\"\\n \"",
".",
"join",
"(",
"[",
"\"self.{attr} = {attr}\"",
".",
"format",
"(",
"attr",
"=",
"attr",
")",
"for",
"attr",
"in",
"attrs",
"]",
")",
"init_code",
"=",
"\"\"\"def _init(self, {args}):\n super(self.__class__, self).__init__()\n {attr_lines}\n self.field_type += \"({{}})\".format(\n \", \".join([str(val) for val in [{vals}]]))\n \"\"\"",
".",
"format",
"(",
"args",
"=",
"args",
",",
"attr_lines",
"=",
"attr_lines",
",",
"vals",
"=",
"vals",
")",
"exec",
"(",
"init_code",
",",
"globals",
"(",
")",
")",
"return",
"_init"
] | 04fff864649fba9bd6a2d8f8b649cf30994e0e46 |
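To make the `exec` template concrete, this is roughly the source string that `create_init(["min", "max"])` would render and compile (reconstructed from the format string above; whitespace approximate):

```python
def _init(self, min, max):
    super(self.__class__, self).__init__()
    self.min = min
    self.max = max
    self.field_type += "({})".format(
        ", ".join([str(val) for val in [getattr(self, "min"),
                                        getattr(self, "max")]]))
```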
test | create_setter | Create the __set__ method for the descriptor. | descriptors/massproduced.py | def create_setter(func, attrs):
"""Create the __set__ method for the descriptor."""
def _set(self, instance, value, name=None):
args = [getattr(self, attr) for attr in attrs]
if not func(value, *args):
raise ValueError(self.err_msg(instance, value))
return _set | def create_setter(func, attrs):
"""Create the __set__ method for the descriptor."""
def _set(self, instance, value, name=None):
args = [getattr(self, attr) for attr in attrs]
if not func(value, *args):
raise ValueError(self.err_msg(instance, value))
return _set | [
"Create",
"the",
"__set__",
"method",
"for",
"the",
"descriptor",
"."
] | bheinzerling/descriptors | python | https://github.com/bheinzerling/descriptors/blob/04fff864649fba9bd6a2d8f8b649cf30994e0e46/descriptors/massproduced.py#L71-L77 | [
"def",
"create_setter",
"(",
"func",
",",
"attrs",
")",
":",
"def",
"_set",
"(",
"self",
",",
"instance",
",",
"value",
",",
"name",
"=",
"None",
")",
":",
"args",
"=",
"[",
"getattr",
"(",
"self",
",",
"attr",
")",
"for",
"attr",
"in",
"attrs",
"]",
"if",
"not",
"func",
"(",
"value",
",",
"*",
"args",
")",
":",
"raise",
"ValueError",
"(",
"self",
".",
"err_msg",
"(",
"instance",
",",
"value",
")",
")",
"return",
"_set"
] | 04fff864649fba9bd6a2d8f8b649cf30994e0e46 |
test | make_class | Turn a funcs list element into a class object. | descriptors/massproduced.py | def make_class(clsname, func, attrs):
"""Turn a funcs list element into a class object."""
clsdict = {"__set__": create_setter(func, attrs)}
if len(attrs) > 0:
clsdict["__init__"] = create_init(attrs)
clsobj = type(str(clsname), (Descriptor, ), clsdict)
clsobj.__doc__ = docstrings.get(clsname)
return clsobj | def make_class(clsname, func, attrs):
"""Turn a funcs list element into a class object."""
clsdict = {"__set__": create_setter(func, attrs)}
if len(attrs) > 0:
clsdict["__init__"] = create_init(attrs)
clsobj = type(str(clsname), (Descriptor, ), clsdict)
clsobj.__doc__ = docstrings.get(clsname)
return clsobj | [
"Turn",
"a",
"funcs",
"list",
"element",
"into",
"a",
"class",
"object",
"."
] | bheinzerling/descriptors | python | https://github.com/bheinzerling/descriptors/blob/04fff864649fba9bd6a2d8f8b649cf30994e0e46/descriptors/massproduced.py#L80-L87 | [
"def",
"make_class",
"(",
"clsname",
",",
"func",
",",
"attrs",
")",
":",
"clsdict",
"=",
"{",
"\"__set__\"",
":",
"create_setter",
"(",
"func",
",",
"attrs",
")",
"}",
"if",
"len",
"(",
"attrs",
")",
">",
"0",
":",
"clsdict",
"[",
"\"__init__\"",
"]",
"=",
"create_init",
"(",
"attrs",
")",
"clsobj",
"=",
"type",
"(",
"str",
"(",
"clsname",
")",
",",
"(",
"Descriptor",
",",
")",
",",
"clsdict",
")",
"clsobj",
".",
"__doc__",
"=",
"docstrings",
".",
"get",
"(",
"clsname",
")",
"return",
"clsobj"
] | 04fff864649fba9bd6a2d8f8b649cf30994e0e46 |
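A sketch of the same three-argument `type()` pattern that `make_class` relies on, reusing `create_setter` and `create_init` from the rows above. The `at_least` check function and the class name are assumptions standing in for an entry of the module's `funcs` list.

```python
def at_least(value, minimum):
    # check function in the style the funcs list appears to use
    return value >= minimum


AtLeast = type("AtLeast", (Descriptor,), {
    "__init__": create_init(["minimum"]),
    "__set__": create_setter(at_least, ["minimum"]),
})
# AtLeast(5) is now a descriptor that rejects values below 5
```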
test | DashboardRunner.cycle | Cycles through notifications with latest results from data feeds. | doodledashboard/dashboard.py | def cycle(self):
"""
Cycles through notifications with latest results from data feeds.
"""
messages = self.poll_datafeeds()
notifications = self.process_notifications(messages)
self.draw_notifications(notifications) | def cycle(self):
"""
Cycles through notifications with latest results from data feeds.
"""
messages = self.poll_datafeeds()
notifications = self.process_notifications(messages)
self.draw_notifications(notifications) | [
"Cycles",
"through",
"notifications",
"with",
"latest",
"results",
"from",
"data",
"feeds",
"."
] | SketchingDev/Doodle-Dashboard | python | https://github.com/SketchingDev/Doodle-Dashboard/blob/4d7f4c248875f82a962c275009aac4aa76bd0320/doodledashboard/dashboard.py#L39-L46 | [
"def",
"cycle",
"(",
"self",
")",
":",
"messages",
"=",
"self",
".",
"poll_datafeeds",
"(",
")",
"notifications",
"=",
"self",
".",
"process_notifications",
"(",
"messages",
")",
"self",
".",
"draw_notifications",
"(",
"notifications",
")"
] | 4d7f4c248875f82a962c275009aac4aa76bd0320 |
test | ForceNumeric.try_convert | Convert value to a numeric value or raise a ValueError
if that isn't possible. | descriptors/handmade.py | def try_convert(value):
"""Convert value to a numeric value or raise a ValueError
if that isn't possible.
"""
convertible = ForceNumeric.is_convertible(value)
if not convertible or isinstance(value, bool):
raise ValueError
if isinstance(str(value), str):
return ForceNumeric.str_to_num(value)
return float(value) | def try_convert(value):
"""Convert value to a numeric value or raise a ValueError
if that isn't possible.
"""
convertible = ForceNumeric.is_convertible(value)
if not convertible or isinstance(value, bool):
raise ValueError
if isinstance(str(value), str):
return ForceNumeric.str_to_num(value)
return float(value) | [
"Convert",
"value",
"to",
"a",
"numeric",
"value",
"or",
"raise",
"a",
"ValueError",
"if",
"that",
"isn",
"t",
"possible",
"."
] | bheinzerling/descriptors | python | https://github.com/bheinzerling/descriptors/blob/04fff864649fba9bd6a2d8f8b649cf30994e0e46/descriptors/handmade.py#L143-L153 | [
"def",
"try_convert",
"(",
"value",
")",
":",
"convertible",
"=",
"ForceNumeric",
".",
"is_convertible",
"(",
"value",
")",
"if",
"not",
"convertible",
"or",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"raise",
"ValueError",
"if",
"isinstance",
"(",
"str",
"(",
"value",
")",
",",
"str",
")",
":",
"return",
"ForceNumeric",
".",
"str_to_num",
"(",
"value",
")",
"return",
"float",
"(",
"value",
")"
] | 04fff864649fba9bd6a2d8f8b649cf30994e0e46 |
test | ForceNumeric.str_to_num | Convert str_value to an int or a float, depending on the
numeric value represented by str_value. | descriptors/handmade.py | def str_to_num(str_value):
"""Convert str_value to an int or a float, depending on the
numeric value represented by str_value.
"""
str_value = str(str_value)
try:
return int(str_value)
except ValueError:
return float(str_value) | def str_to_num(str_value):
"""Convert str_value to an int or a float, depending on the
numeric value represented by str_value.
"""
str_value = str(str_value)
try:
return int(str_value)
except ValueError:
return float(str_value) | [
"Convert",
"str_value",
"to",
"an",
"int",
"or",
"a",
"float",
"depending",
"on",
"the",
"numeric",
"value",
"represented",
"by",
"str_value",
"."
] | bheinzerling/descriptors | python | https://github.com/bheinzerling/descriptors/blob/04fff864649fba9bd6a2d8f8b649cf30994e0e46/descriptors/handmade.py#L167-L176 | [
"def",
"str_to_num",
"(",
"str_value",
")",
":",
"str_value",
"=",
"str",
"(",
"str_value",
")",
"try",
":",
"return",
"int",
"(",
"str_value",
")",
"except",
"ValueError",
":",
"return",
"float",
"(",
"str_value",
")"
] | 04fff864649fba9bd6a2d8f8b649cf30994e0e46 |
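Behavior sketch: `int()` is tried first, so integral strings stay integers, while anything with a fractional part falls through to `float()`:

```python
>>> ForceNumeric.str_to_num("42")
42
>>> ForceNumeric.str_to_num("42.5")   # int("42.5") raises; float() succeeds
42.5
>>> ForceNumeric.str_to_num(3)        # non-strings are stringified first
3
```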
test | plot | Tag to plot graphs into the template | flot/templatetags/flot_tags.py | def plot(parser, token):
"""
Tag to plot graphs into the template
"""
tokens = token.split_contents()
tokens.pop(0)
graph = tokens.pop(0)
attrs = dict([token.split("=") for token in tokens])
if 'id' not in attrs.keys():
attrs['id'] = ''.join([chr(choice(range(65, 90))) for i in range(0, 5)])
else:
attrs['id'] = attrs['id'][1:len(attrs['id'])-1]
attr_string = ''.join([" %s=%s" % (k, v) for k, v in attrs.iteritems()])
return GraphRenderer(graph, attr_string, attrs['id']) | def plot(parser, token):
"""
Tag to plot graphs into the template
"""
tokens = token.split_contents()
tokens.pop(0)
graph = tokens.pop(0)
attrs = dict([token.split("=") for token in tokens])
if 'id' not in attrs.keys():
attrs['id'] = ''.join([chr(choice(range(65, 90))) for i in range(0, 5)])
else:
attrs['id'] = attrs['id'][1:len(attrs['id'])-1]
attr_string = ''.join([" %s=%s" % (k, v) for k, v in attrs.iteritems()])
return GraphRenderer(graph, attr_string, attrs['id']) | [
"Tag",
"to",
"plot",
"graphs",
"into",
"the",
"template"
] | andrefsp/pyflot | python | https://github.com/andrefsp/pyflot/blob/f2dde10709aeed39074fcce8172184b5cd8bfd66/flot/templatetags/flot_tags.py#L31-L48 | [
"def",
"plot",
"(",
"parser",
",",
"token",
")",
":",
"tokens",
"=",
"token",
".",
"split_contents",
"(",
")",
"tokens",
".",
"pop",
"(",
"0",
")",
"graph",
"=",
"tokens",
".",
"pop",
"(",
"0",
")",
"attrs",
"=",
"dict",
"(",
"[",
"token",
".",
"split",
"(",
"\"=\"",
")",
"for",
"token",
"in",
"tokens",
"]",
")",
"if",
"'id'",
"not",
"in",
"attrs",
".",
"keys",
"(",
")",
":",
"attrs",
"[",
"'id'",
"]",
"=",
"''",
".",
"join",
"(",
"[",
"chr",
"(",
"choice",
"(",
"range",
"(",
"65",
",",
"90",
")",
")",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"5",
")",
"]",
")",
"else",
":",
"attrs",
"[",
"'id'",
"]",
"=",
"attrs",
"[",
"'id'",
"]",
"[",
"1",
":",
"len",
"(",
"attrs",
"[",
"'id'",
"]",
")",
"-",
"1",
"]",
"attr_string",
"=",
"''",
".",
"join",
"(",
"[",
"\" %s=%s\"",
"%",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"attrs",
".",
"iteritems",
"(",
")",
"]",
")",
"return",
"GraphRenderer",
"(",
"graph",
",",
"attr_string",
",",
"attrs",
"[",
"'id'",
"]",
")"
] | f2dde10709aeed39074fcce8172184b5cd8bfd66 |
test | force_unicode | Try really really hard to get a Unicode copy of a string.
First try :class:`BeautifulSoup.UnicodeDammit` to try to force
to Unicode; if that fails, assume UTF-8 encoding, and ignore
all errors.
:param str raw: string to coerce
:return: Unicode approximation of `raw`
:returntype: :class:`unicode` | streamcorpus_pipeline/_clean_html.py | def force_unicode(raw):
'''Try really really hard to get a Unicode copy of a string.
First try :class:`BeautifulSoup.UnicodeDammit` to try to force
to Unicode; if that fails, assume UTF-8 encoding, and ignore
all errors.
:param str raw: string to coerce
:return: Unicode approximation of `raw`
:returntype: :class:`unicode`
'''
converted = UnicodeDammit(raw, isHTML=True)
if not converted.unicode:
converted.unicode = unicode(raw, 'utf8', errors='ignore')
encoding_m = encoding_re.match(converted.unicode)
if encoding_m:
converted.unicode = \
encoding_m.group('start_xml') + \
encoding_m.group('remainder')
return converted.unicode | def force_unicode(raw):
'''Try really really hard to get a Unicode copy of a string.
First try :class:`BeautifulSoup.UnicodeDammit` to try to force
to Unicode; if that fails, assume UTF-8 encoding, and ignore
all errors.
:param str raw: string to coerce
:return: Unicode approximation of `raw`
:returntype: :class:`unicode`
'''
converted = UnicodeDammit(raw, isHTML=True)
if not converted.unicode:
converted.unicode = unicode(raw, 'utf8', errors='ignore')
encoding_m = encoding_re.match(converted.unicode)
if encoding_m:
converted.unicode = \
encoding_m.group('start_xml') + \
encoding_m.group('remainder')
return converted.unicode | [
"Try",
"really",
"really",
"hard",
"to",
"get",
"a",
"Unicode",
"copy",
"of",
"a",
"string",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_clean_html.py#L51-L73 | [
"def",
"force_unicode",
"(",
"raw",
")",
":",
"converted",
"=",
"UnicodeDammit",
"(",
"raw",
",",
"isHTML",
"=",
"True",
")",
"if",
"not",
"converted",
".",
"unicode",
":",
"converted",
".",
"unicode",
"=",
"unicode",
"(",
"raw",
",",
"'utf8'",
",",
"errors",
"=",
"'ignore'",
")",
"encoding_m",
"=",
"encoding_re",
".",
"match",
"(",
"converted",
".",
"unicode",
")",
"if",
"encoding_m",
":",
"converted",
".",
"unicode",
"=",
"encoding_m",
".",
"group",
"(",
"'start_xml'",
")",
"+",
"encoding_m",
".",
"group",
"(",
"'remainder'",
")",
"return",
"converted",
".",
"unicode"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
test | make_clean_html | Get a clean text representation of presumed HTML.
Treat `raw` as though it is HTML, even if we have no idea what it
really is, and attempt to get a properly formatted HTML document
with all HTML-escaped characters converted to their unicode.
This is called below by the `clean_html` transform stage, which
interprets MIME-type. If `character_encoding` is not provided,
and `stream_item` is provided, then this falles back to
:attr:`streamcorpus.StreamItem.body.encoding`.
:param str raw: raw text to clean up
:param stream_item: optional stream item with encoding metadata
:type stream_item: :class:`streamcorpus.StreamItem`
:returns: UTF-8-encoded byte string of cleaned HTML text
:returntype: :class:`str` | streamcorpus_pipeline/_clean_html.py | def make_clean_html(raw, stream_item=None, encoding=None):
'''Get a clean text representation of presumed HTML.
Treat `raw` as though it is HTML, even if we have no idea what it
really is, and attempt to get a properly formatted HTML document
with all HTML-escaped characters converted to their unicode.
This is called below by the `clean_html` transform stage, which
interprets MIME-type. If `character_encoding` is not provided,
and `stream_item` is provided, then this falls back to
:attr:`streamcorpus.StreamItem.body.encoding`.
:param str raw: raw text to clean up
:param stream_item: optional stream item with encoding metadata
:type stream_item: :class:`streamcorpus.StreamItem`
:returns: UTF-8-encoded byte string of cleaned HTML text
:returntype: :class:`str`
'''
# Fix emails by protecting the <,> from HTML
raw = fix_emails(raw)
raw_decoded = nice_decode(raw, stream_item=stream_item, encoding=encoding)
if raw_decoded is None:
# give up on decoding it... maybe this should use force_unicode
raw_decoded = raw
# default attempt uses vanilla lxml.html
try:
root = lxml.html.document_fromstring(raw_decoded)
except ValueError, exc:
if 'with encoding declaration' in str(exc):
root = lxml.html.document_fromstring(raw)
else:
raise
# While we have the document parsed as a DOM, let's strip attributes.
# (The HTML cleaner seems to only support whitelisting attributes.
# As of now, we just want to blacklist a few.)
lxml.etree.strip_attributes(root, 'class', 'id')
# if that worked, then we will be able to generate a
# valid HTML string
fixed_html = lxml.html.tostring(root, encoding=unicode)
# remove any ^M characters
fixed_html = string.replace(fixed_html, '\r', ' ')
# We drop utf8 characters that are above 0xFFFF as
# Lingpipe seems to be doing the wrong thing with them.
fixed_html = drop_invalid_and_upper_utf8_chars(fixed_html)
# construct a Cleaner that removes any ``<script>`` tags,
# Javascript, like an ``onclick`` attribute, comments, style
# tags or attributes, ``<link>`` tags
cleaner = lxml.html.clean.Cleaner(
scripts=True, javascript=True,
comments=True,
# do not remove <html> <head> <title> etc
page_structure=False,
remove_tags=['base'],
style=True, links=True)
# now get the really sanitized HTML
_clean_html = cleaner.clean_html(fixed_html)
# generate pretty HTML in utf-8
_clean_html = lxml.html.tostring(
lxml.html.document_fromstring(_clean_html),
method='html', encoding='utf-8',
pretty_print=True,
# include_meta_content_type=True
)
return uniform_html(_clean_html) | def make_clean_html(raw, stream_item=None, encoding=None):
'''Get a clean text representation of presumed HTML.
Treat `raw` as though it is HTML, even if we have no idea what it
really is, and attempt to get a properly formatted HTML document
with all HTML-escaped characters converted to their unicode.
This is called below by the `clean_html` transform stage, which
interprets MIME-type. If `character_encoding` is not provided,
and `stream_item` is provided, then this falls back to
:attr:`streamcorpus.StreamItem.body.encoding`.
:param str raw: raw text to clean up
:param stream_item: optional stream item with encoding metadata
:type stream_item: :class:`streamcorpus.StreamItem`
:returns: UTF-8-encoded byte string of cleaned HTML text
:returntype: :class:`str`
'''
# Fix emails by protecting the <,> from HTML
raw = fix_emails(raw)
raw_decoded = nice_decode(raw, stream_item=stream_item, encoding=encoding)
if raw_decoded is None:
# give up on decoding it... maybe this should use force_unicode
raw_decoded = raw
# default attempt uses vanilla lxml.html
try:
root = lxml.html.document_fromstring(raw_decoded)
except ValueError, exc:
if 'with encoding declaration' in str(exc):
root = lxml.html.document_fromstring(raw)
else:
raise
# While we have the document parsed as a DOM, let's strip attributes.
# (The HTML cleaner seems to only support whitelisting attributes.
# As of now, we just want to blacklist a few.)
lxml.etree.strip_attributes(root, 'class', 'id')
# if that worked, then we will be able to generate a
# valid HTML string
fixed_html = lxml.html.tostring(root, encoding=unicode)
# remove any ^M characters
fixed_html = string.replace(fixed_html, '\r', ' ')
# We drop utf8 characters that are above 0xFFFF as
# Lingpipe seems to be doing the wrong thing with them.
fixed_html = drop_invalid_and_upper_utf8_chars(fixed_html)
# construct a Cleaner that removes any ``<script>`` tags,
# Javascript, like an ``onclick`` attribute, comments, style
# tags or attributes, ``<link>`` tags
cleaner = lxml.html.clean.Cleaner(
scripts=True, javascript=True,
comments=True,
# do not remove <html> <head> <title> etc
page_structure=False,
remove_tags=['base'],
style=True, links=True)
# now get the really sanitized HTML
_clean_html = cleaner.clean_html(fixed_html)
# generate pretty HTML in utf-8
_clean_html = lxml.html.tostring(
lxml.html.document_fromstring(_clean_html),
method='html', encoding='utf-8',
pretty_print=True,
# include_meta_content_type=True
)
return uniform_html(_clean_html) | [
"Get",
"a",
"clean",
"text",
"representation",
"of",
"presumed",
"HTML",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_clean_html.py#L111-L184 | [
"def",
"make_clean_html",
"(",
"raw",
",",
"stream_item",
"=",
"None",
",",
"encoding",
"=",
"None",
")",
":",
"# Fix emails by protecting the <,> from HTML",
"raw",
"=",
"fix_emails",
"(",
"raw",
")",
"raw_decoded",
"=",
"nice_decode",
"(",
"raw",
",",
"stream_item",
"=",
"stream_item",
",",
"encoding",
"=",
"encoding",
")",
"if",
"raw_decoded",
"is",
"None",
":",
"# give up on decoding it... maybe this should use force_unicode",
"raw_decoded",
"=",
"raw",
"# default attempt uses vanilla lxml.html",
"try",
":",
"root",
"=",
"lxml",
".",
"html",
".",
"document_fromstring",
"(",
"raw_decoded",
")",
"except",
"ValueError",
",",
"exc",
":",
"if",
"'with encoding declaration'",
"in",
"str",
"(",
"exc",
")",
":",
"root",
"=",
"lxml",
".",
"html",
".",
"document_fromstring",
"(",
"raw",
")",
"else",
":",
"raise",
"# While we have the document parsed as a DOM, let's strip attributes.",
"# (The HTML cleaner seems to only support whitelisting attributes.",
"# As of now, we just want to blacklist a few.)",
"lxml",
".",
"etree",
".",
"strip_attributes",
"(",
"root",
",",
"'class'",
",",
"'id'",
")",
"# if that worked, then we will be able to generate a",
"# valid HTML string",
"fixed_html",
"=",
"lxml",
".",
"html",
".",
"tostring",
"(",
"root",
",",
"encoding",
"=",
"unicode",
")",
"# remove any ^M characters",
"fixed_html",
"=",
"string",
".",
"replace",
"(",
"fixed_html",
",",
"'\\r'",
",",
"' '",
")",
"# We drop utf8 characters that are above 0xFFFF as",
"# Lingpipe seems to be doing the wrong thing with them.",
"fixed_html",
"=",
"drop_invalid_and_upper_utf8_chars",
"(",
"fixed_html",
")",
"# construct a Cleaner that removes any ``<script>`` tags,",
"# Javascript, like an ``onclick`` attribute, comments, style",
"# tags or attributes, ``<link>`` tags",
"cleaner",
"=",
"lxml",
".",
"html",
".",
"clean",
".",
"Cleaner",
"(",
"scripts",
"=",
"True",
",",
"javascript",
"=",
"True",
",",
"comments",
"=",
"True",
",",
"# do not remove <html> <head> <title> etc",
"page_structure",
"=",
"False",
",",
"remove_tags",
"=",
"[",
"'base'",
"]",
",",
"style",
"=",
"True",
",",
"links",
"=",
"True",
")",
"# now get the really sanitized HTML",
"_clean_html",
"=",
"cleaner",
".",
"clean_html",
"(",
"fixed_html",
")",
"# generate pretty HTML in utf-8",
"_clean_html",
"=",
"lxml",
".",
"html",
".",
"tostring",
"(",
"lxml",
".",
"html",
".",
"document_fromstring",
"(",
"_clean_html",
")",
",",
"method",
"=",
"'html'",
",",
"encoding",
"=",
"'utf-8'",
",",
"pretty_print",
"=",
"True",
",",
"# include_meta_content_type=True",
")",
"return",
"uniform_html",
"(",
"_clean_html",
")"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
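A quick illustrative call (Python 2, matching the `except ValueError, exc` syntax above); the exact pretty-printed output depends on the lxml version:

```python
dirty = ('<html><body onclick="evil()">'
         '<p class="c" id="p1">Hi<script>evil()</script></p>'
         '</body></html>')
print make_clean_html(dirty)
# the <script> element, the onclick handler, and the class/id
# attributes are all stripped; the result is pretty-printed,
# UTF-8-encoded HTML
```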
test | uniform_html | Takes a utf-8-encoded string of HTML as input and returns a new
HTML string with fixed quoting and close tags, which generally
should not break any of the offsets and makes it easier for
functions like
:func:`streamcorpus_pipeline.offsets.char_offsets_to_xpaths` to
operate without failures. | streamcorpus_pipeline/_clean_html.py | def uniform_html(html):
'''Takes a utf-8-encoded string of HTML as input and returns a new
HTML string with fixed quoting and close tags, which generally
should not break any of the offsets and makes it easier for
functions like
:func:`streamcorpus_pipeline.offsets.char_offsets_to_xpaths` to
operate without failures.
'''
doc = html5lib.parse(html.decode('utf-8'))
config = {
'omit_optional_tags': False,
'encoding': 'utf-8',
'quote_attr_values': 'always',
}
return html5lib.serializer.serialize(doc, **config) | def uniform_html(html):
'''Takes a utf-8-encoded string of HTML as input and returns a new
HTML string with fixed quoting and close tags, which generally
should not break any of the offsets and makes it easier for
functions like
:func:`streamcorpus_pipeline.offsets.char_offsets_to_xpaths` to
operate without failures.
'''
doc = html5lib.parse(html.decode('utf-8'))
config = {
'omit_optional_tags': False,
'encoding': 'utf-8',
'quote_attr_values': 'always',
}
return html5lib.serializer.serialize(doc, **config) | [
"Takes",
"a",
"utf",
"-",
"8",
"-",
"encoded",
"string",
"of",
"HTML",
"as",
"input",
"and",
"returns",
"a",
"new",
"HTML",
"string",
"with",
"fixed",
"quoting",
"and",
"close",
"tags",
"which",
"generally",
"should",
"not",
"break",
"any",
"of",
"the",
"offsets",
"and",
"makes",
"it",
"easier",
"for",
"functions",
"like",
":",
"func",
":",
"streamcorpus_pipeline",
".",
"offsets",
".",
"char_offsets_to_xpaths",
"to",
"operate",
"without",
"failures",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_clean_html.py#L187-L202 | [
"def",
"uniform_html",
"(",
"html",
")",
":",
"doc",
"=",
"html5lib",
".",
"parse",
"(",
"html",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"config",
"=",
"{",
"'omit_optional_tags'",
":",
"False",
",",
"'encoding'",
":",
"'utf-8'",
",",
"'quote_attr_values'",
":",
"'always'",
",",
"}",
"return",
"html5lib",
".",
"serializer",
".",
"serialize",
"(",
"doc",
",",
"*",
"*",
"config",
")"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
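The same `html5lib` round trip in isolation, with the serializer options from the `config` dict above; the string form `'always'` for `quote_attr_values` assumes a reasonably recent html5lib release:

```python
import html5lib

doc = html5lib.parse(u'<p class=x>unclosed')
print html5lib.serializer.serialize(
    doc, omit_optional_tags=False, encoding='utf-8',
    quote_attr_values='always')
# roughly: <html><head></head><body><p class="x">unclosed</p></body></html>
```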
test | clean_html.is_matching_mime_type | This implements the MIME-type matching logic for deciding whether
to run `make_clean_html` | streamcorpus_pipeline/_clean_html.py | def is_matching_mime_type(self, mime_type):
'''This implements the MIME-type matching logic for deciding whether
to run `make_clean_html`
'''
if len(self.include_mime_types) == 0:
return True
if mime_type is None:
return False
mime_type = mime_type.lower()
# NB: startswith is necessary here, because encodings are
# often appended to HTTP header Content-Type
return any(mime_type.startswith(mt) for mt in self.include_mime_types) | def is_matching_mime_type(self, mime_type):
'''This implements the MIME-type matching logic for deciding whether
to run `make_clean_html`
'''
if len(self.include_mime_types) == 0:
return True
if mime_type is None:
return False
mime_type = mime_type.lower()
# NB: startswith is necessary here, because encodings are
# often appended to HTTP header Content-Type
return any(mime_type.startswith(mt) for mt in self.include_mime_types) | [
"This",
"implements",
"the",
"MIME",
"-",
"type",
"matching",
"logic",
"for",
"deciding",
"whether",
"to",
"run",
"make_clean_html"
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_clean_html.py#L250-L262 | [
"def",
"is_matching_mime_type",
"(",
"self",
",",
"mime_type",
")",
":",
"if",
"len",
"(",
"self",
".",
"include_mime_types",
")",
"==",
"0",
":",
"return",
"True",
"if",
"mime_type",
"is",
"None",
":",
"return",
"False",
"mime_type",
"=",
"mime_type",
".",
"lower",
"(",
")",
"# NB: startswith is necessary here, because encodings are",
"# often appended to HTTP header Content-Type",
"return",
"any",
"(",
"mime_type",
".",
"startswith",
"(",
"mt",
")",
"for",
"mt",
"in",
"self",
".",
"include_mime_types",
")"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
test | domain_name_cleanse | extract a lower-case, no-slashes domain name from a raw string
that might be a URL | streamcorpus_pipeline/_filters.py | def domain_name_cleanse(raw_string):
'''extract a lower-case, no-slashes domain name from a raw string
that might be a URL
'''
try:
parts = urlparse(raw_string)
domain = parts.netloc.split(':')[0]
except:
domain = ''
if not domain:
domain = raw_string
if not domain:
return ''
domain = re.sub('\/', '', domain.strip().lower())
return domain | def domain_name_cleanse(raw_string):
'''extract a lower-case, no-slashes domain name from a raw string
that might be a URL
'''
try:
parts = urlparse(raw_string)
domain = parts.netloc.split(':')[0]
except:
domain = ''
if not domain:
domain = raw_string
if not domain:
return ''
domain = re.sub('\/', '', domain.strip().lower())
return domain | [
"extract",
"a",
"lower",
"-",
"case",
"no",
"-",
"slashes",
"domain",
"name",
"from",
"a",
"raw",
"string",
"that",
"might",
"be",
"a",
"URL"
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_filters.py#L253-L267 | [
"def",
"domain_name_cleanse",
"(",
"raw_string",
")",
":",
"try",
":",
"parts",
"=",
"urlparse",
"(",
"raw_string",
")",
"domain",
"=",
"parts",
".",
"netloc",
".",
"split",
"(",
"':'",
")",
"[",
"0",
"]",
"except",
":",
"domain",
"=",
"''",
"if",
"not",
"domain",
":",
"domain",
"=",
"raw_string",
"if",
"not",
"domain",
":",
"return",
"''",
"domain",
"=",
"re",
".",
"sub",
"(",
"'\\/'",
",",
"''",
",",
"domain",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
")",
"return",
"domain"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
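Example behavior; because of the bare `except` and the empty-netloc fallback, anything unparseable simply degrades to the cleaned raw string:

```python
>>> domain_name_cleanse('https://News.Example.com:8080/path')
'news.example.com'
>>> domain_name_cleanse('Example.COM/')   # no scheme, so netloc is empty
'example.com'
```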
test | domain_name_left_cuts | returns a list of strings created by splitting the domain on
'.' and successively cutting off the left most portion | streamcorpus_pipeline/_filters.py | def domain_name_left_cuts(domain):
'''returns a list of strings created by splitting the domain on
'.' and successively cutting off the left most portion
'''
cuts = []
if domain:
parts = domain.split('.')
for i in range(len(parts)):
cuts.append( '.'.join(parts[i:]))
return cuts | def domain_name_left_cuts(domain):
'''returns a list of strings created by splitting the domain on
'.' and successively cutting off the left most portion
'''
cuts = []
if domain:
parts = domain.split('.')
for i in range(len(parts)):
cuts.append( '.'.join(parts[i:]))
return cuts | [
"returns",
"a",
"list",
"of",
"strings",
"created",
"by",
"splitting",
"the",
"domain",
"on",
".",
"and",
"successively",
"cutting",
"off",
"the",
"left",
"most",
"portion"
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_filters.py#L269-L278 | [
"def",
"domain_name_left_cuts",
"(",
"domain",
")",
":",
"cuts",
"=",
"[",
"]",
"if",
"domain",
":",
"parts",
"=",
"domain",
".",
"split",
"(",
"'.'",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"parts",
")",
")",
":",
"cuts",
".",
"append",
"(",
"'.'",
".",
"join",
"(",
"parts",
"[",
"i",
":",
"]",
")",
")",
"return",
"cuts"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
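For example:

```python
>>> domain_name_left_cuts('www.example.com')
['www.example.com', 'example.com', 'com']
```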
test | keyword_indexer.make_hash_kw | Get a Murmur hash and a normalized token.
`tok` may be a :class:`unicode` string or a UTF-8-encoded
byte string. :data:`DOCUMENT_HASH_KEY`, hash value 0, is
reserved for the document count, and this function remaps
that value.
:param tok: token to hash
:return: pair of normalized `tok` and its hash | streamcorpus_pipeline/_kvlayer_keyword_search.py | def make_hash_kw(self, tok):
'''Get a Murmur hash and a normalized token.
`tok` may be a :class:`unicode` string or a UTF-8-encoded
byte string. :data:`DOCUMENT_HASH_KEY`, hash value 0, is
reserved for the document count, and this function remaps
that value.
:param tok: token to hash
:return: pair of normalized `tok` and its hash
'''
if isinstance(tok, unicode):
tok = tok.encode('utf-8')
h = mmh3.hash(tok)
if h == DOCUMENT_HASH_KEY:
h = DOCUMENT_HASH_KEY_REPLACEMENT
return (tok, h) | def make_hash_kw(self, tok):
'''Get a Murmur hash and a normalized token.
`tok` may be a :class:`unicode` string or a UTF-8-encoded
byte string. :data:`DOCUMENT_HASH_KEY`, hash value 0, is
reserved for the document count, and this function remaps
that value.
:param tok: token to hash
:return: pair of normalized `tok` and its hash
'''
if isinstance(tok, unicode):
tok = tok.encode('utf-8')
h = mmh3.hash(tok)
if h == DOCUMENT_HASH_KEY:
h = DOCUMENT_HASH_KEY_REPLACEMENT
return (tok, h) | [
"Get",
"a",
"Murmur",
"hash",
"and",
"a",
"normalized",
"token",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_kvlayer_keyword_search.py#L85-L102 | [
"def",
"make_hash_kw",
"(",
"self",
",",
"tok",
")",
":",
"if",
"isinstance",
"(",
"tok",
",",
"unicode",
")",
":",
"tok",
"=",
"tok",
".",
"encode",
"(",
"'utf-8'",
")",
"h",
"=",
"mmh3",
".",
"hash",
"(",
"tok",
")",
"if",
"h",
"==",
"DOCUMENT_HASH_KEY",
":",
"h",
"=",
"DOCUMENT_HASH_KEY_REPLACEMENT",
"return",
"(",
"tok",
",",
"h",
")"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
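A sketch of the underlying `mmh3` call: the hash is a signed 32-bit Murmur3 value, and 0 is remapped because `DOCUMENT_HASH_KEY` (0) is reserved for the per-corpus document count. The literal `1` below is only a stand-in for `DOCUMENT_HASH_KEY_REPLACEMENT`, whose real value is defined elsewhere in the module.

```python
import mmh3

tok = u'caf\xe9'.encode('utf-8')   # make_hash_kw normalizes to UTF-8 bytes
h = mmh3.hash(tok)                 # signed 32-bit Murmur3 hash
if h == 0:                         # DOCUMENT_HASH_KEY is reserved
    h = 1                          # stand-in for the replacement constant
print (tok, h)
```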
test | keyword_indexer.collect_words | Collect all of the words to be indexed from a stream item.
This scans `si` for all of the configured tagger IDs. It
collects all of the token values (the
:attr:`streamcorpus.Token.token`) and returns a
:class:`collections.Counter` of them.
:param si: stream item to scan
:type si: :class:`streamcorpus.StreamItem`
:return: counter of :class:`unicode` words to index
:returntype: :class:`collections.Counter` | streamcorpus_pipeline/_kvlayer_keyword_search.py | def collect_words(self, si):
'''Collect all of the words to be indexed from a stream item.
This scans `si` for all of the configured tagger IDs. It
collects all of the token values (the
:attr:`streamcorpus.Token.token`) and returns a
:class:`collections.Counter` of them.
:param si: stream item to scan
:type si: :class:`streamcorpus.StreamItem`
:return: counter of :class:`unicode` words to index
:returntype: :class:`collections.Counter`
'''
counter = Counter()
for tagger_id, sentences in si.body.sentences.iteritems():
if ((self.keyword_tagger_ids is not None
and tagger_id not in self.keyword_tagger_ids)):
continue
for sentence in sentences:
for token in sentence.tokens:
term = token.token # always a UTF-8 byte string
term = term.decode('utf-8')
term = cleanse(term)
if ((self.keyword_size_limit is not None and
len(term) > self.keyword_size_limit)):
continue
if term not in self.stop_words:
counter[term] += 1
return counter | def collect_words(self, si):
'''Collect all of the words to be indexed from a stream item.
This scans `si` for all of the configured tagger IDs. It
collects all of the token values (the
:attr:`streamcorpus.Token.token`) and returns a
:class:`collections.Counter` of them.
:param si: stream item to scan
:type si: :class:`streamcorpus.StreamItem`
:return: counter of :class:`unicode` words to index
:returntype: :class:`collections.Counter`
'''
counter = Counter()
for tagger_id, sentences in si.body.sentences.iteritems():
if ((self.keyword_tagger_ids is not None
and tagger_id not in self.keyword_tagger_ids)):
continue
for sentence in sentences:
for token in sentence.tokens:
term = token.token # always a UTF-8 byte string
term = term.decode('utf-8')
term = cleanse(term)
if ((self.keyword_size_limit is not None and
len(term) > self.keyword_size_limit)):
continue
if term not in self.stop_words:
counter[term] += 1
return counter | [
"Collect",
"all",
"of",
"the",
"words",
"to",
"be",
"indexed",
"from",
"a",
"stream",
"item",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_kvlayer_keyword_search.py#L104-L133 | [
"def",
"collect_words",
"(",
"self",
",",
"si",
")",
":",
"counter",
"=",
"Counter",
"(",
")",
"for",
"tagger_id",
",",
"sentences",
"in",
"si",
".",
"body",
".",
"sentences",
".",
"iteritems",
"(",
")",
":",
"if",
"(",
"(",
"self",
".",
"keyword_tagger_ids",
"is",
"not",
"None",
"and",
"tagger_id",
"not",
"in",
"self",
".",
"keyword_tagger_ids",
")",
")",
":",
"continue",
"for",
"sentence",
"in",
"sentences",
":",
"for",
"token",
"in",
"sentence",
".",
"tokens",
":",
"term",
"=",
"token",
".",
"token",
"# always a UTF-8 byte string",
"term",
"=",
"term",
".",
"decode",
"(",
"'utf-8'",
")",
"term",
"=",
"cleanse",
"(",
"term",
")",
"if",
"(",
"(",
"self",
".",
"keyword_size_limit",
"is",
"not",
"None",
"and",
"len",
"(",
"term",
")",
">",
"self",
".",
"keyword_size_limit",
")",
")",
":",
"continue",
"if",
"term",
"not",
"in",
"self",
".",
"stop_words",
":",
"counter",
"[",
"term",
"]",
"+=",
"1",
"return",
"counter"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
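The core counting logic, minus the `StreamItem` plumbing; `lower()` here is only a stand-in for the repo's `cleanse()` normalizer:

```python
from collections import Counter

stop_words = set([u'the', u'on'])
tokens = ['The', 'cat', 'sat', 'on', 'the', 'mat']  # UTF-8 byte strings

counter = Counter()
for tok in tokens:
    term = tok.decode('utf-8').lower()   # stand-in for cleanse()
    if term not in stop_words:
        counter[term] += 1
# Counter({u'cat': 1, u'sat': 1, u'mat': 1})
```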
test | keyword_indexer.index | Record index records for a single document.
Which indexes this creates depends on the parameters to the
constructor. This records all of the requested indexes for
a single document. | streamcorpus_pipeline/_kvlayer_keyword_search.py | def index(self, si):
'''Record index records for a single document.
Which indexes this creates depends on the parameters to the
constructor. This records all of the requested indexes for
a single document.
'''
if not si.body.clean_visible:
logger.warn('stream item %s has no clean_visible part, '
'skipping keyword indexing', si.stream_id)
return
# Count tokens in si.clean_visible
# We will recycle hash==0 for "# of documents"
hash_counts = defaultdict(int)
hash_counts[DOCUMENT_HASH_KEY] = 1
hash_kw = defaultdict(int)
words = self.collect_words(si)
for tok, count in words.iteritems():
(tok, tok_hash) = self.make_hash_kw(tok)
hash_counts[tok_hash] += count
hash_kw[tok] = tok_hash
# Convert this and write it out
if self.hash_docs:
(k1, k2) = key_for_stream_item(si)
kvps = [((h, k1, k2), n) for (h, n) in hash_counts.iteritems()
if h != DOCUMENT_HASH_KEY]
self.client.put(HASH_TF_INDEX_TABLE, *kvps)
if self.hash_frequencies:
kvps = [((h,), 1) for h in hash_counts.iterkeys()]
self.client.increment(HASH_FREQUENCY_TABLE, *kvps)
if self.hash_keywords:
kvps = [((h, t), 1) for (t, h) in hash_kw.iteritems()]
self.client.increment(HASH_KEYWORD_INDEX_TABLE, *kvps) | def index(self, si):
'''Record index records for a single document.
Which indexes this creates depends on the parameters to the
constructor. This records all of the requested indexes for
a single document.
'''
if not si.body.clean_visible:
logger.warn('stream item %s has no clean_visible part, '
'skipping keyword indexing', si.stream_id)
return
# Count tokens in si.clean_visible
# We will recycle hash==0 for "# of documents"
hash_counts = defaultdict(int)
hash_counts[DOCUMENT_HASH_KEY] = 1
hash_kw = defaultdict(int)
words = self.collect_words(si)
for tok, count in words.iteritems():
(tok, tok_hash) = self.make_hash_kw(tok)
hash_counts[tok_hash] += count
hash_kw[tok] = tok_hash
# Convert this and write it out
if self.hash_docs:
(k1, k2) = key_for_stream_item(si)
kvps = [((h, k1, k2), n) for (h, n) in hash_counts.iteritems()
if h != DOCUMENT_HASH_KEY]
self.client.put(HASH_TF_INDEX_TABLE, *kvps)
if self.hash_frequencies:
kvps = [((h,), 1) for h in hash_counts.iterkeys()]
self.client.increment(HASH_FREQUENCY_TABLE, *kvps)
if self.hash_keywords:
kvps = [((h, t), 1) for (t, h) in hash_kw.iteritems()]
self.client.increment(HASH_KEYWORD_INDEX_TABLE, *kvps) | [
"Record",
"index",
"records",
"for",
"a",
"single",
"document",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_kvlayer_keyword_search.py#L135-L172 | [
"def",
"index",
"(",
"self",
",",
"si",
")",
":",
"if",
"not",
"si",
".",
"body",
".",
"clean_visible",
":",
"logger",
".",
"warn",
"(",
"'stream item %s has no clean_visible part, '",
"'skipping keyword indexing'",
",",
"si",
".",
"stream_id",
")",
"return",
"# Count tokens in si.clean_visible",
"# We will recycle hash==0 for \"# of documents\"",
"hash_counts",
"=",
"defaultdict",
"(",
"int",
")",
"hash_counts",
"[",
"DOCUMENT_HASH_KEY",
"]",
"=",
"1",
"hash_kw",
"=",
"defaultdict",
"(",
"int",
")",
"words",
"=",
"self",
".",
"collect_words",
"(",
"si",
")",
"for",
"tok",
",",
"count",
"in",
"words",
".",
"iteritems",
"(",
")",
":",
"(",
"tok",
",",
"tok_hash",
")",
"=",
"self",
".",
"make_hash_kw",
"(",
"tok",
")",
"hash_counts",
"[",
"tok_hash",
"]",
"+=",
"count",
"hash_kw",
"[",
"tok",
"]",
"=",
"tok_hash",
"# Convert this and write it out",
"if",
"self",
".",
"hash_docs",
":",
"(",
"k1",
",",
"k2",
")",
"=",
"key_for_stream_item",
"(",
"si",
")",
"kvps",
"=",
"[",
"(",
"(",
"h",
",",
"k1",
",",
"k2",
")",
",",
"n",
")",
"for",
"(",
"h",
",",
"n",
")",
"in",
"hash_counts",
".",
"iteritems",
"(",
")",
"if",
"h",
"!=",
"DOCUMENT_HASH_KEY",
"]",
"self",
".",
"client",
".",
"put",
"(",
"HASH_TF_INDEX_TABLE",
",",
"*",
"kvps",
")",
"if",
"self",
".",
"hash_frequencies",
":",
"kvps",
"=",
"[",
"(",
"(",
"h",
",",
")",
",",
"1",
")",
"for",
"h",
"in",
"hash_counts",
".",
"iterkeys",
"(",
")",
"]",
"self",
".",
"client",
".",
"increment",
"(",
"HASH_FREQUENCY_TABLE",
",",
"*",
"kvps",
")",
"if",
"self",
".",
"hash_keywords",
":",
"kvps",
"=",
"[",
"(",
"(",
"h",
",",
"t",
")",
",",
"1",
")",
"for",
"(",
"t",
",",
"h",
")",
"in",
"hash_kw",
".",
"iteritems",
"(",
")",
"]",
"self",
".",
"client",
".",
"increment",
"(",
"HASH_KEYWORD_INDEX_TABLE",
",",
"*",
"kvps",
")"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
test | keyword_indexer.invert_hash | Get strings that correspond to some hash.
No string will correspond to :data:`DOCUMENT_HASH_KEY`; use
:data:`DOCUMENT_HASH_KEY_REPLACEMENT` instead.
:param int tok_hash: Murmur hash to query
:return: list of :class:`unicode` strings | streamcorpus_pipeline/_kvlayer_keyword_search.py | def invert_hash(self, tok_hash):
'''Get strings that correspond to some hash.
No string will correspond to :data:`DOCUMENT_HASH_KEY`; use
:data:`DOCUMENT_HASH_KEY_REPLACEMENT` instead.
:param int tok_hash: Murmur hash to query
:return: list of :class:`unicode` strings
'''
return [tok_encoded.decode('utf8')
for (_, tok_encoded) in
self.client.scan_keys(HASH_KEYWORD_INDEX_TABLE,
((tok_hash,), (tok_hash,)))] | def invert_hash(self, tok_hash):
'''Get strings that correspond to some hash.
No string will correspond to :data:`DOCUMENT_HASH_KEY`; use
:data:`DOCUMENT_HASH_KEY_REPLACEMENT` instead.
:param int tok_hash: Murmur hash to query
:return: list of :class:`unicode` strings
'''
return [tok_encoded.decode('utf8')
for (_, tok_encoded) in
self.client.scan_keys(HASH_KEYWORD_INDEX_TABLE,
((tok_hash,), (tok_hash,)))] | [
"Get",
"strings",
"that",
"correspond",
"to",
"some",
"hash",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_kvlayer_keyword_search.py#L174-L187 | [
"def",
"invert_hash",
"(",
"self",
",",
"tok_hash",
")",
":",
"return",
"[",
"tok_encoded",
".",
"decode",
"(",
"'utf8'",
")",
"for",
"(",
"_",
",",
"tok_encoded",
")",
"in",
"self",
".",
"client",
".",
"scan_keys",
"(",
"HASH_KEYWORD_INDEX_TABLE",
",",
"(",
"(",
"tok_hash",
",",
")",
",",
"(",
"tok_hash",
",",
")",
")",
")",
"]"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
test | keyword_indexer.document_frequencies | Get document frequencies for a list of hashes.
This will return all zeros unless the index was written with
`hash_frequencies` set. If :data:`DOCUMENT_HASH_KEY` is
included in `hashes`, that value will be returned with the
total number of documents indexed. If you are looking for
documents with that hash, pass
:data:`DOCUMENT_HASH_KEY_REPLACEMENT` instead.
:param hashes: hashes to query
:paramtype hashes: list of :class:`int`
:return: map from hash to document frequency | streamcorpus_pipeline/_kvlayer_keyword_search.py | def document_frequencies(self, hashes):
'''Get document frequencies for a list of hashes.
This will return all zeros unless the index was written with
`hash_frequencies` set. If :data:`DOCUMENT_HASH_KEY` is
included in `hashes`, that value will be returned with the
total number of documents indexed. If you are looking for
documents with that hash, pass
:data:`DOCUMENT_HASH_KEY_REPLACEMENT` instead.
:param hashes: hashes to query
:paramtype hashes: list of :class:`int`
:return: map from hash to document frequency
'''
result = {}
for (k, v) in self.client.get(HASH_FREQUENCY_TABLE,
*[(h,) for h in hashes]):
if v is None:
v = 0
result[k[0]] = v
return result | def document_frequencies(self, hashes):
'''Get document frequencies for a list of hashes.
This will return all zeros unless the index was written with
`hash_frequencies` set. If :data:`DOCUMENT_HASH_KEY` is
included in `hashes`, that value will be returned with the
total number of documents indexed. If you are looking for
documents with that hash, pass
:data:`DOCUMENT_HASH_KEY_REPLACEMENT` instead.
:param hashes: hashes to query
:paramtype hashes: list of :class:`int`
:return: map from hash to document frequency
'''
result = {}
for (k, v) in self.client.get(HASH_FREQUENCY_TABLE,
*[(h,) for h in hashes]):
if v is None:
v = 0
result[k[0]] = v
return result | [
"Get",
"document",
"frequencies",
"for",
"a",
"list",
"of",
"hashes",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_kvlayer_keyword_search.py#L189-L210 | [
"def",
"document_frequencies",
"(",
"self",
",",
"hashes",
")",
":",
"result",
"=",
"{",
"}",
"for",
"(",
"k",
",",
"v",
")",
"in",
"self",
".",
"client",
".",
"get",
"(",
"HASH_FREQUENCY_TABLE",
",",
"*",
"[",
"(",
"h",
",",
")",
"for",
"h",
"in",
"hashes",
"]",
")",
":",
"if",
"v",
"is",
"None",
":",
"v",
"=",
"0",
"result",
"[",
"k",
"[",
"0",
"]",
"]",
"=",
"v",
"return",
"result"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
test | keyword_indexer.lookup | Get stream IDs for a single hash.
This yields strings that can be retrieved using
:func:`streamcorpus_pipeline._kvlayer.get_kvlayer_stream_item`,
or fed back into :mod:`coordinate` or other job queue systems.
Note that for common terms this can return a large number of
stream IDs! This is a scan over a dense region of a
:mod:`kvlayer` table so it should be reasonably efficient,
but be prepared for it to return many documents in a large
corpus. Blindly storing the results in a :class:`list`
may be inadvisable.
This will return nothing unless the index was written with
:attr:`hash_docs` set. No document will correspond to
:data:`DOCUMENT_HASH_KEY`; use
:data:`DOCUMENT_HASH_KEY_REPLACEMENT` instead.
:param int h: Murmur hash to look up | streamcorpus_pipeline/_kvlayer_keyword_search.py | def lookup(self, h):
'''Get stream IDs for a single hash.
This yields strings that can be retrieved using
:func:`streamcorpus_pipeline._kvlayer.get_kvlayer_stream_item`,
or fed back into :mod:`coordinate` or other job queue systems.
Note that for common terms this can return a large number of
stream IDs! This is a scan over a dense region of a
:mod:`kvlayer` table so it should be reasonably efficient,
but be prepared for it to return many documents in a large
corpus. Blindly storing the results in a :class:`list`
may be inadvisable.
This will return nothing unless the index was written with
:attr:`hash_docs` set. No document will correspond to
:data:`DOCUMENT_HASH_KEY`; use
:data:`DOCUMENT_HASH_KEY_REPLACEMENT` instead.
:param int h: Murmur hash to look up
'''
for (_, k1, k2) in self.client.scan_keys(HASH_TF_INDEX_TABLE,
((h,), (h,))):
yield kvlayer_key_to_stream_id((k1, k2)) | def lookup(self, h):
'''Get stream IDs for a single hash.
This yields strings that can be retrieved using
:func:`streamcorpus_pipeline._kvlayer.get_kvlayer_stream_item`,
or fed back into :mod:`coordinate` or other job queue systems.
Note that for common terms this can return a large number of
stream IDs! This is a scan over a dense region of a
:mod:`kvlayer` table so it should be reasonably efficient,
but be prepared for it to return many documents in a large
corpus. Blindly storing the results in a :class:`list`
may be inadvisable.
This will return nothing unless the index was written with
:attr:`hash_docs` set. No document will correspond to
:data:`DOCUMENT_HASH_KEY`; use
:data:`DOCUMENT_HASH_KEY_REPLACEMENT` instead.
:param int h: Murmur hash to look up
'''
for (_, k1, k2) in self.client.scan_keys(HASH_TF_INDEX_TABLE,
((h,), (h,))):
yield kvlayer_key_to_stream_id((k1, k2)) | [
"Get",
"stream",
"IDs",
"for",
"a",
"single",
"hash",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_kvlayer_keyword_search.py#L212-L236 | [
"def",
"lookup",
"(",
"self",
",",
"h",
")",
":",
"for",
"(",
"_",
",",
"k1",
",",
"k2",
")",
"in",
"self",
".",
"client",
".",
"scan_keys",
"(",
"HASH_TF_INDEX_TABLE",
",",
"(",
"(",
"h",
",",
")",
",",
"(",
"h",
",",
")",
")",
")",
":",
"yield",
"kvlayer_key_to_stream_id",
"(",
"(",
"k1",
",",
"k2",
")",
")"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
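A sketch of consuming `lookup` lazily, following the record's warning against materializing the results into a list; `indexer` and `some_hash` are assumed names:

    count = 0
    for stream_id in indexer.lookup(some_hash):  # generator, nothing buffered
        count += 1  # or hand each stream_id to a job queue here
    print('%d matching documents' % count)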
test | keyword_indexer.lookup_tf | Get stream IDs and term frequencies for a single hash.
This yields pairs of strings that can be retrieved using
:func:`streamcorpus_pipeline._kvlayer.get_kvlayer_stream_item`
and the corresponding term frequency.
.. seealso:: :meth:`lookup` | streamcorpus_pipeline/_kvlayer_keyword_search.py | def lookup_tf(self, h):
'''Get stream IDs and term frequencies for a single hash.
This yields pairs of strings that can be retrieved using
:func:`streamcorpus_pipeline._kvlayer.get_kvlayer_stream_item`
and the corresponding term frequency.
.. seealso:: :meth:`lookup`
'''
for ((_, k1, k2), v) in self.client.scan(HASH_TF_INDEX_TABLE,
((h,), (h,))):
yield (kvlayer_key_to_stream_id((k1, k2)), v) | def lookup_tf(self, h):
'''Get stream IDs and term frequencies for a single hash.
This yields pairs of strings that can be retrieved using
:func:`streamcorpus_pipeline._kvlayer.get_kvlayer_stream_item`
and the corresponding term frequency.
.. seealso:: :meth:`lookup`
'''
for ((_, k1, k2), v) in self.client.scan(HASH_TF_INDEX_TABLE,
((h,), (h,))):
yield (kvlayer_key_to_stream_id((k1, k2)), v) | [
"Get",
"stream",
"IDs",
"and",
"term",
"frequencies",
"for",
"a",
"single",
"hash",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_kvlayer_keyword_search.py#L238-L251 | [
"def",
"lookup_tf",
"(",
"self",
",",
"h",
")",
":",
"for",
"(",
"(",
"_",
",",
"k1",
",",
"k2",
")",
",",
"v",
")",
"in",
"self",
".",
"client",
".",
"scan",
"(",
"HASH_TF_INDEX_TABLE",
",",
"(",
"(",
"h",
",",
")",
",",
"(",
"h",
",",
")",
")",
")",
":",
"yield",
"(",
"kvlayer_key_to_stream_id",
"(",
"(",
"k1",
",",
"k2",
")",
")",
",",
"v",
")"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
test | _make_stream_items | Given a spinn3r feed, produce a sequence of valid StreamItems.
Because of goopy Python interactions, you probably need to call
this and re-yield its results, as
>>> with open(filename, 'rb') as f:
... for si in _make_stream_items(f):
... yield si | streamcorpus_pipeline/_spinn3r_feed_storage.py | def _make_stream_items(f):
"""Given a spinn3r feed, produce a sequence of valid StreamItems.
Because of goopy Python interactions, you probably need to call
this and re-yield its results, as
>>> with open(filename, 'rb') as f:
... for si in _make_stream_items(f):
... yield si
"""
reader = ProtoStreamReader(f)
return itertools.ifilter(
lambda x: x is not None,
itertools.imap(_make_stream_item, reader)) | def _make_stream_items(f):
"""Given a spinn3r feed, produce a sequence of valid StreamItems.
Because of goopy Python interactions, you probably need to call
this and re-yield its results, as
>>> with open(filename, 'rb') as f:
... for si in _make_stream_items(f):
... yield si
"""
reader = ProtoStreamReader(f)
return itertools.ifilter(
lambda x: x is not None,
itertools.imap(_make_stream_item, reader)) | [
"Given",
"a",
"spinn3r",
"feed",
"produce",
"a",
"sequence",
"of",
"valid",
"StreamItems",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_spinn3r_feed_storage.py#L226-L240 | [
"def",
"_make_stream_items",
"(",
"f",
")",
":",
"reader",
"=",
"ProtoStreamReader",
"(",
"f",
")",
"return",
"itertools",
".",
"ifilter",
"(",
"lambda",
"x",
":",
"x",
"is",
"not",
"None",
",",
"itertools",
".",
"imap",
"(",
"_make_stream_item",
",",
"reader",
")",
")"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
test | _make_stream_item | Given a single spinn3r feed entry, produce a single StreamItem.
Returns 'None' if a complete item can't be constructed. | streamcorpus_pipeline/_spinn3r_feed_storage.py | def _make_stream_item(entry):
"""Given a single spinn3r feed entry, produce a single StreamItem.
Returns 'None' if a complete item can't be constructed.
"""
# get standard metadata, assuming it's present...
if not hasattr(entry, 'permalink_entry'):
return None
pe = entry.permalink_entry
# ...and create a streamitem...
si = streamcorpus.make_stream_item(
pe.date_found[:-1] + '.0Z',
pe.canonical_link.href.encode('utf8'))
if not si.stream_time:
logger.debug('failed to generate stream_time from {0!r}'
.format(pe.date_found))
return None
if not si.abs_url:
logger.debug('failed to generate abs_url from {0!r}'
.format(pe.canonical_link.href))
return None
# ...filling in the actual data
si.body = _make_content_item(
pe.content,
alternate_data=entry.feed_entry.content.data)
if not si.body:
return None
if not si.body.raw:
return None
if pe.content_extract.data:
si.other_content['extract'] = _make_content_item(pe.content_extract)
si.other_content['title'] = streamcorpus.ContentItem(
raw=pe.title.encode('utf8'),
media_type=pe.content_extract.mime_type,
encoding='UTF-8')
si.other_content['feed_entry_title'] = streamcorpus.ContentItem(
raw=entry.feed_entry.title.encode('utf8'),
media_type=entry.feed_entry.content.mime_type,
encoding='UTF-8')
if entry.feed_entry.content.data:
si.other_content['feed_entry'] = _make_content_item(
entry.feed_entry.content)
si.source_metadata['lang'] = pe.lang[0].code
si.source_metadata['author'] = json.dumps(
dict(
name=pe.author[0].name,
email=pe.author[0].email,
link=pe.author[0].link[0].href,
)
)
si.source = entry.source.publisher_type
return si | def _make_stream_item(entry):
"""Given a single spinn3r feed entry, produce a single StreamItem.
Returns 'None' if a complete item can't be constructed.
"""
# get standard metadata, assuming it's present...
if not hasattr(entry, 'permalink_entry'):
return None
pe = entry.permalink_entry
# ...and create a streamitem...
si = streamcorpus.make_stream_item(
pe.date_found[:-1] + '.0Z',
pe.canonical_link.href.encode('utf8'))
if not si.stream_time:
logger.debug('failed to generate stream_time from {0!r}'
.format(pe.date_found))
return None
if not si.abs_url:
logger.debug('failed to generate abs_url from {0!r}'
.format(pe.canonical_link.href))
return None
# ...filling in the actual data
si.body = _make_content_item(
pe.content,
alternate_data=entry.feed_entry.content.data)
if not si.body:
return None
if not si.body.raw:
return None
if pe.content_extract.data:
si.other_content['extract'] = _make_content_item(pe.content_extract)
si.other_content['title'] = streamcorpus.ContentItem(
raw=pe.title.encode('utf8'),
media_type=pe.content_extract.mime_type,
encoding='UTF-8')
si.other_content['feed_entry_title'] = streamcorpus.ContentItem(
raw=entry.feed_entry.title.encode('utf8'),
media_type=entry.feed_entry.content.mime_type,
encoding='UTF-8')
if entry.feed_entry.content.data:
si.other_content['feed_entry'] = _make_content_item(
entry.feed_entry.content)
si.source_metadata['lang'] = pe.lang[0].code
si.source_metadata['author'] = json.dumps(
dict(
name=pe.author[0].name,
email=pe.author[0].email,
link=pe.author[0].link[0].href,
)
)
si.source = entry.source.publisher_type
return si | [
"Given",
"a",
"single",
"spinn3r",
"feed",
"entry",
"produce",
"a",
"single",
"StreamItem",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_spinn3r_feed_storage.py#L243-L298 | [
"def",
"_make_stream_item",
"(",
"entry",
")",
":",
"# get standard metadata, assuming it's present...",
"if",
"not",
"hasattr",
"(",
"entry",
",",
"'permalink_entry'",
")",
":",
"return",
"None",
"pe",
"=",
"entry",
".",
"permalink_entry",
"# ...and create a streamitem...",
"si",
"=",
"streamcorpus",
".",
"make_stream_item",
"(",
"pe",
".",
"date_found",
"[",
":",
"-",
"1",
"]",
"+",
"'.0Z'",
",",
"pe",
".",
"canonical_link",
".",
"href",
".",
"encode",
"(",
"'utf8'",
")",
")",
"if",
"not",
"si",
".",
"stream_time",
":",
"logger",
".",
"debug",
"(",
"'failed to generate stream_time from {0!r}'",
".",
"format",
"(",
"pe",
".",
"date_found",
")",
")",
"return",
"None",
"if",
"not",
"si",
".",
"abs_url",
":",
"logger",
".",
"debug",
"(",
"'failed to generate abs_url from {0!r}'",
".",
"format",
"(",
"pe",
".",
"canonical_link",
".",
"href",
")",
")",
"return",
"None",
"# ...filling in the actual data",
"si",
".",
"body",
"=",
"_make_content_item",
"(",
"pe",
".",
"content",
",",
"alternate_data",
"=",
"entry",
".",
"feed_entry",
".",
"content",
".",
"data",
")",
"if",
"not",
"si",
".",
"body",
":",
"return",
"None",
"if",
"not",
"si",
".",
"body",
".",
"raw",
":",
"return",
"None",
"if",
"pe",
".",
"content_extract",
".",
"data",
":",
"si",
".",
"other_content",
"[",
"'extract'",
"]",
"=",
"_make_content_item",
"(",
"pe",
".",
"content_extract",
")",
"si",
".",
"other_content",
"[",
"'title'",
"]",
"=",
"streamcorpus",
".",
"ContentItem",
"(",
"raw",
"=",
"pe",
".",
"title",
".",
"encode",
"(",
"'utf8'",
")",
",",
"media_type",
"=",
"pe",
".",
"content_extract",
".",
"mime_type",
",",
"encoding",
"=",
"'UTF-8'",
")",
"si",
".",
"other_content",
"[",
"'feed_entry_title'",
"]",
"=",
"streamcorpus",
".",
"ContentItem",
"(",
"raw",
"=",
"entry",
".",
"feed_entry",
".",
"title",
".",
"encode",
"(",
"'utf8'",
")",
",",
"media_type",
"=",
"entry",
".",
"feed_entry",
".",
"content",
".",
"mime_type",
",",
"encoding",
"=",
"'UTF-8'",
")",
"if",
"entry",
".",
"feed_entry",
".",
"content",
".",
"data",
":",
"si",
".",
"other_content",
"[",
"'feed_entry'",
"]",
"=",
"_make_content_item",
"(",
"entry",
".",
"feed_entry",
".",
"content",
")",
"si",
".",
"source_metadata",
"[",
"'lang'",
"]",
"=",
"pe",
".",
"lang",
"[",
"0",
"]",
".",
"code",
"si",
".",
"source_metadata",
"[",
"'author'",
"]",
"=",
"json",
".",
"dumps",
"(",
"dict",
"(",
"name",
"=",
"pe",
".",
"author",
"[",
"0",
"]",
".",
"name",
",",
"email",
"=",
"pe",
".",
"author",
"[",
"0",
"]",
".",
"email",
",",
"link",
"=",
"pe",
".",
"author",
"[",
"0",
"]",
".",
"link",
"[",
"0",
"]",
".",
"href",
",",
")",
")",
"si",
".",
"source",
"=",
"entry",
".",
"source",
".",
"publisher_type",
"return",
"si"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
test | _make_content_item | Create a ContentItem from a node in the spinn3r data tree.
The ContentItem is created with raw data set to ``node.data``,
decompressed if the node's encoding is 'zlib', and UTF-8
normalized, with a MIME type from ``node.mime_type``.
``node``
the actual node from the spinn3r protobuf data
``mime_type``
string MIME type to use (defaults to ``node.mime_type``)
``alternate_data``
alternate (compressed) data to use, if ``node.data`` is missing
or can't be decompressed | streamcorpus_pipeline/_spinn3r_feed_storage.py | def _make_content_item(node, mime_type=None, alternate_data=None):
"""Create a ContentItem from a node in the spinn3r data tree.
The ContentItem is created with raw data set to ``node.data``,
decompressed if the node's encoding is 'zlib', and UTF-8
normalized, with a MIME type from ``node.mime_type``.
``node``
the actual node from the spinn3r protobuf data
``mime_type``
string MIME type to use (defaults to ``node.mime_type``)
``alternate_data``
alternate (compressed) data to use, if ``node.data`` is missing
or can't be decompressed
"""
raw = node.data
if getattr(node, 'encoding', None) == 'zlib':
try:
raw = zlib.decompress(node.data)
except Exception, exc:
if alternate_data is not None:
try:
raw = zlib.decompress(alternate_data)
except Exception:
raise exc # the original exception
else:
raise
if mime_type is None:
mime_type = node.mime_type
raw = raw.decode('utf8').encode('utf8')
return streamcorpus.ContentItem(raw=raw, media_type=mime_type) | def _make_content_item(node, mime_type=None, alternate_data=None):
"""Create a ContentItem from a node in the spinn3r data tree.
The ContentItem is created with raw data set to ``node.data``,
decompressed if the node's encoding is 'zlib', and UTF-8
normalized, with a MIME type from ``node.mime_type``.
``node``
the actual node from the spinn3r protobuf data
``mime_type``
string MIME type to use (defaults to ``node.mime_type``)
``alternate_data``
alternate (compressed) data to use, if ``node.data`` is missing
or can't be decompressed
"""
raw = node.data
if getattr(node, 'encoding', None) == 'zlib':
try:
raw = zlib.decompress(node.data)
except Exception, exc:
if alternate_data is not None:
try:
raw = zlib.decompress(alternate_data)
except Exception:
raise exc # the original exception
else:
raise
if mime_type is None:
mime_type = node.mime_type
raw = raw.decode('utf8').encode('utf8')
return streamcorpus.ContentItem(raw=raw, media_type=mime_type) | [
"Create",
"a",
"ContentItem",
"from",
"a",
"node",
"in",
"the",
"spinn3r",
"data",
"tree",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_spinn3r_feed_storage.py#L301-L332 | [
"def",
"_make_content_item",
"(",
"node",
",",
"mime_type",
"=",
"None",
",",
"alternate_data",
"=",
"None",
")",
":",
"raw",
"=",
"node",
".",
"data",
"if",
"getattr",
"(",
"node",
",",
"'encoding'",
",",
"None",
")",
"==",
"'zlib'",
":",
"try",
":",
"raw",
"=",
"zlib",
".",
"decompress",
"(",
"node",
".",
"data",
")",
"except",
"Exception",
",",
"exc",
":",
"if",
"alternate_data",
"is",
"not",
"None",
":",
"try",
":",
"raw",
"=",
"zlib",
".",
"decompress",
"(",
"alternate_data",
")",
"except",
"Exception",
":",
"raise",
"exc",
"# the original exception",
"else",
":",
"raise",
"if",
"mime_type",
"is",
"None",
":",
"mime_type",
"=",
"node",
".",
"mime_type",
"raw",
"=",
"raw",
".",
"decode",
"(",
"'utf8'",
")",
".",
"encode",
"(",
"'utf8'",
")",
"return",
"streamcorpus",
".",
"ContentItem",
"(",
"raw",
"=",
"raw",
",",
"media_type",
"=",
"mime_type",
")"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
test | ProtoStreamReader._read | Read (up to) 'n' bytes from the underlying file. If any bytes
have been pushed in with _unread() those are returned first. | streamcorpus_pipeline/_spinn3r_feed_storage.py | def _read(self, n):
"""Read (up to) 'n' bytes from the underlying file. If any bytes
have been pushed in with _unread() those are returned first."""
if n <= len(self._prefix):
# the read can be fulfilled entirely from the prefix
result = self._prefix[:n]
self._prefix = self._prefix[n:]
return result
# otherwise we need to read some
n -= len(self._prefix)
result = self._prefix + self.f.read(n)
self._prefix = ""
return result | def _read(self, n):
"""Read (up to) 'n' bytes from the underlying file. If any bytes
have been pushed in with _unread() those are returned first."""
if n <= len(self._prefix):
# the read can be fulfilled entirely from the prefix
result = self._prefix[:n]
self._prefix = self._prefix[n:]
return result
# otherwise we need to read some
n -= len(self._prefix)
result = self._prefix + self.f.read(n)
self._prefix = ""
return result | [
"Read",
"(",
"up",
"to",
")",
"n",
"bytes",
"from",
"the",
"underlying",
"file",
".",
"If",
"any",
"bytes",
"have",
"been",
"pushed",
"in",
"with",
"_unread",
"()",
"those",
"are",
"returned",
"first",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_spinn3r_feed_storage.py#L104-L116 | [
"def",
"_read",
"(",
"self",
",",
"n",
")",
":",
"if",
"n",
"<=",
"len",
"(",
"self",
".",
"_prefix",
")",
":",
"# the read can be fulfilled entirely from the prefix",
"result",
"=",
"self",
".",
"_prefix",
"[",
":",
"n",
"]",
"self",
".",
"_prefix",
"=",
"self",
".",
"_prefix",
"[",
"n",
":",
"]",
"return",
"result",
"# otherwise we need to read some",
"n",
"-=",
"len",
"(",
"self",
".",
"_prefix",
")",
"result",
"=",
"self",
".",
"_prefix",
"+",
"self",
".",
"f",
".",
"read",
"(",
"n",
")",
"self",
".",
"_prefix",
"=",
"\"\"",
"return",
"result"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
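The `_unread` method that `_read` relies on is not shown in this record; below is a standalone, runnable sketch of the same pushback-buffer pattern, assuming `_unread` simply prepends the pushed-back bytes to the prefix:

    import io

    class PushbackFile(object):
        """Minimal illustration of the _read/_unread buffering idea."""
        def __init__(self, f):
            self.f = f
            self._prefix = b''
        def unread(self, data):
            self._prefix = data + self._prefix  # assumed _unread behavior
        def read(self, n):
            if n <= len(self._prefix):
                out, self._prefix = self._prefix[:n], self._prefix[n:]
                return out
            out = self._prefix + self.f.read(n - len(self._prefix))
            self._prefix = b''
            return out

    pf = PushbackFile(io.BytesIO(b'abcdef'))
    first = pf.read(4)       # b'abcd'
    pf.unread(first[2:])     # push b'cd' back
    assert pf.read(4) == b'cdef'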
test | ProtoStreamReader._read_varint | Read exactly a varint out of the underlying file. | streamcorpus_pipeline/_spinn3r_feed_storage.py | def _read_varint(self):
"""Read exactly a varint out of the underlying file."""
buf = self._read(8)
(n, l) = _DecodeVarint(buf, 0)
self._unread(buf[l:])
return n | def _read_varint(self):
"""Read exactly a varint out of the underlying file."""
buf = self._read(8)
(n, l) = _DecodeVarint(buf, 0)
self._unread(buf[l:])
return n | [
"Read",
"exactly",
"a",
"varint",
"out",
"of",
"the",
"underlying",
"file",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_spinn3r_feed_storage.py#L118-L123 | [
"def",
"_read_varint",
"(",
"self",
")",
":",
"buf",
"=",
"self",
".",
"_read",
"(",
"8",
")",
"(",
"n",
",",
"l",
")",
"=",
"_DecodeVarint",
"(",
"buf",
",",
"0",
")",
"self",
".",
"_unread",
"(",
"buf",
"[",
"l",
":",
"]",
")",
"return",
"n"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
test | ProtoStreamReader._read_a | Read some protobuf-encoded object stored in a single block
out of the file. | streamcorpus_pipeline/_spinn3r_feed_storage.py | def _read_a(self, cls):
"""Read some protobuf-encoded object stored in a single block
out of the file."""
o = cls()
o.ParseFromString(self._read_block())
return o | def _read_a(self, cls):
"""Read some protobuf-encoded object stored in a single block
out of the file."""
o = cls()
o.ParseFromString(self._read_block())
return o | [
"Read",
"some",
"protobuf",
"-",
"encoded",
"object",
"stored",
"in",
"a",
"single",
"block",
"out",
"of",
"the",
"file",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_spinn3r_feed_storage.py#L131-L136 | [
"def",
"_read_a",
"(",
"self",
",",
"cls",
")",
":",
"o",
"=",
"cls",
"(",
")",
"o",
".",
"ParseFromString",
"(",
"self",
".",
"_read_block",
"(",
")",
")",
"return",
"o"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
test | parse_keys_and_ranges | Parse the :class:`from_kvlayer` input string.
This accepts two formats. In the textual format, it accepts any
number of stream IDs in timestamp-docid format, separated by ``,``
or ``;``, and processes those as individual stream IDs. In the
binary format, it accepts 20-byte key blobs (16 bytes md5 hash, 4
bytes timestamp) split by ``;`` or ``<``; e.g., ``a<f;x`` loads
scans keys `a` through `f` and loads singly key `x`.
`keyfunc` and `rangefunc` are run as generators and their yields
are yielded from this function. | streamcorpus_pipeline/_kvlayer.py | def parse_keys_and_ranges(i_str, keyfunc, rangefunc):
'''Parse the :class:`from_kvlayer` input string.
This accepts two formats. In the textual format, it accepts any
number of stream IDs in timestamp-docid format, separated by ``,``
or ``;``, and processes those as individual stream IDs. In the
binary format, it accepts 20-byte key blobs (16 bytes md5 hash, 4
bytes timestamp) split by ``;`` or ``<``; e.g., ``a<f;x``
scans keys `a` through `f` and loads the single key `x`.
`keyfunc` and `rangefunc` are run as generators and their yields
are yielded from this function.
'''
while i_str:
m = _STREAM_ID_RE.match(i_str)
if m:
# old style text stream_id
for retval in keyfunc(stream_id_to_kvlayer_key(m.group())):
yield retval
i_str = i_str[m.end():]
while i_str and ((i_str[0] == ',') or (i_str[0] == ';')):
i_str = i_str[1:]
continue
if len(i_str) == SI_KEY_LENGTH:
# one key, get it.
key = parse_si_key(i_str)
for retval in keyfunc(key):
yield retval
return
keya = i_str[:SI_KEY_LENGTH]
splitc = i_str[SI_KEY_LENGTH]
if splitc == '<':
# range
keyb = i_str[SI_KEY_LENGTH+1:SI_KEY_LENGTH+1+SI_KEY_LENGTH]
i_str = i_str[SI_KEY_LENGTH+1+SI_KEY_LENGTH:]
keya = parse_si_key(keya)
keyb = parse_si_key(keyb)
for retval in rangefunc(keya, keyb):
yield retval
elif splitc == ';':
# keya is single key to load
keya = parse_si_key(keya)
for retval in keyfunc(keya):
yield retval
i_str = i_str[SI_KEY_LENGTH+1+1:]
else:
logger.error('bogus key splitter %s, %r', splitc, i_str)
return | def parse_keys_and_ranges(i_str, keyfunc, rangefunc):
'''Parse the :class:`from_kvlayer` input string.
This accepts two formats. In the textual format, it accepts any
number of stream IDs in timestamp-docid format, separated by ``,``
or ``;``, and processes those as individual stream IDs. In the
binary format, it accepts 20-byte key blobs (16 bytes md5 hash, 4
bytes timestamp) split by ``;`` or ``<``; e.g., ``a<f;x``
scans keys `a` through `f` and loads the single key `x`.
`keyfunc` and `rangefunc` are run as generators and their yields
are yielded from this function.
'''
while i_str:
m = _STREAM_ID_RE.match(i_str)
if m:
# old style text stream_id
for retval in keyfunc(stream_id_to_kvlayer_key(m.group())):
yield retval
i_str = i_str[m.end():]
while i_str and ((i_str[0] == ',') or (i_str[0] == ';')):
i_str = i_str[1:]
continue
if len(i_str) == SI_KEY_LENGTH:
# one key, get it.
key = parse_si_key(i_str)
for retval in keyfunc(key):
yield retval
return
keya = i_str[:SI_KEY_LENGTH]
splitc = i_str[SI_KEY_LENGTH]
if splitc == '<':
# range
keyb = i_str[SI_KEY_LENGTH+1:SI_KEY_LENGTH+1+SI_KEY_LENGTH]
i_str = i_str[SI_KEY_LENGTH+1+SI_KEY_LENGTH:]
keya = parse_si_key(keya)
keyb = parse_si_key(keyb)
for retval in rangefunc(keya, keyb):
yield retval
elif splitc == ';':
# keya is single key to load
keya = parse_si_key(keya)
for retval in keyfunc(keya):
yield retval
i_str = i_str[SI_KEY_LENGTH+1+1:]
else:
logger.error('bogus key splitter %s, %r', splitc, i_str)
return | [
"Parse",
"the",
":",
"class",
":",
"from_kvlayer",
"input",
"string",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_kvlayer.py#L128-L178 | [
"def",
"parse_keys_and_ranges",
"(",
"i_str",
",",
"keyfunc",
",",
"rangefunc",
")",
":",
"while",
"i_str",
":",
"m",
"=",
"_STREAM_ID_RE",
".",
"match",
"(",
"i_str",
")",
"if",
"m",
":",
"# old style text stream_id",
"for",
"retval",
"in",
"keyfunc",
"(",
"stream_id_to_kvlayer_key",
"(",
"m",
".",
"group",
"(",
")",
")",
")",
":",
"yield",
"retval",
"i_str",
"=",
"i_str",
"[",
"m",
".",
"end",
"(",
")",
":",
"]",
"while",
"i_str",
"and",
"(",
"(",
"i_str",
"[",
"0",
"]",
"==",
"','",
")",
"or",
"(",
"i_str",
"[",
"0",
"]",
"==",
"';'",
")",
")",
":",
"i_str",
"=",
"i_str",
"[",
"1",
":",
"]",
"continue",
"if",
"len",
"(",
"i_str",
")",
"==",
"SI_KEY_LENGTH",
":",
"# one key, get it.",
"key",
"=",
"parse_si_key",
"(",
"i_str",
")",
"for",
"retval",
"in",
"keyfunc",
"(",
"key",
")",
":",
"yield",
"retval",
"return",
"keya",
"=",
"i_str",
"[",
":",
"SI_KEY_LENGTH",
"]",
"splitc",
"=",
"i_str",
"[",
"SI_KEY_LENGTH",
"]",
"if",
"splitc",
"==",
"'<'",
":",
"# range",
"keyb",
"=",
"i_str",
"[",
"SI_KEY_LENGTH",
"+",
"1",
":",
"SI_KEY_LENGTH",
"+",
"1",
"+",
"SI_KEY_LENGTH",
"]",
"i_str",
"=",
"i_str",
"[",
"SI_KEY_LENGTH",
"+",
"1",
"+",
"SI_KEY_LENGTH",
":",
"]",
"keya",
"=",
"parse_si_key",
"(",
"keya",
")",
"keyb",
"=",
"parse_si_key",
"(",
"keyb",
")",
"for",
"retval",
"in",
"rangefunc",
"(",
"keya",
",",
"keyb",
")",
":",
"yield",
"retval",
"elif",
"splitc",
"==",
"';'",
":",
"# keya is single key to load",
"keya",
"=",
"parse_si_key",
"(",
"keya",
")",
"for",
"retval",
"in",
"keyfunc",
"(",
"keya",
")",
":",
"yield",
"retval",
"i_str",
"=",
"i_str",
"[",
"SI_KEY_LENGTH",
"+",
"1",
"+",
"1",
":",
"]",
"else",
":",
"logger",
".",
"error",
"(",
"'bogus key splitter %s, %r'",
",",
"splitc",
",",
"i_str",
")",
"return"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
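A sketch of constructing the binary `i_str` format this parser expects, using the same 20-byte layout that `serialize_si_key` (later in this file) produces. Digests and timestamps are made up; under Python 2, which this code targets, the packed blobs are plain str values and concatenate directly with the `<` and `;` separators:

    import struct

    def si_key_blob(md5_digest, epoch_ticks):
        # 20 bytes: 16-byte binary md5 digest + big-endian 4-byte timestamp
        return struct.pack('>16si', md5_digest, epoch_ticks)

    ka = si_key_blob(b'\x00' * 16, 0)
    kb = si_key_blob(b'\xff' * 16, 2147483647)
    range_i_str = ka + b'<' + kb                          # scan ka through kb
    single_i_str = si_key_blob(b'\xab' * 16, 1345928297)  # load one exact key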
test | get_kvlayer_stream_item | Retrieve a :class:`streamcorpus.StreamItem` from :mod:`kvlayer`.
This function requires that `client` already be set up properly::
client = kvlayer.client()
client.setup_namespace(STREAM_ITEM_TABLE_DEFS,
STREAM_ITEM_VALUE_DEFS)
si = get_kvlayer_stream_item(client, stream_id)
`stream_id` is in the form of
:data:`streamcorpus.StreamItem.stream_id` and contains the
``epoch_ticks``, a hyphen, and the ``doc_id``.
:param client: kvlayer client object
:type client: :class:`kvlayer.AbstractStorage`
:param str stream_id: stream ID to retrieve
:return: corresponding :class:`streamcorpus.StreamItem`
:raise exceptions.KeyError: if `stream_id` is malformed or does
not correspond to anything in the database | streamcorpus_pipeline/_kvlayer.py | def get_kvlayer_stream_item(client, stream_id):
'''Retrieve a :class:`streamcorpus.StreamItem` from :mod:`kvlayer`.
This function requires that `client` already be set up properly::
client = kvlayer.client()
client.setup_namespace(STREAM_ITEM_TABLE_DEFS,
STREAM_ITEM_VALUE_DEFS)
si = get_kvlayer_stream_item(client, stream_id)
`stream_id` is in the form of
:data:`streamcorpus.StreamItem.stream_id` and contains the
``epoch_ticks``, a hyphen, and the ``doc_id``.
:param client: kvlayer client object
:type client: :class:`kvlayer.AbstractStorage`
:param str stream_id: stream ID to retrieve
:return: corresponding :class:`streamcorpus.StreamItem`
:raise exceptions.KeyError: if `stream_id` is malformed or does
not correspond to anything in the database
'''
if client is None:
client = kvlayer.client()
client.setup_namespace(STREAM_ITEM_TABLE_DEFS,
STREAM_ITEM_VALUE_DEFS)
key = stream_id_to_kvlayer_key(stream_id)
for k, v in client.get(STREAM_ITEMS_TABLE, key):
if v is not None:
errors, bytestr = streamcorpus.decrypt_and_uncompress(v)
return streamcorpus.deserialize(bytestr)
raise KeyError(stream_id) | def get_kvlayer_stream_item(client, stream_id):
'''Retrieve a :class:`streamcorpus.StreamItem` from :mod:`kvlayer`.
This function requires that `client` already be set up properly::
client = kvlayer.client()
client.setup_namespace(STREAM_ITEM_TABLE_DEFS,
STREAM_ITEM_VALUE_DEFS)
si = get_kvlayer_stream_item(client, stream_id)
`stream_id` is in the form of
:data:`streamcorpus.StreamItem.stream_id` and contains the
``epoch_ticks``, a hyphen, and the ``doc_id``.
:param client: kvlayer client object
:type client: :class:`kvlayer.AbstractStorage`
:param str stream_id: stream ID to retrieve
:return: corresponding :class:`streamcorpus.StreamItem`
:raise exceptions.KeyError: if `stream_id` is malformed or does
not correspond to anything in the database
'''
if client is None:
client = kvlayer.client()
client.setup_namespace(STREAM_ITEM_TABLE_DEFS,
STREAM_ITEM_VALUE_DEFS)
key = stream_id_to_kvlayer_key(stream_id)
for k, v in client.get(STREAM_ITEMS_TABLE, key):
if v is not None:
errors, bytestr = streamcorpus.decrypt_and_uncompress(v)
return streamcorpus.deserialize(bytestr)
raise KeyError(stream_id) | [
"Retrieve",
"a",
":",
"class",
":",
"streamcorpus",
".",
"StreamItem",
"from",
":",
"mod",
":",
"kvlayer",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_kvlayer.py#L182-L213 | [
"def",
"get_kvlayer_stream_item",
"(",
"client",
",",
"stream_id",
")",
":",
"if",
"client",
"is",
"None",
":",
"client",
"=",
"kvlayer",
".",
"client",
"(",
")",
"client",
".",
"setup_namespace",
"(",
"STREAM_ITEM_TABLE_DEFS",
",",
"STREAM_ITEM_VALUE_DEFS",
")",
"key",
"=",
"stream_id_to_kvlayer_key",
"(",
"stream_id",
")",
"for",
"k",
",",
"v",
"in",
"client",
".",
"get",
"(",
"STREAM_ITEMS_TABLE",
",",
"key",
")",
":",
"if",
"v",
"is",
"not",
"None",
":",
"errors",
",",
"bytestr",
"=",
"streamcorpus",
".",
"decrypt_and_uncompress",
"(",
"v",
")",
"return",
"streamcorpus",
".",
"deserialize",
"(",
"bytestr",
")",
"raise",
"KeyError",
"(",
"stream_id",
")"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
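An illustrative call showing the textual stream ID format the docstring describes (epoch ticks, a hyphen, and the 32-hex md5 doc_id); the ID is invented and `client` is assumed to be set up as in the docstring snippet:

    stream_id = '1345928297-0123456789abcdef0123456789abcdef'
    si = get_kvlayer_stream_item(client, stream_id)  # raises KeyError if absent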
test | make_doc_id_range | Construct a tuple(begin, end) of one-tuple kvlayer keys from a
hexdigest doc_id. | streamcorpus_pipeline/_kvlayer.py | def make_doc_id_range(doc_id):
'''Construct a tuple(begin, end) of one-tuple kvlayer keys from a
hexdigest doc_id.
'''
assert len(doc_id) == 32, 'expecting 32 hex string, not: %r' % doc_id
bin_docid = base64.b16decode(doc_id.upper())
doc_id_range = ((bin_docid,), (bin_docid,))
return doc_id_range | def make_doc_id_range(doc_id):
'''Construct a tuple(begin, end) of one-tuple kvlayer keys from a
hexdigest doc_id.
'''
assert len(doc_id) == 32, 'expecting 32 hex string, not: %r' % doc_id
bin_docid = base64.b16decode(doc_id.upper())
doc_id_range = ((bin_docid,), (bin_docid,))
return doc_id_range | [
"Construct",
"a",
"tuple",
"(",
"begin",
"end",
")",
"of",
"one",
"-",
"tuple",
"kvlayer",
"keys",
"from",
"a",
"hexdigest",
"doc_id",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_kvlayer.py#L216-L224 | [
"def",
"make_doc_id_range",
"(",
"doc_id",
")",
":",
"assert",
"len",
"(",
"doc_id",
")",
"==",
"32",
",",
"'expecting 32 hex string, not: %r'",
"%",
"doc_id",
"bin_docid",
"=",
"base64",
".",
"b16decode",
"(",
"doc_id",
".",
"upper",
"(",
")",
")",
"doc_id_range",
"=",
"(",
"(",
"bin_docid",
",",
")",
",",
"(",
"bin_docid",
",",
")",
")",
"return",
"doc_id_range"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
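A quick check of the shape of the range returned above, with an illustrative doc_id:

    begin, end = make_doc_id_range('0123456789abcdef0123456789abcdef')
    assert begin == end           # equal one-tuples pin the first key component
    assert len(begin[0]) == 16    # the binary md5 digest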
test | get_kvlayer_stream_item_by_doc_id | Retrieve :class:`streamcorpus.StreamItem`s from :mod:`kvlayer`.
Namely, it returns an iterator over all documents with the given
docid. The docid should be an md5 hash of the document's abs_url.
:param client: kvlayer client object
:type client: :class:`kvlayer.AbstractStorage`
:param str doc_id: doc id of documents to retrieve
:return: generator of :class:`streamcorpus.StreamItem` | streamcorpus_pipeline/_kvlayer.py | def get_kvlayer_stream_item_by_doc_id(client, doc_id):
'''Retrieve :class:`streamcorpus.StreamItem`s from :mod:`kvlayer`.
Namely, it returns an iterator over all documents with the given
docid. The docid should be an md5 hash of the document's abs_url.
:param client: kvlayer client object
:type client: :class:`kvlayer.AbstractStorage`
:param str doc_id: doc id of documents to retrieve
:return: generator of :class:`streamcorpus.StreamItem`
'''
if client is None:
client = kvlayer.client()
client.setup_namespace(STREAM_ITEM_TABLE_DEFS,
STREAM_ITEM_VALUE_DEFS)
doc_id_range = make_doc_id_range(doc_id)
for k, v in client.scan(STREAM_ITEMS_TABLE, doc_id_range):
if v is not None:
errors, bytestr = streamcorpus.decrypt_and_uncompress(v)
yield streamcorpus.deserialize(bytestr) | def get_kvlayer_stream_item_by_doc_id(client, doc_id):
'''Retrieve :class:`streamcorpus.StreamItem`s from :mod:`kvlayer`.
Namely, it returns an iterator over all documents with the given
docid. The docid should be an md5 hash of the document's abs_url.
:param client: kvlayer client object
:type client: :class:`kvlayer.AbstractStorage`
:param str doc_id: doc id of documents to retrieve
:return: generator of :class:`streamcorpus.StreamItem`
'''
if client is None:
client = kvlayer.client()
client.setup_namespace(STREAM_ITEM_TABLE_DEFS,
STREAM_ITEM_VALUE_DEFS)
doc_id_range = make_doc_id_range(doc_id)
for k, v in client.scan(STREAM_ITEMS_TABLE, doc_id_range):
if v is not None:
errors, bytestr = streamcorpus.decrypt_and_uncompress(v)
yield streamcorpus.deserialize(bytestr) | [
"Retrieve",
":",
"class",
":",
"streamcorpus",
".",
"StreamItem",
"s",
"from",
":",
"mod",
":",
"kvlayer",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_kvlayer.py#L227-L246 | [
"def",
"get_kvlayer_stream_item_by_doc_id",
"(",
"client",
",",
"doc_id",
")",
":",
"if",
"client",
"is",
"None",
":",
"client",
"=",
"kvlayer",
".",
"client",
"(",
")",
"client",
".",
"setup_namespace",
"(",
"STREAM_ITEM_TABLE_DEFS",
",",
"STREAM_ITEM_VALUE_DEFS",
")",
"doc_id_range",
"=",
"make_doc_id_range",
"(",
"doc_id",
")",
"for",
"k",
",",
"v",
"in",
"client",
".",
"scan",
"(",
"STREAM_ITEMS_TABLE",
",",
"doc_id_range",
")",
":",
"if",
"v",
"is",
"not",
"None",
":",
"errors",
",",
"bytestr",
"=",
"streamcorpus",
".",
"decrypt_and_uncompress",
"(",
"v",
")",
"yield",
"streamcorpus",
".",
"deserialize",
"(",
"bytestr",
")"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
test | get_kvlayer_stream_ids_by_doc_id | Retrieve stream ids from :mod:`kvlayer`.
Namely, it returns an iterator over all stream ids with the given
docid. The docid should be an md5 hash of the document's abs_url.
:param client: kvlayer client object
:type client: :class:`kvlayer.AbstractStorage`
:param str doc_id: doc id of documents to retrieve
:return: generator of str | streamcorpus_pipeline/_kvlayer.py | def get_kvlayer_stream_ids_by_doc_id(client, doc_id):
'''Retrieve stream ids from :mod:`kvlayer`.
Namely, it returns an iterator over all stream ids with the given
docid. The docid should be an md5 hash of the document's abs_url.
:param client: kvlayer client object
:type client: :class:`kvlayer.AbstractStorage`
:param str doc_id: doc id of documents to retrieve
:return: generator of str
'''
if client is None:
client = kvlayer.client()
client.setup_namespace(STREAM_ITEM_TABLE_DEFS,
STREAM_ITEM_VALUE_DEFS)
doc_id_range = make_doc_id_range(doc_id)
for k in client.scan_keys(STREAM_ITEMS_TABLE, doc_id_range):
yield kvlayer_key_to_stream_id(k) | def get_kvlayer_stream_ids_by_doc_id(client, doc_id):
'''Retrieve stream ids from :mod:`kvlayer`.
Namely, it returns an iterator over all stream ids with the given
docid. The docid should be an md5 hash of the document's abs_url.
:param client: kvlayer client object
:type client: :class:`kvlayer.AbstractStorage`
:param str doc_id: doc id of documents to retrieve
:return: generator of str
'''
if client is None:
client = kvlayer.client()
client.setup_namespace(STREAM_ITEM_TABLE_DEFS,
STREAM_ITEM_VALUE_DEFS)
doc_id_range = make_doc_id_range(doc_id)
for k in client.scan_keys(STREAM_ITEMS_TABLE, doc_id_range):
yield kvlayer_key_to_stream_id(k) | [
"Retrieve",
"stream",
"ids",
"from",
":",
"mod",
":",
"kvlayer",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_kvlayer.py#L249-L266 | [
"def",
"get_kvlayer_stream_ids_by_doc_id",
"(",
"client",
",",
"doc_id",
")",
":",
"if",
"client",
"is",
"None",
":",
"client",
"=",
"kvlayer",
".",
"client",
"(",
")",
"client",
".",
"setup_namespace",
"(",
"STREAM_ITEM_TABLE_DEFS",
",",
"STREAM_ITEM_VALUE_DEFS",
")",
"doc_id_range",
"=",
"make_doc_id_range",
"(",
"doc_id",
")",
"for",
"k",
"in",
"client",
".",
"scan_keys",
"(",
"STREAM_ITEMS_TABLE",
",",
"doc_id_range",
")",
":",
"yield",
"kvlayer_key_to_stream_id",
"(",
"k",
")"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
test | serialize_si_key | Return packed bytes representation of StreamItem kvlayer key.
The result is 20 bytes, 16 of md5 hash, 4 of int timestamp. | streamcorpus_pipeline/_kvlayer.py | def serialize_si_key(si_key):
'''
Return packed bytes representation of StreamItem kvlayer key.
The result is 20 bytes, 16 of md5 hash, 4 of int timestamp.
'''
if len(si_key[0]) != 16:
raise ValueError('bad StreamItem key, expected 16 byte '
'md5 hash binary digest, got: {0!r}'.format(si_key))
return struct.pack('>16si', si_key[0], si_key[1]) | def serialize_si_key(si_key):
'''
Return packed bytes representation of StreamItem kvlayer key.
The result is 20 bytes, 16 of md5 hash, 4 of int timestamp.
'''
if len(si_key[0]) != 16:
raise ValueError('bad StreamItem key, expected 16 byte '
'md5 hash binary digest, got: {0!r}'.format(si_key))
return struct.pack('>16si', si_key[0], si_key[1]) | [
"Return",
"packed",
"bytes",
"representation",
"of",
"StreamItem",
"kvlayer",
"key",
".",
"The",
"result",
"is",
"20",
"bytes",
"16",
"of",
"md5",
"hash",
"4",
"of",
"int",
"timestamp",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_kvlayer.py#L392-L400 | [
"def",
"serialize_si_key",
"(",
"si_key",
")",
":",
"if",
"len",
"(",
"si_key",
"[",
"0",
"]",
")",
"!=",
"16",
":",
"raise",
"ValueError",
"(",
"'bad StreamItem key, expected 16 byte '",
"'md5 hash binary digest, got: {0!r}'",
".",
"format",
"(",
"si_key",
")",
")",
"return",
"struct",
".",
"pack",
"(",
"'>16si'",
",",
"si_key",
"[",
"0",
"]",
",",
"si_key",
"[",
"1",
"]",
")"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
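A round-trip sketch for the packed key format, with made-up values:

    import struct
    blob = serialize_si_key((b'\xab' * 16, 1345928297))
    assert len(blob) == 20
    digest, ticks = struct.unpack('>16si', blob)  # inverse of the pack above
    assert digest == b'\xab' * 16 and ticks == 1345928297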
test | streamitem_to_key_data | extract the parts of a StreamItem that go into a kvlayer key,
convert StreamItem to blob for storage.
return (kvlayer key tuple), data blob | streamcorpus_pipeline/_kvlayer.py | def streamitem_to_key_data(si):
'''
extract the parts of a StreamItem that go into a kvlayer key,
convert StreamItem to blob for storage.
return (kvlayer key tuple), data blob
'''
key = key_for_stream_item(si)
data = streamcorpus.serialize(si)
errors, data = streamcorpus.compress_and_encrypt(data)
assert not errors, errors
return key, data | def streamitem_to_key_data(si):
'''
extract the parts of a StreamItem that go into a kvlayer key,
convert StreamItem to blob for storage.
return (kvlayer key tuple), data blob
'''
key = key_for_stream_item(si)
data = streamcorpus.serialize(si)
errors, data = streamcorpus.compress_and_encrypt(data)
assert not errors, errors
return key, data | [
"extract",
"the",
"parts",
"of",
"a",
"StreamItem",
"that",
"go",
"into",
"a",
"kvlayer",
"key",
"convert",
"StreamItem",
"to",
"blob",
"for",
"storage",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_kvlayer.py#L411-L422 | [
"def",
"streamitem_to_key_data",
"(",
"si",
")",
":",
"key",
"=",
"key_for_stream_item",
"(",
"si",
")",
"data",
"=",
"streamcorpus",
".",
"serialize",
"(",
"si",
")",
"errors",
",",
"data",
"=",
"streamcorpus",
".",
"compress_and_encrypt",
"(",
"data",
")",
"assert",
"not",
"errors",
",",
"errors",
"return",
"key",
",",
"data"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
test | working_directory | Change working directory and restore the previous on exit | wimpy/util.py | def working_directory(path):
"""Change working directory and restore the previous on exit"""
prev_dir = os.getcwd()
os.chdir(str(path))
try:
yield
finally:
os.chdir(prev_dir) | def working_directory(path):
"""Change working directory and restore the previous on exit"""
prev_dir = os.getcwd()
os.chdir(str(path))
try:
yield
finally:
os.chdir(prev_dir) | [
"Change",
"working",
"directory",
"and",
"restore",
"the",
"previous",
"on",
"exit"
] | wimglenn/wimpy | python | https://github.com/wimglenn/wimpy/blob/4e8ebe4e7052d88c9f88ac7dcaa1b587cc2cf86e/wimpy/util.py#L41-L48 | [
"def",
"working_directory",
"(",
"path",
")",
":",
"prev_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"os",
".",
"chdir",
"(",
"str",
"(",
"path",
")",
")",
"try",
":",
"yield",
"finally",
":",
"os",
".",
"chdir",
"(",
"prev_dir",
")"
] | 4e8ebe4e7052d88c9f88ac7dcaa1b587cc2cf86e |
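Typical use of the context manager above:

    import os
    before = os.getcwd()
    with working_directory('/tmp'):
        pass  # the process cwd is /tmp (or its resolved path) in here
    assert os.getcwd() == before  # restored even if the body had raised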
test | strip_prefix | Removes the prefix, if it's there, otherwise returns input string unchanged.
If strict is True, also ensures the prefix was present | wimpy/util.py | def strip_prefix(s, prefix, strict=False):
"""Removes the prefix, if it's there, otherwise returns input string unchanged.
If strict is True, also ensures the prefix was present"""
if s.startswith(prefix):
return s[len(prefix) :]
elif strict:
raise WimpyError("string doesn't start with prefix")
return s | def strip_prefix(s, prefix, strict=False):
"""Removes the prefix, if it's there, otherwise returns input string unchanged.
If strict is True, also ensures the prefix was present"""
if s.startswith(prefix):
return s[len(prefix) :]
elif strict:
raise WimpyError("string doesn't start with prefix")
return s | [
"Removes",
"the",
"prefix",
"if",
"it",
"s",
"there",
"otherwise",
"returns",
"input",
"string",
"unchanged",
".",
"If",
"strict",
"is",
"True",
"also",
"ensures",
"the",
"prefix",
"was",
"present"
] | wimglenn/wimpy | python | https://github.com/wimglenn/wimpy/blob/4e8ebe4e7052d88c9f88ac7dcaa1b587cc2cf86e/wimpy/util.py#L51-L58 | [
"def",
"strip_prefix",
"(",
"s",
",",
"prefix",
",",
"strict",
"=",
"False",
")",
":",
"if",
"s",
".",
"startswith",
"(",
"prefix",
")",
":",
"return",
"s",
"[",
"len",
"(",
"prefix",
")",
":",
"]",
"elif",
"strict",
":",
"raise",
"WimpyError",
"(",
"\"string doesn't start with prefix\"",
")",
"return",
"s"
] | 4e8ebe4e7052d88c9f88ac7dcaa1b587cc2cf86e |
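A few illustrative calls matching the behavior of the code above:

    strip_prefix('www.example.com', 'www.')           # -> 'example.com'
    strip_prefix('example.com', 'www.')               # -> 'example.com', unchanged
    strip_prefix('example.com', 'www.', strict=True)  # raises WimpyError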
test | strip_suffix | Removes the suffix, if it's there, otherwise returns input string unchanged.
If strict is True, also ensures the suffix was present | wimpy/util.py | def strip_suffix(s, suffix, strict=False):
"""Removes the suffix, if it's there, otherwise returns input string unchanged.
If strict is True, also ensures the suffix was present"""
if s.endswith(suffix):
return s[: len(s) - len(suffix)]
elif strict:
raise WimpyError("string doesn't end with suffix")
return s | def strip_suffix(s, suffix, strict=False):
"""Removes the suffix, if it's there, otherwise returns input string unchanged.
If strict is True, also ensures the suffix was present"""
if s.endswith(suffix):
return s[: len(s) - len(suffix)]
elif strict:
raise WimpyError("string doesn't end with suffix")
return s | [
"Removes",
"the",
"suffix",
"if",
"it",
"s",
"there",
"otherwise",
"returns",
"input",
"string",
"unchanged",
".",
"If",
"strict",
"is",
"True",
"also",
"ensures",
"the",
"suffix",
"was",
"present"
] | wimglenn/wimpy | python | https://github.com/wimglenn/wimpy/blob/4e8ebe4e7052d88c9f88ac7dcaa1b587cc2cf86e/wimpy/util.py#L61-L68 | [
"def",
"strip_suffix",
"(",
"s",
",",
"suffix",
",",
"strict",
"=",
"False",
")",
":",
"if",
"s",
".",
"endswith",
"(",
"suffix",
")",
":",
"return",
"s",
"[",
":",
"len",
"(",
"s",
")",
"-",
"len",
"(",
"suffix",
")",
"]",
"elif",
"strict",
":",
"raise",
"WimpyError",
"(",
"\"string doesn't end with suffix\"",
")",
"return",
"s"
] | 4e8ebe4e7052d88c9f88ac7dcaa1b587cc2cf86e |
test | is_subsequence | Are all the elements of needle contained in haystack, and in the same order?
There may be other elements interspersed throughout | wimpy/util.py | def is_subsequence(needle, haystack):
"""Are all the elements of needle contained in haystack, and in the same order?
There may be other elements interspersed throughout"""
it = iter(haystack)
for element in needle:
if element not in it:
return False
return True | def is_subsequence(needle, haystack):
"""Are all the elements of needle contained in haystack, and in the same order?
There may be other elements interspersed throughout"""
it = iter(haystack)
for element in needle:
if element not in it:
return False
return True | [
"Are",
"all",
"the",
"elements",
"of",
"needle",
"contained",
"in",
"haystack",
"and",
"in",
"the",
"same",
"order?",
"There",
"may",
"be",
"other",
"elements",
"interspersed",
"throughout"
] | wimglenn/wimpy | python | https://github.com/wimglenn/wimpy/blob/4e8ebe4e7052d88c9f88ac7dcaa1b587cc2cf86e/wimpy/util.py#L110-L117 | [
"def",
"is_subsequence",
"(",
"needle",
",",
"haystack",
")",
":",
"it",
"=",
"iter",
"(",
"haystack",
")",
"for",
"element",
"in",
"needle",
":",
"if",
"element",
"not",
"in",
"it",
":",
"return",
"False",
"return",
"True"
] | 4e8ebe4e7052d88c9f88ac7dcaa1b587cc2cf86e |
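Illustrative calls; note that the `element not in it` test advances a single shared iterator, which is exactly what enforces ordering:

    is_subsequence([1, 3], [1, 2, 3])  # True: 1 then 3 appear in order
    is_subsequence([3, 1], [1, 2, 3])  # False: order matters
    is_subsequence([], [1, 2, 3])      # True: an empty needle always matches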
test | cube | Return an Ice application with a default home page.
Create :class:`Ice` object, add a route to return the default page
when a client requests the server root, i.e. /, using the HTTP GET
method, add an error handler to return HTTP error pages when an
error occurs and return this object. The returned object can be used
as a WSGI application.
Returns:
Ice: WSGI application. | ice.py | def cube():
"""Return an Ice application with a default home page.
Create :class:`Ice` object, add a route to return the default page
when a client requests the server root, i.e. /, using the HTTP GET
method, add an error handler to return HTTP error pages when an
error occurs and return this object. The returned object can be used
as a WSGI application.
Returns:
Ice: WSGI application.
"""
app = Ice()
@app.get('/')
def default_home_page():
"""Return a default home page."""
return simple_html('It works!',
'<h1>It works!</h1>\n'
'<p>This is the default ice web page.</p>')
@app.error()
def generic_error_page():
"""Return a simple and generic error page."""
return simple_html(app.response.status_line,
'<h1>{title}</h1>\n'
'<p>{description}</p>\n'
'<hr>\n'
'<address>Ice/{version}</address>'.format(
title=app.response.status_line,
description=app.response.status_detail,
version=__version__))
def simple_html(title, body):
"""Return a simple HTML page."""
return (
'<!DOCTYPE html>\n'
'<html>\n<head><title>{title}</title></head>\n'
'<body>\n{body}\n</body>\n</html>\n'
).format(title=title, body=body)
return app | def cube():
"""Return an Ice application with a default home page.
Create :class:`Ice` object, add a route to return the default page
when a client requests the server root, i.e. /, using the HTTP GET
method, add an error handler to return HTTP error pages when an
error occurs and return this object. The returned object can be used
as a WSGI application.
Returns:
Ice: WSGI application.
"""
app = Ice()
@app.get('/')
def default_home_page():
"""Return a default home page."""
return simple_html('It works!',
'<h1>It works!</h1>\n'
'<p>This is the default ice web page.</p>')
@app.error()
def generic_error_page():
"""Return a simple and generic error page."""
return simple_html(app.response.status_line,
'<h1>{title}</h1>\n'
'<p>{description}</p>\n'
'<hr>\n'
'<address>Ice/{version}</address>'.format(
title=app.response.status_line,
description=app.response.status_detail,
version=__version__))
def simple_html(title, body):
"""Return a simple HTML page."""
return (
'<!DOCTYPE html>\n'
'<html>\n<head><title>{title}</title></head>\n'
'<body>\n{body}\n</body>\n</html>\n'
).format(title=title, body=body)
return app | [
"Return",
"an",
"Ice",
"application",
"with",
"a",
"default",
"home",
"page",
"."
] | susam/ice | python | https://github.com/susam/ice/blob/532e685c504ea96f9e42833594585159ac1d2068/ice.py#L50-L91 | [
"def",
"cube",
"(",
")",
":",
"app",
"=",
"Ice",
"(",
")",
"@",
"app",
".",
"get",
"(",
"'/'",
")",
"def",
"default_home_page",
"(",
")",
":",
"\"\"\"Return a default home page.\"\"\"",
"return",
"simple_html",
"(",
"'It works!'",
",",
"'<h1>It works!</h1>\\n'",
"'<p>This is the default ice web page.</p>'",
")",
"@",
"app",
".",
"error",
"(",
")",
"def",
"generic_error_page",
"(",
")",
":",
"\"\"\"Return a simple and generic error page.\"\"\"",
"return",
"simple_html",
"(",
"app",
".",
"response",
".",
"status_line",
",",
"'<h1>{title}</h1>\\n'",
"'<p>{description}</p>\\n'",
"'<hr>\\n'",
"'<address>Ice/{version}</address>'",
".",
"format",
"(",
"title",
"=",
"app",
".",
"response",
".",
"status_line",
",",
"description",
"=",
"app",
".",
"response",
".",
"status_detail",
",",
"version",
"=",
"__version__",
")",
")",
"def",
"simple_html",
"(",
"title",
",",
"body",
")",
":",
"\"\"\"Return a simple HTML page.\"\"\"",
"return",
"(",
"'<!DOCTYPE html>\\n'",
"'<html>\\n<head><title>{title}</title></head>\\n'",
"'<body>\\n{body}\\n</body>\\n</html>\\n'",
")",
".",
"format",
"(",
"title",
"=",
"title",
",",
"body",
"=",
"body",
")",
"return",
"app"
] | 532e685c504ea96f9e42833594585159ac1d2068 |
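A sketch of serving the default application built by `cube`:

    app = cube()
    app.run()  # serves the default page at http://127.0.0.1:8080/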
test | Ice.run | Run the application using a simple WSGI server.
Arguments:
host (str, optional): Host on which to listen.
port (int, optional): Port number on which to listen. | ice.py | def run(self, host='127.0.0.1', port=8080):
"""Run the application using a simple WSGI server.
Arguments:
host (str, optional): Host on which to listen.
port (int, optional): Port number on which to listen.
"""
from wsgiref import simple_server
self._server = simple_server.make_server(host, port, self)
self._server.serve_forever() | def run(self, host='127.0.0.1', port=8080):
"""Run the application using a simple WSGI server.
Arguments:
host (str, optional): Host on which to listen.
port (int, optional): Port number on which to listen.
"""
from wsgiref import simple_server
self._server = simple_server.make_server(host, port, self)
self._server.serve_forever() | [
"Run",
"the",
"application",
"using",
"a",
"simple",
"WSGI",
"server",
"."
] | susam/ice | python | https://github.com/susam/ice/blob/532e685c504ea96f9e42833594585159ac1d2068/ice.py#L108-L117 | [
"def",
"run",
"(",
"self",
",",
"host",
"=",
"'127.0.0.1'",
",",
"port",
"=",
"8080",
")",
":",
"from",
"wsgiref",
"import",
"simple_server",
"self",
".",
"_server",
"=",
"simple_server",
".",
"make_server",
"(",
"host",
",",
"port",
",",
"self",
")",
"self",
".",
"_server",
".",
"serve_forever",
"(",
")"
] | 532e685c504ea96f9e42833594585159ac1d2068 |
test | Ice.exit | Stop the simple WSGI server running the application. | ice.py | def exit(self):
"""Stop the simple WSGI server running the appliation."""
if self._server is not None:
self._server.shutdown()
self._server.server_close()
self._server = None | def exit(self):
"""Stop the simple WSGI server running the appliation."""
if self._server is not None:
self._server.shutdown()
self._server.server_close()
self._server = None | [
"Stop",
"the",
"simple",
"WSGI",
"server",
"running",
"the",
"appliation",
"."
] | susam/ice | python | https://github.com/susam/ice/blob/532e685c504ea96f9e42833594585159ac1d2068/ice.py#L119-L124 | [
"def",
"exit",
"(",
"self",
")",
":",
"if",
"self",
".",
"_server",
"is",
"not",
"None",
":",
"self",
".",
"_server",
".",
"shutdown",
"(",
")",
"self",
".",
"_server",
".",
"server_close",
"(",
")",
"self",
".",
"_server",
"=",
"None"
] | 532e685c504ea96f9e42833594585159ac1d2068 |
test | Ice.route | Decorator to add route for a request with any HTTP method.
Arguments:
method (str): HTTP method name, e.g. GET, POST, etc.
pattern (str): Routing pattern the path must match.
Returns:
function: Decorator function to add route. | ice.py | def route(self, method, pattern):
"""Decorator to add route for a request with any HTTP method.
Arguments:
method (str): HTTP method name, e.g. GET, POST, etc.
pattern (str): Routing pattern the path must match.
Returns:
function: Decorator function to add route.
"""
def decorator(callback):
self._router.add(method, pattern, callback)
return callback
return decorator | def route(self, method, pattern):
"""Decorator to add route for a request with any HTTP method.
Arguments:
method (str): HTTP method name, e.g. GET, POST, etc.
pattern (str): Routing pattern the path must match.
Returns:
function: Decorator function to add route.
"""
def decorator(callback):
self._router.add(method, pattern, callback)
return callback
return decorator | [
"Decorator",
"to",
"add",
"route",
"for",
"a",
"request",
"with",
"any",
"HTTP",
"method",
"."
] | susam/ice | python | https://github.com/susam/ice/blob/532e685c504ea96f9e42833594585159ac1d2068/ice.py#L157-L170 | [
"def",
"route",
"(",
"self",
",",
"method",
",",
"pattern",
")",
":",
"def",
"decorator",
"(",
"callback",
")",
":",
"self",
".",
"_router",
".",
"add",
"(",
"method",
",",
"pattern",
",",
"callback",
")",
"return",
"callback",
"return",
"decorator"
] | 532e685c504ea96f9e42833594585159ac1d2068 |
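A minimal registration using the documented signature; the pattern here is a plain literal path so nothing is assumed about the routing wildcard syntax:

    app = Ice()

    @app.route('GET', '/ping')
    def ping():
        return 'pong'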
test | Ice.error | Decorator to add a callback that generates error page.
The *status* parameter specifies the HTTP response status code
for which the decorated callback should be invoked. If the
*status* argument is not specified, then the decorated callable
is considered to be a fallback callback.
A fallback callback, when defined, is invoked to generate the
error page for any HTTP response representing an error when
there is no error handler defined explicitly for the response
code of the HTTP response.
Arguments:
status(int, optional): HTTP response status code.
Returns:
function: Decorator function to add error handler. | ice.py | def error(self, status=None):
"""Decorator to add a callback that generates error page.
The *status* parameter specifies the HTTP response status code
for which the decorated callback should be invoked. If the
*status* argument is not specified, then the decorated callable
is considered to be a fallback callback.
A fallback callback, when defined, is invoked to generate the
error page for any HTTP response representing an error when
there is no error handler defined explicitly for the response
code of the HTTP response.
Arguments:
status(int, optional): HTTP response status code.
Returns:
function: Decorator function to add error handler.
"""
def decorator(callback):
self._error_handlers[status] = callback
return callback
return decorator | def error(self, status=None):
"""Decorator to add a callback that generates error page.
The *status* parameter specifies the HTTP response status code
for which the decorated callback should be invoked. If the
*status* argument is not specified, then the decorated callable
is considered to be a fallback callback.
A fallback callback, when defined, is invoked to generate the
error page for any HTTP response representing an error when
there is no error handler defined explicitly for the response
code of the HTTP response.
Arguments:
status(int, optional): HTTP response status code.
Returns:
function: Decorator function to add error handler.
"""
def decorator(callback):
self._error_handlers[status] = callback
return callback
return decorator | [
"Decorator",
"to",
"add",
"a",
"callback",
"that",
"generates",
"error",
"page",
"."
] | susam/ice | python | https://github.com/susam/ice/blob/532e685c504ea96f9e42833594585159ac1d2068/ice.py#L172-L194 | [
"def",
"error",
"(",
"self",
",",
"status",
"=",
"None",
")",
":",
"def",
"decorator",
"(",
"callback",
")",
":",
"self",
".",
"_error_handlers",
"[",
"status",
"]",
"=",
"callback",
"return",
"callback",
"return",
"decorator"
] | 532e685c504ea96f9e42833594585159ac1d2068 |
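A sketch of both forms the docstring describes, a handler bound to one status code plus a fallback; the page bodies are placeholders:

    @app.error(404)
    def not_found():
        return '<h1>No such page</h1>'

    @app.error()  # no status given: fallback for other error responses
    def any_error():
        return '<h1>Something went wrong</h1>'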
test | Ice.static | Send content of a static file as response.
The path to the document root directory should be specified as
the root argument. This is very important to prevent a directory
traversal attack. This method guarantees that only files within
the document root directory are served and no files outside this
directory can be accessed by a client.
The path to the actual file to be returned should be specified
as the path argument. This path must be relative to the document
root directory.
The *media_type* and *charset* arguments are used to set the
Content-Type header of the HTTP response. If *media_type*
is not specified or specified as ``None`` (the default), then it
is guessed from the filename of the file to be returned.
Arguments:
root (str): Path to document root directory.
path (str): Path to file relative to document root directory.
media_type (str, optional): Media type of file.
charset (str, optional): Character set of file.
Returns:
bytes: Content of file to be returned in the HTTP response. | ice.py | def static(self, root, path, media_type=None, charset='UTF-8'):
"""Send content of a static file as response.
The path to the document root directory should be specified as
the root argument. This is very important to prevent directory
traversal attack. This method guarantees that only files within
the document root directory are served and no files outside this
directory can be accessed by a client.
The path to the actual file to be returned should be specified
as the path argument. This path must be relative to the document
root directory.
The *media_type* and *charset* arguments are used to set the
Content-Type header of the HTTP response. If *media_type*
is not specified or specified as ``None`` (the default), then it
is guessed from the filename of the file to be returned.
Arguments:
root (str): Path to document root directory.
path (str): Path to file relative to document root directory.
media_type (str, optional): Media type of file.
charset (str, optional): Character set of file.
Returns:
bytes: Content of file to be returned in the HTTP response.
"""
root = os.path.abspath(os.path.join(root, ''))
path = os.path.abspath(os.path.join(root, path.lstrip('/\\')))
# Save the filename from the path in the response state, so that
# a following download() call can default to this filename for
# downloadable file when filename is not explicitly specified.
self.response.state['filename'] = os.path.basename(path)
if not path.startswith(root):
return 403
elif not os.path.isfile(path):
return 404
if media_type is not None:
self.response.media_type = media_type
else:
self.response.media_type = mimetypes.guess_type(path)[0]
self.response.charset = charset
with open(path, 'rb') as f:
return f.read() | def static(self, root, path, media_type=None, charset='UTF-8'):
"""Send content of a static file as response.
The path to the document root directory should be specified as
the root argument. This is very important to prevent directory
traversal attack. This method guarantees that only files within
the document root directory are served and no files outside this
directory can be accessed by a client.
The path to the actual file to be returned should be specified
as the path argument. This path must be relative to the document
directory.
The *media_type* and *charset* arguments are used to set the
Content-Type header of the HTTP response. If *media_type*
is not specified or specified as ``None`` (the default), then it
is guessed from the filename of the file to be returned.
Arguments:
root (str): Path to document root directory.
path (str): Path to file relative to document root directory.
media_type (str, optional): Media type of file.
charset (str, optional): Character set of file.
Returns:
bytes: Content of file to be returned in the HTTP response.
"""
root = os.path.abspath(os.path.join(root, ''))
path = os.path.abspath(os.path.join(root, path.lstrip('/\\')))
# Save the filename from the path in the response state, so that
# a following download() call can default to this filename for
# downloadable file when filename is not explicitly specified.
self.response.state['filename'] = os.path.basename(path)
if not path.startswith(root):
return 403
elif not os.path.isfile(path):
return 404
if media_type is not None:
self.response.media_type = media_type
else:
self.response.media_type = mimetypes.guess_type(path)[0]
self.response.charset = charset
with open(path, 'rb') as f:
return f.read() | [
"Send",
"content",
"of",
"a",
"static",
"file",
"as",
"response",
"."
] | susam/ice | python | https://github.com/susam/ice/blob/532e685c504ea96f9e42833594585159ac1d2068/ice.py#L196-L243 | [
"def",
"static",
"(",
"self",
",",
"root",
",",
"path",
",",
"media_type",
"=",
"None",
",",
"charset",
"=",
"'UTF-8'",
")",
":",
"root",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"''",
")",
")",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"path",
".",
"lstrip",
"(",
"'/\\\\'",
")",
")",
")",
"# Save the filename from the path in the response state, so that",
"# a following download() call can default to this filename for",
"# downloadable file when filename is not explicitly specified.",
"self",
".",
"response",
".",
"state",
"[",
"'filename'",
"]",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"path",
")",
"if",
"not",
"path",
".",
"startswith",
"(",
"root",
")",
":",
"return",
"403",
"elif",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"return",
"404",
"if",
"media_type",
"is",
"not",
"None",
":",
"self",
".",
"response",
".",
"media_type",
"=",
"media_type",
"else",
":",
"self",
".",
"response",
".",
"media_type",
"=",
"mimetypes",
".",
"guess_type",
"(",
"path",
")",
"[",
"0",
"]",
"self",
".",
"response",
".",
"charset",
"=",
"charset",
"with",
"open",
"(",
"path",
",",
"'rb'",
")",
"as",
"f",
":",
"return",
"f",
".",
"read",
"(",
")"
] | 532e685c504ea96f9e42833594585159ac1d2068 |
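A hedged usage sketch for static(): it assumes `app` exposes a get() routing decorator (consistent with the Router records below), and the directory path is illustrative.

@app.get('/logo')
def logo():
    # Serves /var/www/assets/logo.png; per the code above, paths escaping
    # the root yield 403 and missing files yield 404.
    return app.static('/var/www/assets', 'logo.png')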
test | Ice.download | Send content as attachment (downloadable file).
The *content* is sent after setting Content-Disposition header
such that the client prompts the user to save the content
locally as a file. An HTTP response status code may be specified
as *content*. If the status code is not ``200``, then this
method does nothing and returns the status code.
The filename used for the download is determined according to
the following rules. The rules are followed in the specified
order.
1. If *filename* is specified, then the base name from this
argument, i.e. ``os.path.basename(filename)``, is used as the
filename for the download.
2. If *filename* is not specified or specified as ``None``
(the default), then the base name from the file path
specified to a previous :meth:`static` call made while
handling the current request is used.
3. If *filename* is not specified and there was no
:meth:`static` call made previously for the current
request, then the base name from the current HTTP request
path is used.
4. As a result of the above steps, if the resultant *filename*
turns out to be empty, then :exc:`ice.LogicError` is raised.
The *media_type* and *charset* arguments are used in the same
manner as they are used in :meth:`static`.
Arguments:
content (str, bytes or int): Content to be sent as download or
HTTP status code of the response to be returned.
filename (str): Filename to use for saving the content
media_type (str, optional): Media type of file.
charset (str, optional): Character set of file.
Returns:
content, i.e. the first argument passed to this method.
Raises:
LogicError: When filename cannot be determined. | ice.py | def download(self, content, filename=None,
media_type=None, charset='UTF-8'):
"""Send content as attachment (downloadable file).
The *content* is sent after setting Content-Disposition header
such that the client prompts the user to save the content
locally as a file. An HTTP response status code may be specified
as *content*. If the status code is not ``200``, then this
method does nothing and returns the status code.
The filename used for the download is determined according to
the following rules. The rules are followed in the specified
order.
1. If *filename* is specified, then the base name from this
argument, i.e. ``os.path.basename(filename)``, is used as the
filename for the download.
2. If *filename* is not specified or specified as ``None``
(the default), then the base name from the file path
specified to a previous :meth:`static` call made while
handling the current request is used.
3. If *filename* is not specified and there was no
:meth:`static` call made previously for the current
request, then the base name from the current HTTP request
path is used.
4. As a result of the above steps, if the resultant *filename*
turns out to be empty, then :exc:`ice.LogicError` is raised.
The *media_type* and *charset* arguments are used in the same
manner as they are used in :meth:`static`.
Arguments:
content (str, bytes or int): Content to be sent as download or
HTTP status code of the response to be returned.
filename (str): Filename to use for saving the content
media_type (str, optional): Media type of file.
charset (str, optional): Character set of file.
Returns:
content, i.e. the first argument passed to this method.
Raises:
LogicError: When filename cannot be determined.
"""
if isinstance(content, int) and content != 200:
return content
if filename is not None:
filename = os.path.basename(filename)
elif 'filename' in self.response.state:
filename = self.response.state['filename']
else:
filename = os.path.basename(self.request.path)
if filename == '':
raise LogicError('Cannot determine filename for download')
if media_type is not None:
self.response.media_type = media_type
else:
self.response.media_type = mimetypes.guess_type(filename)[0]
self.response.charset = charset
self.response.add_header('Content-Disposition', 'attachment; '
'filename="{}"'.format(filename))
return content | def download(self, content, filename=None,
media_type=None, charset='UTF-8'):
"""Send content as attachment (downloadable file).
The *content* is sent after setting Content-Disposition header
such that the client prompts the user to save the content
locally as a file. An HTTP response status code may be specified
as *content*. If the status code is not ``200``, then this
method does nothing and returns the status code.
The filename used for the download is determined according to
the following rules. The rules are followed in the specified
order.
1. If *filename* is specified, then the base name from this
argument, i.e. ``os.path.basename(filename)``, is used as the
filename for the download.
2. If *filename* is not specified or specified as ``None``
(the default), then the base name from the file path
specified to a previous :meth:`static` call made while
handling the current request is used.
3. If *filename* is not specified and there was no
:meth:`static` call made previously for the current
request, then the base name from the current HTTP request
path is used.
4. As a result of the above steps, if the resultant *filename*
turns out to be empty, then :exc:`ice.LogicError` is raised.
The *media_type* and *charset* arguments are used in the same
manner as they are used in :meth:`static`.
Arguments:
content (str, bytes or int): Content to be sent as download or
HTTP status code of the response to be returned.
filename (str): Filename to use for saving the content
media_type (str, optional): Media type of file.
charset (str, optional): Character set of file.
Returns:
content, i.e. the first argument passed to this method.
Raises:
LogicError: When filename cannot be determined.
"""
if isinstance(content, int) and content != 200:
return content
if filename is not None:
filename = os.path.basename(filename)
elif 'filename' in self.response.state:
filename = self.response.state['filename']
else:
filename = os.path.basename(self.request.path)
if filename == '':
raise LogicError('Cannot determine filename for download')
if media_type is not None:
self.response.media_type = media_type
else:
self.response.media_type = mimetypes.guess_type(filename)[0]
self.response.charset = charset
self.response.add_header('Content-Disposition', 'attachment; '
'filename="{}"'.format(filename))
return content | [
"Send",
"content",
"as",
"attachment",
"(",
"downloadable",
"file",
")",
"."
] | susam/ice | python | https://github.com/susam/ice/blob/532e685c504ea96f9e42833594585159ac1d2068/ice.py#L245-L308 | [
"def",
"download",
"(",
"self",
",",
"content",
",",
"filename",
"=",
"None",
",",
"media_type",
"=",
"None",
",",
"charset",
"=",
"'UTF-8'",
")",
":",
"if",
"isinstance",
"(",
"content",
",",
"int",
")",
"and",
"content",
"!=",
"200",
":",
"return",
"content",
"if",
"filename",
"is",
"not",
"None",
":",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"filename",
")",
"elif",
"'filename'",
"in",
"self",
".",
"response",
".",
"state",
":",
"filename",
"=",
"self",
".",
"response",
".",
"state",
"[",
"'filename'",
"]",
"else",
":",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"self",
".",
"request",
".",
"path",
")",
"if",
"filename",
"==",
"''",
":",
"raise",
"LogicError",
"(",
"'Cannot determine filename for download'",
")",
"if",
"media_type",
"is",
"not",
"None",
":",
"self",
".",
"response",
".",
"media_type",
"=",
"media_type",
"else",
":",
"self",
".",
"response",
".",
"media_type",
"=",
"mimetypes",
".",
"guess_type",
"(",
"filename",
")",
"[",
"0",
"]",
"self",
".",
"response",
".",
"charset",
"=",
"charset",
"self",
".",
"response",
".",
"add_header",
"(",
"'Content-Disposition'",
",",
"'attachment; '",
"'filename=\"{}\"'",
".",
"format",
"(",
"filename",
")",
")",
"return",
"content"
] | 532e685c504ea96f9e42833594585159ac1d2068 |
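A hedged usage sketch for download(): the route decorator and CSV payload are assumptions; the filename and media_type arguments follow the signature shown above.

@app.get('/report')
def report():
    # Sets Content-Disposition so the client saves the body as report.csv.
    return app.download('a,b,c\n1,2,3\n', filename='report.csv',
                        media_type='text/csv')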
test | Ice._get_error_page_callback | Return an error page for the current response status. | ice.py | def _get_error_page_callback(self):
"""Return an error page for the current response status."""
if self.response.status in self._error_handlers:
return self._error_handlers[self.response.status]
elif None in self._error_handlers:
return self._error_handlers[None]
else:
# Rudimentary error handler if no error handler was found
self.response.media_type = 'text/plain'
return lambda: self.response.status_line | def _get_error_page_callback(self):
"""Return an error page for the current response status."""
if self.response.status in self._error_handlers:
return self._error_handlers[self.response.status]
elif None in self._error_handlers:
return self._error_handlers[None]
else:
# Rudimentary error handler if no error handler was found
self.response.media_type = 'text/plain'
return lambda: self.response.status_line | [
"Return",
"an",
"error",
"page",
"for",
"the",
"current",
"response",
"status",
"."
] | susam/ice | python | https://github.com/susam/ice/blob/532e685c504ea96f9e42833594585159ac1d2068/ice.py#L359-L368 | [
"def",
"_get_error_page_callback",
"(",
"self",
")",
":",
"if",
"self",
".",
"response",
".",
"status",
"in",
"self",
".",
"_error_handlers",
":",
"return",
"self",
".",
"_error_handlers",
"[",
"self",
".",
"response",
".",
"status",
"]",
"elif",
"None",
"in",
"self",
".",
"_error_handlers",
":",
"return",
"self",
".",
"_error_handlers",
"[",
"None",
"]",
"else",
":",
"# Rudimentary error handler if no error handler was found",
"self",
".",
"response",
".",
"media_type",
"=",
"'text/plain'",
"return",
"lambda",
":",
"self",
".",
"response",
".",
"status_line"
] | 532e685c504ea96f9e42833594585159ac1d2068 |
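A hedged sketch of the lookup order above: an exact status handler wins, then the None fallback, else the rudimentary plain-text branch. The `app` object is an assumption.

app.response.status = 404
page = app._get_error_page_callback()()
# With no handlers registered, the rudimentary branch returns the
# status line, e.g. '404 Not Found', with media type text/plain.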
test | Router.add | Add a route.
Arguments:
method (str): HTTP method, e.g. GET, POST, etc.
pattern (str): Pattern that request paths must match.
callback (str): Route handler that is invoked when a request
path matches the *pattern*. | ice.py | def add(self, method, pattern, callback):
"""Add a route.
Arguments:
method (str): HTTP method, e.g. GET, POST, etc.
pattern (str): Pattern that request paths must match.
callback (str): Route handler that is invoked when a request
path matches the *pattern*.
"""
pat_type, pat = self._normalize_pattern(pattern)
if pat_type == 'literal':
self._literal[method][pat] = callback
elif pat_type == 'wildcard':
self._wildcard[method].append(WildcardRoute(pat, callback))
else:
self._regex[method].append(RegexRoute(pat, callback)) | def add(self, method, pattern, callback):
"""Add a route.
Arguments:
method (str): HTTP method, e.g. GET, POST, etc.
pattern (str): Pattern that request paths must match.
callback (str): Route handler that is invoked when a request
path matches the *pattern*.
"""
pat_type, pat = self._normalize_pattern(pattern)
if pat_type == 'literal':
self._literal[method][pat] = callback
elif pat_type == 'wildcard':
self._wildcard[method].append(WildcardRoute(pat, callback))
else:
self._regex[method].append(RegexRoute(pat, callback)) | [
"Add",
"a",
"route",
"."
] | susam/ice | python | https://github.com/susam/ice/blob/532e685c504ea96f9e42833594585159ac1d2068/ice.py#L381-L396 | [
"def",
"add",
"(",
"self",
",",
"method",
",",
"pattern",
",",
"callback",
")",
":",
"pat_type",
",",
"pat",
"=",
"self",
".",
"_normalize_pattern",
"(",
"pattern",
")",
"if",
"pat_type",
"==",
"'literal'",
":",
"self",
".",
"_literal",
"[",
"method",
"]",
"[",
"pat",
"]",
"=",
"callback",
"elif",
"pat_type",
"==",
"'wildcard'",
":",
"self",
".",
"_wildcard",
"[",
"method",
"]",
".",
"append",
"(",
"WildcardRoute",
"(",
"pat",
",",
"callback",
")",
")",
"else",
":",
"self",
".",
"_regex",
"[",
"method",
"]",
".",
"append",
"(",
"RegexRoute",
"(",
"pat",
",",
"callback",
")",
")"
] | 532e685c504ea96f9e42833594585159ac1d2068 |
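A hedged sketch of the three pattern kinds accepted by add(); the handler callables are placeholders, and the explicit 'regex:' prefix follows _normalize_pattern() in a later record.

router = Router()
router.add('GET', '/about', about_handler)                 # literal
router.add('GET', '/user/<name>', user_handler)            # wildcard (grammar assumed)
router.add('GET', r'regex:^/post/(\d+)$', post_handler)    # regex via explicit prefix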
test | Router.contains_method | Check if there is at least one handler for *method*.
Arguments:
method (str): HTTP method name, e.g. GET, POST, etc.
Returns:
``True`` if there is at least one route defined for *method*,
``False`` otherwise | ice.py | def contains_method(self, method):
"""Check if there is at least one handler for *method*.
Arguments:
method (str): HTTP method name, e.g. GET, POST, etc.
Returns:
``True`` if there is at least one route defined for *method*,
``False`` otherwise
"""
return method in itertools.chain(self._literal, self._wildcard,
self._regex) | def contains_method(self, method):
"""Check if there is at least one handler for *method*.
Arguments:
method (str): HTTP method name, e.g. GET, POST, etc.
Returns:
``True`` if there is at least one route defined for *method*,
``False`` otherwise
"""
return method in itertools.chain(self._literal, self._wildcard,
self._regex) | [
"Check",
"if",
"there",
"is",
"at",
"least",
"one",
"handler",
"for",
"*",
"method",
"*",
"."
] | susam/ice | python | https://github.com/susam/ice/blob/532e685c504ea96f9e42833594585159ac1d2068/ice.py#L398-L409 | [
"def",
"contains_method",
"(",
"self",
",",
"method",
")",
":",
"return",
"method",
"in",
"itertools",
".",
"chain",
"(",
"self",
".",
"_literal",
",",
"self",
".",
"_wildcard",
",",
"self",
".",
"_regex",
")"
] | 532e685c504ea96f9e42833594585159ac1d2068 |
test | Router.resolve | Resolve a request to a route handler.
Arguments:
method (str): HTTP method, e.g. GET, POST, etc.
path (str): Request path
Returns:
tuple or None: A tuple of three items:
1. Route handler (callable)
2. Positional arguments (list)
3. Keyword arguments (dict)
``None`` if no route matches the request. | ice.py | def resolve(self, method, path):
"""Resolve a request to a route handler.
Arguments:
method (str): HTTP method, e.g. GET, POST, etc.
path (str): Request path
Returns:
tuple or None: A tuple of three items:
1. Route handler (callable)
2. Positional arguments (list)
3. Keyword arguments (dict)
``None`` if no route matches the request.
"""
if method in self._literal and path in self._literal[method]:
return self._literal[method][path], [], {}
else:
return self._resolve_non_literal_route(method, path) | def resolve(self, method, path):
"""Resolve a request to a route handler.
Arguments:
method (str): HTTP method, e.g. GET, POST, etc.
path (str): Request path
Returns:
tuple or None: A tuple of three items:
1. Route handler (callable)
2. Positional arguments (list)
3. Keyword arguments (dict)
``None`` if no route matches the request.
"""
if method in self._literal and path in self._literal[method]:
return self._literal[method][path], [], {}
else:
return self._resolve_non_literal_route(method, path) | [
"Resolve",
"a",
"request",
"to",
"a",
"route",
"handler",
"."
] | susam/ice | python | https://github.com/susam/ice/blob/532e685c504ea96f9e42833594585159ac1d2068/ice.py#L411-L430 | [
"def",
"resolve",
"(",
"self",
",",
"method",
",",
"path",
")",
":",
"if",
"method",
"in",
"self",
".",
"_literal",
"and",
"path",
"in",
"self",
".",
"_literal",
"[",
"method",
"]",
":",
"return",
"self",
".",
"_literal",
"[",
"method",
"]",
"[",
"path",
"]",
",",
"[",
"]",
",",
"{",
"}",
"else",
":",
"return",
"self",
".",
"_resolve_non_literal_route",
"(",
"method",
",",
"path",
")"
] | 532e685c504ea96f9e42833594585159ac1d2068 |
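A hedged sketch of consuming resolve()'s three-tuple; the router and path carry over from the add() sketch above.

result = router.resolve('GET', '/user/alice')
if result is not None:
    callback, args, kwargs = result
    body = callback(*args, **kwargs)   # invoke the matched handler
else:
    body = None                        # no route matched; caller decides (e.g. 404)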
test | Router._resolve_non_literal_route | Resolve a request to a wildcard or regex route handler.
Arguments:
method (str): HTTP method name, e.g. GET, POST, etc.
path (str): Request path
Returns:
tuple or None: A tuple of three items:
1. Route handler (callable)
2. Positional arguments (list)
3. Keyword arguments (dict)
``None`` if no route matches the request. | ice.py | def _resolve_non_literal_route(self, method, path):
"""Resolve a request to a wildcard or regex route handler.
Arguments:
method (str): HTTP method name, e.g. GET, POST, etc.
path (str): Request path
Returns:
tuple or None: A tuple of three items:
1. Route handler (callable)
2. Positional arguments (list)
3. Keyword arguments (dict)
``None`` if no route matches the request.
"""
for route_dict in (self._wildcard, self._regex):
if method in route_dict:
for route in reversed(route_dict[method]):
callback_data = route.match(path)
if callback_data is not None:
return callback_data
return None | def _resolve_non_literal_route(self, method, path):
"""Resolve a request to a wildcard or regex route handler.
Arguments:
method (str): HTTP method name, e.g. GET, POST, etc.
path (str): Request path
Returns:
tuple or None: A tuple of three items:
1. Route handler (callable)
2. Positional arguments (list)
3. Keyword arguments (dict)
``None`` if no route matches the request.
"""
for route_dict in (self._wildcard, self._regex):
if method in route_dict:
for route in reversed(route_dict[method]):
callback_data = route.match(path)
if callback_data is not None:
return callback_data
return None | [
"Resolve",
"a",
"request",
"to",
"a",
"wildcard",
"or",
"regex",
"route",
"handler",
"."
] | susam/ice | python | https://github.com/susam/ice/blob/532e685c504ea96f9e42833594585159ac1d2068/ice.py#L433-L455 | [
"def",
"_resolve_non_literal_route",
"(",
"self",
",",
"method",
",",
"path",
")",
":",
"for",
"route_dict",
"in",
"(",
"self",
".",
"_wildcard",
",",
"self",
".",
"_regex",
")",
":",
"if",
"method",
"in",
"route_dict",
":",
"for",
"route",
"in",
"reversed",
"(",
"route_dict",
"[",
"method",
"]",
")",
":",
"callback_data",
"=",
"route",
".",
"match",
"(",
"path",
")",
"if",
"callback_data",
"is",
"not",
"None",
":",
"return",
"callback_data",
"return",
"None"
] | 532e685c504ea96f9e42833594585159ac1d2068 |
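A note on the reversed() iteration above, sketched with assumed handlers: among overlapping wildcard or regex routes, the most recently added one wins.

router.add('GET', '/user/<name>', generic_user)
router.add('GET', '/user/<name>', newer_user)
router.resolve('GET', '/user/alice')   # -> (newer_user, [], {'name': 'alice'})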
test | Router._normalize_pattern | Return a normalized form of the pattern.
Normalize the pattern by removing pattern type prefix if it
exists in the pattern. Then return the pattern type and the
pattern as a tuple of two strings.
Arguments:
pattern (str): Route pattern to match request paths
Returns:
tuple: Tuple of pattern type (str) and pattern (str) | ice.py | def _normalize_pattern(pattern):
"""Return a normalized form of the pattern.
Normalize the pattern by removing pattern type prefix if it
exists in the pattern. Then return the pattern type and the
pattern as a tuple of two strings.
Arguments:
pattern (str): Route pattern to match request paths
Returns:
tuple: Tuple of pattern type (str) and pattern (str)
"""
if pattern.startswith('regex:'):
pattern_type = 'regex'
pattern = pattern[len('regex:'):]
elif pattern.startswith('wildcard:'):
pattern_type = 'wildcard'
pattern = pattern[len('wildcard:'):]
elif pattern.startswith('literal:'):
pattern_type = 'literal'
pattern = pattern[len('literal:'):]
elif RegexRoute.like(pattern):
pattern_type = 'regex'
elif WildcardRoute.like(pattern):
pattern_type = 'wildcard'
else:
pattern_type = 'literal'
return pattern_type, pattern | def _normalize_pattern(pattern):
"""Return a normalized form of the pattern.
Normalize the pattern by removing pattern type prefix if it
exists in the pattern. Then return the pattern type and the
pattern as a tuple of two strings.
Arguments:
pattern (str): Route pattern to match request paths
Returns:
tuple: Tuple of pattern type (str) and pattern (str)
"""
if pattern.startswith('regex:'):
pattern_type = 'regex'
pattern = pattern[len('regex:'):]
elif pattern.startswith('wildcard:'):
pattern_type = 'wildcard'
pattern = pattern[len('wildcard:'):]
elif pattern.startswith('literal:'):
pattern_type = 'literal'
pattern = pattern[len('literal:'):]
elif RegexRoute.like(pattern):
pattern_type = 'regex'
elif WildcardRoute.like(pattern):
pattern_type = 'wildcard'
else:
pattern_type = 'literal'
return pattern_type, pattern | [
"Return",
"a",
"normalized",
"form",
"of",
"the",
"pattern",
"."
] | susam/ice | python | https://github.com/susam/ice/blob/532e685c504ea96f9e42833594585159ac1d2068/ice.py#L458-L486 | [
"def",
"_normalize_pattern",
"(",
"pattern",
")",
":",
"if",
"pattern",
".",
"startswith",
"(",
"'regex:'",
")",
":",
"pattern_type",
"=",
"'regex'",
"pattern",
"=",
"pattern",
"[",
"len",
"(",
"'regex:'",
")",
":",
"]",
"elif",
"pattern",
".",
"startswith",
"(",
"'wildcard:'",
")",
":",
"pattern_type",
"=",
"'wildcard'",
"pattern",
"=",
"pattern",
"[",
"len",
"(",
"'wildcard:'",
")",
":",
"]",
"elif",
"pattern",
".",
"startswith",
"(",
"'literal:'",
")",
":",
"pattern_type",
"=",
"'literal'",
"pattern",
"=",
"pattern",
"[",
"len",
"(",
"'literal:'",
")",
":",
"]",
"elif",
"RegexRoute",
".",
"like",
"(",
"pattern",
")",
":",
"pattern_type",
"=",
"'regex'",
"elif",
"WildcardRoute",
".",
"like",
"(",
"pattern",
")",
":",
"pattern_type",
"=",
"'wildcard'",
"else",
":",
"pattern_type",
"=",
"'literal'",
"return",
"pattern_type",
",",
"pattern"
] | 532e685c504ea96f9e42833594585159ac1d2068 |
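A hedged sketch of the normalization behavior, derived directly from the branches above; RegexRoute.like() and WildcardRoute.like() decide the unprefixed cases.

Router._normalize_pattern('regex:^/a$')    # -> ('regex', '^/a$')
Router._normalize_pattern('literal:/a')    # -> ('literal', '/a')
Router._normalize_pattern('/plain/path')   # -> ('literal', '/plain/path'),
                                           # assuming neither like() classifier matches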
test | WildcardRoute.match | Return route handler with arguments if path matches this route.
Arguments:
path (str): Request path
Returns:
tuple or None: A tuple of three items:
1. Route handler (callable)
2. Positional arguments (list)
3. Keyword arguments (dict)
``None`` if the route does not match the path. | ice.py | def match(self, path):
"""Return route handler with arguments if path matches this route.
Arguments:
path (str): Request path
Returns:
tuple or None: A tuple of three items:
1. Route handler (callable)
2. Positional arguments (list)
3. Keyword arguments (dict)
``None`` if the route does not match the path.
"""
match = self._re.search(path)
if match is None:
return None
args = []
kwargs = {}
for i, wildcard in enumerate(self._wildcards):
if wildcard.name == '!':
continue
value = wildcard.value(match.groups()[i])
if not wildcard.name:
args.append(value)
else:
kwargs[wildcard.name] = value
return self._callback, args, kwargs | def match(self, path):
"""Return route handler with arguments if path matches this route.
Arguments:
path (str): Request path
Returns:
tuple or None: A tuple of three items:
1. Route handler (callable)
2. Positional arguments (list)
3. Keyword arguments (dict)
``None`` if the route does not match the path.
"""
match = self._re.search(path)
if match is None:
return None
args = []
kwargs = {}
for i, wildcard in enumerate(self._wildcards):
if wildcard.name == '!':
continue
value = wildcard.value(match.groups()[i])
if not wildcard.name:
args.append(value)
else:
kwargs[wildcard.name] = value
return self._callback, args, kwargs | [
"Return",
"route",
"handler",
"with",
"arguments",
"if",
"path",
"matches",
"this",
"route",
"."
] | susam/ice | python | https://github.com/susam/ice/blob/532e685c504ea96f9e42833594585159ac1d2068/ice.py#L515-L543 | [
"def",
"match",
"(",
"self",
",",
"path",
")",
":",
"match",
"=",
"self",
".",
"_re",
".",
"search",
"(",
"path",
")",
"if",
"match",
"is",
"None",
":",
"return",
"None",
"args",
"=",
"[",
"]",
"kwargs",
"=",
"{",
"}",
"for",
"i",
",",
"wildcard",
"in",
"enumerate",
"(",
"self",
".",
"_wildcards",
")",
":",
"if",
"wildcard",
".",
"name",
"==",
"'!'",
":",
"continue",
"value",
"=",
"wildcard",
".",
"value",
"(",
"match",
".",
"groups",
"(",
")",
"[",
"i",
"]",
")",
"if",
"not",
"wildcard",
".",
"name",
":",
"args",
".",
"append",
"(",
"value",
")",
"else",
":",
"kwargs",
"[",
"wildcard",
".",
"name",
"]",
"=",
"value",
"return",
"self",
".",
"_callback",
",",
"args",
",",
"kwargs"
] | 532e685c504ea96f9e42833594585159ac1d2068 |
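A hedged sketch of WildcardRoute.match(): the '<name>' wildcard grammar is an assumption about the constructor, which is not shown in these records.

route = WildcardRoute('/user/<name>', handler)
route.match('/user/alice')   # -> (handler, [], {'name': 'alice'}) on a match
route.match('/nope')         # -> None when the compiled regex does not match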
test | RegexRoute.match | Return route handler with arguments if path matches this route.
Arguments:
path (str): Request path
Returns:
tuple or None: A tuple of three items:
1. Route handler (callable)
2. Positional arguments (list)
3. Keyword arguments (dict)
``None`` if the route does not match the path. | ice.py | def match(self, path):
"""Return route handler with arguments if path matches this route.
Arguments:
path (str): Request path
Returns:
tuple or None: A tuple of three items:
1. Route handler (callable)
2. Positional arguments (list)
3. Keyword arguments (dict)
``None`` if the route does not match the path.
"""
match = self._re.search(path)
if match is None:
return None
kwargs_indexes = match.re.groupindex.values()
args_indexes = [i for i in range(1, match.re.groups + 1)
if i not in kwargs_indexes]
args = [match.group(i) for i in args_indexes]
kwargs = {}
for name, index in match.re.groupindex.items():
kwargs[name] = match.group(index)
return self._callback, args, kwargs | def match(self, path):
"""Return route handler with arguments if path matches this route.
Arguments:
path (str): Request path
Returns:
tuple or None: A tuple of three items:
1. Route handler (callable)
2. Positional arguments (list)
3. Keyword arguments (dict)
``None`` if the route does not match the path.
"""
match = self._re.search(path)
if match is None:
return None
kwargs_indexes = match.re.groupindex.values()
args_indexes = [i for i in range(1, match.re.groups + 1)
if i not in kwargs_indexes]
args = [match.group(i) for i in args_indexes]
kwargs = {}
for name, index in match.re.groupindex.items():
kwargs[name] = match.group(index)
return self._callback, args, kwargs | [
"Return",
"route",
"handler",
"with",
"arguments",
"if",
"path",
"matches",
"this",
"route",
"."
] | susam/ice | python | https://github.com/susam/ice/blob/532e685c504ea96f9e42833594585159ac1d2068/ice.py#L641-L666 | [
"def",
"match",
"(",
"self",
",",
"path",
")",
":",
"match",
"=",
"self",
".",
"_re",
".",
"search",
"(",
"path",
")",
"if",
"match",
"is",
"None",
":",
"return",
"None",
"kwargs_indexes",
"=",
"match",
".",
"re",
".",
"groupindex",
".",
"values",
"(",
")",
"args_indexes",
"=",
"[",
"i",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"match",
".",
"re",
".",
"groups",
"+",
"1",
")",
"if",
"i",
"not",
"in",
"kwargs_indexes",
"]",
"args",
"=",
"[",
"match",
".",
"group",
"(",
"i",
")",
"for",
"i",
"in",
"args_indexes",
"]",
"kwargs",
"=",
"{",
"}",
"for",
"name",
",",
"index",
"in",
"match",
".",
"re",
".",
"groupindex",
".",
"items",
"(",
")",
":",
"kwargs",
"[",
"name",
"]",
"=",
"match",
".",
"group",
"(",
"index",
")",
"return",
"self",
".",
"_callback",
",",
"args",
",",
"kwargs"
] | 532e685c504ea96f9e42833594585159ac1d2068 |
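A hedged sketch of RegexRoute.match(); per the groupindex logic above, named groups become kwargs and unnamed groups become positional args.

route = RegexRoute(r'^/post/(?P<pid>\d+)/(\w+)$', handler)
route.match('/post/42/edit')   # -> (handler, ['edit'], {'pid': '42'})
route.match('/post/x/edit')    # -> None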
test | Response.response | Return the HTTP response body.
Returns:
bytes: HTTP response body as a sequence of bytes | ice.py | def response(self):
"""Return the HTTP response body.
Returns:
bytes: HTTP response body as a sequence of bytes
"""
if isinstance(self.body, bytes):
out = self.body
elif isinstance(self.body, str):
out = self.body.encode(self.charset)
else:
out = b''
self.add_header('Content-Type', self.content_type)
self.add_header('Content-Length', str(len(out)))
self.start(self.status_line, self._headers)
return [out] | def response(self):
"""Return the HTTP response body.
Returns:
bytes: HTTP response body as a sequence of bytes
"""
if isinstance(self.body, bytes):
out = self.body
elif isinstance(self.body, str):
out = self.body.encode(self.charset)
else:
out = b''
self.add_header('Content-Type', self.content_type)
self.add_header('Content-Length', str(len(out)))
self.start(self.status_line, self._headers)
return [out] | [
"Return",
"the",
"HTTP",
"response",
"body",
"."
] | susam/ice | python | https://github.com/susam/ice/blob/532e685c504ea96f9e42833594585159ac1d2068/ice.py#L766-L782 | [
"def",
"response",
"(",
"self",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"body",
",",
"bytes",
")",
":",
"out",
"=",
"self",
".",
"body",
"elif",
"isinstance",
"(",
"self",
".",
"body",
",",
"str",
")",
":",
"out",
"=",
"self",
".",
"body",
".",
"encode",
"(",
"self",
".",
"charset",
")",
"else",
":",
"out",
"=",
"b''",
"self",
".",
"add_header",
"(",
"'Content-Type'",
",",
"self",
".",
"content_type",
")",
"self",
".",
"add_header",
"(",
"'Content-Length'",
",",
"str",
"(",
"len",
"(",
"out",
")",
")",
")",
"self",
".",
"start",
"(",
"self",
".",
"status_line",
",",
"self",
".",
"_headers",
")",
"return",
"[",
"out",
"]"
] | 532e685c504ea96f9e42833594585159ac1d2068 |
test | Response.add_header | Add an HTTP header to response object.
Arguments:
name (str): HTTP header field name
value (str): HTTP header field value | ice.py | def add_header(self, name, value):
"""Add an HTTP header to response object.
Arguments:
name (str): HTTP header field name
value (str): HTTP header field value
"""
if value is not None:
self._headers.append((name, value)) | def add_header(self, name, value):
"""Add an HTTP header to response object.
Arguments:
name (str): HTTP header field name
value (str): HTTP header field value
"""
if value is not None:
self._headers.append((name, value)) | [
"Add",
"an",
"HTTP",
"header",
"to",
"response",
"object",
"."
] | susam/ice | python | https://github.com/susam/ice/blob/532e685c504ea96f9e42833594585159ac1d2068/ice.py#L784-L792 | [
"def",
"add_header",
"(",
"self",
",",
"name",
",",
"value",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"self",
".",
"_headers",
".",
"append",
"(",
"(",
"name",
",",
"value",
")",
")"
] | 532e685c504ea96f9e42833594585159ac1d2068 |
test | Response.set_cookie | Add a Set-Cookie header to response object.
For a description about cookie attribute values, see
https://docs.python.org/3/library/http.cookies.html#http.cookies.Morsel.
Arguments:
name (str): Name of the cookie
value (str): Value of the cookie
attrs (dict): Dictionary with cookie attribute keys and
values. | ice.py | def set_cookie(self, name, value, attrs={}):
"""Add a Set-Cookie header to response object.
For a description about cookie attribute values, see
https://docs.python.org/3/library/http.cookies.html#http.cookies.Morsel.
Arguments:
name (str): Name of the cookie
value (str): Value of the cookie
attrs (dict): Dictionary with cookie attribute keys and
values.
"""
cookie = http.cookies.SimpleCookie()
cookie[name] = value
for key, value in attrs.items():
cookie[name][key] = value
self.add_header('Set-Cookie', cookie[name].OutputString()) | def set_cookie(self, name, value, attrs={}):
"""Add a Set-Cookie header to response object.
For a description about cookie attribute values, see
https://docs.python.org/3/library/http.cookies.html#http.cookies.Morsel.
Arguments:
name (str): Name of the cookie
value (str): Value of the cookie
attrs (dict): Dictionary with cookie attribute keys and
values.
"""
cookie = http.cookies.SimpleCookie()
cookie[name] = value
for key, value in attrs.items():
cookie[name][key] = value
self.add_header('Set-Cookie', cookie[name].OutputString()) | [
"Add",
"a",
"Set",
"-",
"Cookie",
"header",
"to",
"response",
"object",
"."
] | susam/ice | python | https://github.com/susam/ice/blob/532e685c504ea96f9e42833594585159ac1d2068/ice.py#L794-L810 | [
"def",
"set_cookie",
"(",
"self",
",",
"name",
",",
"value",
",",
"attrs",
"=",
"{",
"}",
")",
":",
"cookie",
"=",
"http",
".",
"cookies",
".",
"SimpleCookie",
"(",
")",
"cookie",
"[",
"name",
"]",
"=",
"value",
"for",
"key",
",",
"value",
"in",
"attrs",
".",
"items",
"(",
")",
":",
"cookie",
"[",
"name",
"]",
"[",
"key",
"]",
"=",
"value",
"self",
".",
"add_header",
"(",
"'Set-Cookie'",
",",
"cookie",
"[",
"name",
"]",
".",
"OutputString",
"(",
")",
")"
] | 532e685c504ea96f9e42833594585159ac1d2068 |
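A hedged usage sketch: `response` stands in for a Response instance, and the attribute keys are standard http.cookies.Morsel keys, as the docstring's link indicates.

response.set_cookie('sid', 'abc123',
                    {'path': '/', 'httponly': True, 'max-age': 3600})
# Emits one Set-Cookie header, e.g.:
#   Set-Cookie: sid=abc123; HttpOnly; Max-Age=3600; Path=/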
test | Response.status_line | Return the HTTP response status line.
The status line is determined from :attr:`status` code. For
example, if the status code is 200, then '200 OK' is returned.
Returns:
str: Status line | ice.py | def status_line(self):
"""Return the HTTP response status line.
The status line is determined from :attr:`status` code. For
example, if the status code is 200, then '200 OK' is returned.
Returns:
str: Status line
"""
return (str(self.status) + ' ' +
Response._responses[self.status].phrase) | def status_line(self):
"""Return the HTTP response status line.
The status line is determined from :attr:`status` code. For
example, if the status code is 200, then '200 OK' is returned.
Returns:
str: Status line
"""
return (str(self.status) + ' ' +
Response._responses[self.status].phrase) | [
"Return",
"the",
"HTTP",
"response",
"status",
"line",
"."
] | susam/ice | python | https://github.com/susam/ice/blob/532e685c504ea96f9e42833594585159ac1d2068/ice.py#L813-L823 | [
"def",
"status_line",
"(",
"self",
")",
":",
"return",
"(",
"str",
"(",
"self",
".",
"status",
")",
"+",
"' '",
"+",
"Response",
".",
"_responses",
"[",
"self",
".",
"status",
"]",
".",
"phrase",
")"
] | 532e685c504ea96f9e42833594585159ac1d2068 |
test | Response.content_type | Return the value of Content-Type header field.
The value for the Content-Type header field is determined from
the :attr:`media_type` and :attr:`charset` data attributes.
Returns:
str: Value of Content-Type header field | ice.py | def content_type(self):
"""Return the value of Content-Type header field.
The value for the Content-Type header field is determined from
the :attr:`media_type` and :attr:`charset` data attributes.
Returns:
str: Value of Content-Type header field
"""
if (self.media_type is not None and
self.media_type.startswith('text/') and
self.charset is not None):
return self.media_type + '; charset=' + self.charset
else:
return self.media_type | def content_type(self):
"""Return the value of Content-Type header field.
The value for the Content-Type header field is determined from
the :attr:`media_type` and :attr:`charset` data attributes.
Returns:
str: Value of Content-Type header field
"""
if (self.media_type is not None and
self.media_type.startswith('text/') and
self.charset is not None):
return self.media_type + '; charset=' + self.charset
else:
return self.media_type | [
"Return",
"the",
"value",
"of",
"Content",
"-",
"Type",
"header",
"field",
"."
] | susam/ice | python | https://github.com/susam/ice/blob/532e685c504ea96f9e42833594585159ac1d2068/ice.py#L835-L849 | [
"def",
"content_type",
"(",
"self",
")",
":",
"if",
"(",
"self",
".",
"media_type",
"is",
"not",
"None",
"and",
"self",
".",
"media_type",
".",
"startswith",
"(",
"'text/'",
")",
"and",
"self",
".",
"charset",
"is",
"not",
"None",
")",
":",
"return",
"self",
".",
"media_type",
"+",
"'; charset='",
"+",
"self",
".",
"charset",
"else",
":",
"return",
"self",
".",
"media_type"
] | 532e685c504ea96f9e42833594585159ac1d2068 |
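A hedged sketch of the charset rule above: the charset parameter is appended only for text/* media types. `resp` stands in for a Response instance.

resp.media_type, resp.charset = 'text/html', 'UTF-8'
resp.content_type   # -> 'text/html; charset=UTF-8'
resp.media_type = 'application/pdf'
resp.content_type   # -> 'application/pdf' (no charset parameter)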
test | MultiDict.getall | Return the list of all values for the specified key.
Arguments:
key (object): Key
default (list): Default value to return if the key does not
exist, defaults to ``[]``, i.e. an empty list.
Returns:
list: List of all values for the specified key if the key
exists, ``default`` otherwise. | ice.py | def getall(self, key, default=[]):
"""Return the list of all values for the specified key.
Arguments:
key (object): Key
default (list): Default value to return if the key does not
exist, defaults to ``[]``, i.e. an empty list.
Returns:
list: List of all values for the specified key if the key
exists, ``default`` otherwise.
"""
return self.data[key] if key in self.data else default | def getall(self, key, default=[]):
"""Return the list of all values for the specified key.
Arguments:
key (object): Key
default (list): Default value to return if the key does not
exist, defaults to ``[]``, i.e. an empty list.
Returns:
list: List of all values for the specified key if the key
exists, ``default`` otherwise.
"""
return self.data[key] if key in self.data else default | [
"Return",
"the",
"list",
"of",
"all",
"values",
"for",
"the",
"specified",
"key",
"."
] | susam/ice | python | https://github.com/susam/ice/blob/532e685c504ea96f9e42833594585159ac1d2068/ice.py#L884-L896 | [
"def",
"getall",
"(",
"self",
",",
"key",
",",
"default",
"=",
"[",
"]",
")",
":",
"return",
"self",
".",
"data",
"[",
"key",
"]",
"if",
"key",
"in",
"self",
".",
"data",
"else",
"default"
] | 532e685c504ea96f9e42833594585159ac1d2068 |
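A hedged sketch of getall(); it assumes MultiDict stores every assigned value in a per-key list (the usual multidict behavior implied by self.data[key] above), which is not shown in these records.

d = MultiDict()
d['a'] = 1
d['a'] = 2
d.getall('a')    # -> [1, 2]
d.getall('b')    # -> [] (the default)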
test | rmtree | remove all files and directories below path, including path
itself; works even when shutil.rmtree fails because of read-only
files in NFS and Windows. Follows symlinks.
`use_shutil` defaults to True; useful for testing
`followlinks` defaults to False; if set to True, shutil.rmtree is
not used. | streamcorpus_pipeline/_rmtree.py | def rmtree(path, use_shutil=True, followlinks=False, retries=10):
'''remove all files and directories below path, including path
itself; works even when shutil.rmtree fails because of read-only
files in NFS and Windows. Follows symlinks.
`use_shutil` defaults to True; useful for testing
`followlinks` defaults to False; if set to True, shutil.rmtree is
not used.
'''
if use_shutil and not followlinks:
try:
shutil.rmtree(path)
return
except Exception, exc:
logger.info('shutil.rmtree(%s) failed, so resorting to recursive delete', path)
logger.debug('\ntrapped:\n%s', traceback.format_exc(exc))
if not os.path.isdir(path):
os.remove(path)
return
## bottom up traversal removing files and then removing directories
for root, dir_names, file_names in os.walk(path, topdown=False, followlinks=followlinks):
for fname in file_names:
fpath = os.path.join(root, fname)
tries = 0
while tries < retries:
tries += 1
try:
os.remove(fpath)
break
except Exception, exc:
time.sleep(0.1)
if os.path.exists(fpath):
logger.critical('os.remove(%s) failed, so leaving data behind!!!', fpath)
logger.critical('\ntrapped:\n%s', traceback.format_exc(exc))
#logger.critical(get_open_fds())
for dname in dir_names:
full_path = os.path.join(root, dname)
if os.path.islink(full_path):
real_path = os.path.realpath(full_path)
os.remove(full_path)
full_path = real_path
os.rmdir(full_path)
if os.path.exists(path):
os.rmdir(path) | def rmtree(path, use_shutil=True, followlinks=False, retries=10):
'''remove all files and directories below path, including path
itself; works even when shutil.rmtree fails because of read-only
files in NFS and Windows. Follows symlinks.
`use_shutil` defaults to True; useful for testing
`followlinks` defaults to False; if set to True, shutil.rmtree is
not used.
'''
if use_shutil and not followlinks:
try:
shutil.rmtree(path)
return
except Exception, exc:
logger.info('shutil.rmtree(%s) failed, so resorting to recursive delete', path)
logger.debug('\ntrapped:\n%s', traceback.format_exc(exc))
if not os.path.isdir(path):
os.remove(path)
return
## bottom up traversal removing files and then removing directories
for root, dir_names, file_names in os.walk(path, topdown=False, followlinks=followlinks):
for fname in file_names:
fpath = os.path.join(root, fname)
tries = 0
while tries < retries:
tries += 1
try:
os.remove(fpath)
break
except Exception, exc:
time.sleep(0.1)
if os.path.exists(fpath):
logger.critical('os.remove(%s) failed, so leaving data behind!!!', fpath)
logger.critical('\ntrapped:\n%s', traceback.format_exc(exc))
#logger.critical(get_open_fds())
for dname in dir_names:
full_path = os.path.join(root, dname)
if os.path.islink(full_path):
real_path = os.path.realpath(full_path)
os.remove(full_path)
full_path = real_path
os.rmdir(full_path)
if os.path.exists(path):
os.rmdir(path) | [
"remove",
"all",
"files",
"and",
"directories",
"below",
"path",
"including",
"path",
"itself",
";",
"works",
"even",
"when",
"shutil",
".",
"rmtree",
"fails",
"because",
"of",
"read",
"-",
"only",
"files",
"in",
"NFS",
"and",
"Windows",
".",
"Follows",
"symlinks",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_rmtree.py#L17-L64 | [
"def",
"rmtree",
"(",
"path",
",",
"use_shutil",
"=",
"True",
",",
"followlinks",
"=",
"False",
",",
"retries",
"=",
"10",
")",
":",
"if",
"use_shutil",
"and",
"not",
"followlinks",
":",
"try",
":",
"shutil",
".",
"rmtree",
"(",
"path",
")",
"return",
"except",
"Exception",
",",
"exc",
":",
"logger",
".",
"info",
"(",
"'shutil.rmtree(%s) failed, so resorting to recursive delete'",
",",
"path",
")",
"logger",
".",
"debug",
"(",
"'\\ntrapped:\\n%s'",
",",
"traceback",
".",
"format_exc",
"(",
"exc",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"os",
".",
"remove",
"(",
"path",
")",
"return",
"## bottom up traversal removing files and then removing directories",
"for",
"root",
",",
"dir_names",
",",
"file_names",
"in",
"os",
".",
"walk",
"(",
"path",
",",
"topdown",
"=",
"False",
",",
"followlinks",
"=",
"followlinks",
")",
":",
"for",
"fname",
"in",
"file_names",
":",
"fpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"fname",
")",
"tries",
"=",
"0",
"while",
"tries",
"<",
"retries",
":",
"tries",
"+=",
"1",
"try",
":",
"os",
".",
"remove",
"(",
"fpath",
")",
"break",
"except",
"Exception",
",",
"exc",
":",
"time",
".",
"sleep",
"(",
"0.1",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"fpath",
")",
":",
"logger",
".",
"critical",
"(",
"'os.remove(%s) failed, so leaving data behind!!!'",
",",
"fpath",
")",
"logger",
".",
"critical",
"(",
"'\\ntrapped:\\n%s'",
",",
"traceback",
".",
"format_exc",
"(",
"exc",
")",
")",
"#logger.critical(get_open_fds())",
"for",
"dname",
"in",
"dir_names",
":",
"full_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"dname",
")",
"if",
"os",
".",
"path",
".",
"islink",
"(",
"full_path",
")",
":",
"real_path",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"full_path",
")",
"os",
".",
"remove",
"(",
"full_path",
")",
"full_path",
"=",
"real_path",
"os",
".",
"rmdir",
"(",
"full_path",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"os",
".",
"rmdir",
"(",
"path",
")"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
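A hedged usage sketch for rmtree(); the paths are illustrative, and the calls match the Python 2 code in the record above.

rmtree('/tmp/scratch')                         # try shutil.rmtree first, then
                                               # fall back to retrying bottom-up delete
rmtree('/tmp/linked-tree', followlinks=True)   # skip shutil, descend into symlinks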
test | get_open_fds | return list of open files for current process
.. warning: will only work on UNIX-like os-es. | streamcorpus_pipeline/_rmtree.py | def get_open_fds(verbose=False):
'''return list of open files for current process
.. warning: will only work on UNIX-like os-es.
'''
pid = os.getpid()
procs = subprocess.check_output(
[ "lsof", '-w', '-Ff', "-p", str( pid ) ] )
if verbose:
oprocs = subprocess.check_output(
[ "lsof", '-w', "-p", str( pid ) ] )
logger.info(oprocs)
open_files = filter(
lambda s: s and s[ 0 ] == 'f' and s[1: ].isdigit(),
procs.split( '\n' ) )
return open_files | def get_open_fds(verbose=False):
'''return list of open files for current process
.. warning: will only work on UNIX-like os-es.
'''
pid = os.getpid()
procs = subprocess.check_output(
[ "lsof", '-w', '-Ff', "-p", str( pid ) ] )
if verbose:
oprocs = subprocess.check_output(
[ "lsof", '-w', "-p", str( pid ) ] )
logger.info(oprocs)
open_files = filter(
lambda s: s and s[ 0 ] == 'f' and s[1: ].isdigit(),
procs.split( '\n' ) )
return open_files | [
"return",
"list",
"of",
"open",
"files",
"for",
"current",
"process"
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_rmtree.py#L66-L81 | [
"def",
"get_open_fds",
"(",
"verbose",
"=",
"False",
")",
":",
"pid",
"=",
"os",
".",
"getpid",
"(",
")",
"procs",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"\"lsof\"",
",",
"'-w'",
",",
"'-Ff'",
",",
"\"-p\"",
",",
"str",
"(",
"pid",
")",
"]",
")",
"if",
"verbose",
":",
"oprocs",
"=",
"subprocess",
".",
"check_output",
"(",
"[",
"\"lsof\"",
",",
"'-w'",
",",
"\"-p\"",
",",
"str",
"(",
"pid",
")",
"]",
")",
"logger",
".",
"info",
"(",
"oprocs",
")",
"open_files",
"=",
"filter",
"(",
"lambda",
"s",
":",
"s",
"and",
"s",
"[",
"0",
"]",
"==",
"'f'",
"and",
"s",
"[",
"1",
":",
"]",
".",
"isdigit",
"(",
")",
",",
"procs",
".",
"split",
"(",
"'\\n'",
")",
")",
"return",
"open_files"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
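A hedged usage sketch; it assumes lsof is on PATH, per the UNIX-only warning in the docstring, and uses Python 2 print to match the record.

fds = get_open_fds(verbose=False)
print 'open file descriptors: %d' % len(fds)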
test | file_type_stats | returns a kba.pipeline "transform" function that generates file
type stats from the stream_items that it sees. Currently, these
stats are just the first five non-whitespace characters. | streamcorpus_pipeline/_guess_media_type.py | def file_type_stats(config):
'''
returns a kba.pipeline "transform" function that generates file
type stats from the stream_items that it sees. Currently, these
stats are just the first five non-whitespace characters.
'''
## make a closure around config
def _file_type_stats(stream_item, context):
if stream_item.body and stream_item.body.raw:
#print repr(stream_item.body.raw[:250])
#sys.stdout.flush()
#doctype_m = doctype_re.match(stream_item.body.raw[:250])
#if doctype_m:
#print 'DOCTYPE: %s' % repr(doctype_m.group('doctype').lower())
if 'doctype html' in stream_item.body.raw[:250].lower():
print 'DOCTYPE: html'
else:
#if probably_html.search(stream_item.body.raw):
if has_tags(stream_item.body.raw[:400]):
print 'PROBABLY_HTML'
else:
xml = xml_ish.search(stream_item.body.raw)
if xml:
print 'XML: %s' % repr(xml.group('intro'))
else:
pdf = pdf_start.search(stream_item.body.raw)
if pdf:
print 'PDF %s' % repr(pdf.group('version'))
else:
ext = stream_item.abs_url.split('.')[-1]
if len(ext) < 6:
print 'UNK ext: %s' % repr(ext)
else:
first = first_letters.match(stream_item.body.raw)
if first and False:
print 'UNK letters: %s' % repr(first.group('first_letters'))
else:
print 'UNK first bytes: %s' % repr(stream_item.body.raw[:50])
#m = first_three_letters.search(stream_item.body.raw)
#if m:
# print repr(m.group('first_three_letters')).lower().strip()
#else:
# print repr(stream_item.body.raw[:50]).lower().strip()
return stream_item
return _file_type_stats | def file_type_stats(config):
'''
returns a kba.pipeline "transform" function that generates file
type stats from the stream_items that it sees. Currently, these
stats are just the first five non-whitespace characters.
'''
## make a closure around config
def _file_type_stats(stream_item, context):
if stream_item.body and stream_item.body.raw:
#print repr(stream_item.body.raw[:250])
#sys.stdout.flush()
#doctype_m = doctype_re.match(stream_item.body.raw[:250])
#if doctype_m:
#print 'DOCTYPE: %s' % repr(doctype_m.group('doctype').lower())
if 'doctype html' in stream_item.body.raw[:250].lower():
print 'DOCTYPE: html'
else:
#if probably_html.search(stream_item.body.raw):
if has_tags(stream_item.body.raw[:400]):
print 'PROBABLY_HTML'
else:
xml = xml_ish.search(stream_item.body.raw)
if xml:
print 'XML: %s' % repr(xml.group('intro'))
else:
pdf = pdf_start.search(stream_item.body.raw)
if pdf:
print 'PDF %s' % repr(pdf.group('version'))
else:
ext = stream_item.abs_url.split('.')[-1]
if len(ext) < 6:
print 'UNK ext: %s' % repr(ext)
else:
first = first_letters.match(stream_item.body.raw)
if first and False:
print 'UNK letters: %s' % repr(first.group('first_letters'))
else:
print 'UNK first bytes: %s' % repr(stream_item.body.raw[:50])
#m = first_three_letters.search(stream_item.body.raw)
#if m:
# print repr(m.group('first_three_letters')).lower().strip()
#else:
# print repr(stream_item.body.raw[:50]).lower().strip()
return stream_item
return _file_type_stats | [
"returns",
"a",
"kba",
".",
"pipeline",
"transform",
"function",
"that",
"generates",
"file",
"type",
"stats",
"from",
"the",
"stream_items",
"that",
"it",
"sees",
".",
"Currently",
"these",
"stats",
"are",
"just",
"the",
"first",
"five",
"non",
"-",
"whitespace",
"characters",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_guess_media_type.py#L55-L100 | [
"def",
"file_type_stats",
"(",
"config",
")",
":",
"## make a closure around config",
"def",
"_file_type_stats",
"(",
"stream_item",
",",
"context",
")",
":",
"if",
"stream_item",
".",
"body",
"and",
"stream_item",
".",
"body",
".",
"raw",
":",
"#print repr(stream_item.body.raw[:250])",
"#sys.stdout.flush()",
"#doctype_m = doctype_re.match(stream_item.body.raw[:250])",
"#if doctype_m:",
"#print 'DOCTYPE: %s' % repr(doctype_m.group('doctype').lower())",
"if",
"'doctype html'",
"in",
"stream_item",
".",
"body",
".",
"raw",
"[",
":",
"250",
"]",
".",
"lower",
"(",
")",
":",
"print",
"'DOCTYPE: html'",
"else",
":",
"#if probably_html.search(stream_item.body.raw):",
"if",
"has_tags",
"(",
"stream_item",
".",
"body",
".",
"raw",
"[",
":",
"400",
"]",
")",
":",
"print",
"'PROBABLY_HTML'",
"else",
":",
"xml",
"=",
"xml_ish",
".",
"search",
"(",
"stream_item",
".",
"body",
".",
"raw",
")",
"if",
"xml",
":",
"print",
"'XML: %s'",
"%",
"repr",
"(",
"xml",
".",
"group",
"(",
"'intro'",
")",
")",
"else",
":",
"pdf",
"=",
"pdf_start",
".",
"search",
"(",
"stream_item",
".",
"body",
".",
"raw",
")",
"if",
"pdf",
":",
"print",
"'PDF %s'",
"%",
"repr",
"(",
"pdf",
".",
"group",
"(",
"'version'",
")",
")",
"else",
":",
"ext",
"=",
"stream_item",
".",
"abs_url",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"if",
"len",
"(",
"ext",
")",
"<",
"6",
":",
"print",
"'UNK ext: %s'",
"%",
"repr",
"(",
"ext",
")",
"else",
":",
"first",
"=",
"first_letters",
".",
"match",
"(",
"stream_item",
".",
"body",
".",
"raw",
")",
"if",
"first",
"and",
"False",
":",
"print",
"'UNK letters: %s'",
"%",
"repr",
"(",
"first",
".",
"group",
"(",
"'first_letters'",
")",
")",
"else",
":",
"print",
"'UNK first bytes: %s'",
"%",
"repr",
"(",
"stream_item",
".",
"body",
".",
"raw",
"[",
":",
"50",
"]",
")",
"#m = first_three_letters.search(stream_item.body.raw)",
"#if m:",
"# print repr(m.group('first_three_letters')).lower().strip()",
"#else:",
"# print repr(stream_item.body.raw[:50]).lower().strip()",
"return",
"stream_item",
"return",
"_file_type_stats"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
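A hedged sketch of the closure pattern above: file_type_stats() returns a pipeline transform taking (stream_item, context); the stream_item and context objects are assumed to come from the surrounding pipeline.

transform = file_type_stats({})         # config is ignored by the body shown above
si = transform(stream_item, context)    # prints a file-type guess, returns the item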
test | rejester_run | get a rejester.WorkUnit with KBA s3 path, fetch it, and save
some counts about it. | examples/verify_kba2014.py | def rejester_run(work_unit):
'''get a rejester.WorkUnit with KBA s3 path, fetch it, and save
some counts about it.
'''
#fname = 'verify-chunks-%d-%d' % (os.getpid(), time.time())
fname = work_unit.key.strip().split('/')[-1]
output_dir_path = work_unit.data.get('output_dir_path', '/mnt')
u = uuid.uuid3(uuid.UUID(int=0), work_unit.key.strip())
path1 = u.hex[0]
path2 = u.hex[1]
fpath = os.path.join(output_dir_path, path1, path2, fname)
if not os.path.exists(os.path.dirname(fpath)):
os.makedirs(os.path.dirname(fpath))
output = gzip.open(fpath + '-out.gz', 'wb')
expected_si_count = int(fname.split('-')[1])
max_tries = 20
tries = 0
while tries < max_tries:
try:
exc, si_count, serif_count, clean_visible_bytes, clean_visible_count, stream_ids = \
attempt_fetch(work_unit, fpath)
if si_count != expected_si_count:
print 'retrying because si_count = %d != %d expected_si_count' % (si_count, expected_si_count)
sys.stdout.flush()
tries += 1
continue
else:
print 'succeeded in reading si_count = %d' % (si_count,)
sys.stdout.flush()
output.write( '%s\t%d\t%d\t%d\t%d\t%s\t%s\n' % (
exc, si_count, serif_count, clean_visible_bytes, clean_visible_count,
work_unit.key.strip(), ','.join(['%s|%s' % tup for tup in stream_ids])) )
break
except Exception, exc:
print 'broken?'
print traceback.format_exc(exc)
sys.stdout.flush()
tries += 1
output.write(traceback.format_exc(exc))
output.close() | def rejester_run(work_unit):
'''get a rejester.WorkUnit with KBA s3 path, fetch it, and save
some counts about it.
'''
#fname = 'verify-chunks-%d-%d' % (os.getpid(), time.time())
fname = work_unit.key.strip().split('/')[-1]
output_dir_path = work_unit.data.get('output_dir_path', '/mnt')
u = uuid.uuid3(uuid.UUID(int=0), work_unit.key.strip())
path1 = u.hex[0]
path2 = u.hex[1]
fpath = os.path.join(output_dir_path, path1, path2, fname)
if not os.path.exists(os.path.dirname(fpath)):
os.makedirs(os.path.dirname(fpath))
output = gzip.open(fpath + '-out.gz', 'wb')
expected_si_count = int(fname.split('-')[1])
max_tries = 20
tries = 0
while tries < max_tries:
try:
exc, si_count, serif_count, clean_visible_bytes, clean_visible_count, stream_ids = \
attempt_fetch(work_unit, fpath)
if si_count != expected_si_count:
print 'retrying because si_count = %d != %d expected_si_count' % (si_count, expected_si_count)
sys.stdout.flush()
tries += 1
continue
else:
print 'succeeded in reading si_count = %d' % (si_count,)
sys.stdout.flush()
output.write( '%s\t%d\t%d\t%d\t%d\t%s\t%s\n' % (
exc, si_count, serif_count, clean_visible_bytes, clean_visible_count,
work_unit.key.strip(), ','.join(['%s|%s' % tup for tup in stream_ids])) )
break
except Exception, exc:
print 'broken?'
print traceback.format_exc(exc)
sys.stdout.flush()
tries += 1
output.write(traceback.format_exc(exc))
output.close() | [
"get",
"a",
"rejester",
".",
"WorkUnit",
"with",
"KBA",
"s3",
"path",
"fetch",
"it",
"and",
"save",
"some",
"counts",
"about",
"it",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/examples/verify_kba2014.py#L34-L78 | [
"def",
"rejester_run",
"(",
"work_unit",
")",
":",
"#fname = 'verify-chunks-%d-%d' % (os.getpid(), time.time())",
"fname",
"=",
"work_unit",
".",
"key",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"output_dir_path",
"=",
"work_unit",
".",
"data",
".",
"get",
"(",
"'output_dir_path'",
",",
"'/mnt'",
")",
"u",
"=",
"uuid",
".",
"uuid3",
"(",
"uuid",
".",
"UUID",
"(",
"int",
"=",
"0",
")",
",",
"work_unit",
".",
"key",
".",
"strip",
"(",
")",
")",
"path1",
"=",
"u",
".",
"hex",
"[",
"0",
"]",
"path2",
"=",
"u",
".",
"hex",
"[",
"1",
"]",
"fpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir_path",
",",
"path1",
",",
"path2",
",",
"fname",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"fpath",
")",
")",
":",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"fpath",
")",
")",
"output",
"=",
"gzip",
".",
"open",
"(",
"fpath",
"+",
"'-out.gz'",
",",
"'wb'",
")",
"expected_si_count",
"=",
"int",
"(",
"fname",
".",
"split",
"(",
"'-'",
")",
"[",
"1",
"]",
")",
"max_tries",
"=",
"20",
"tries",
"=",
"0",
"while",
"tries",
"<",
"max_tries",
":",
"try",
":",
"exc",
",",
"si_count",
",",
"serif_count",
",",
"clean_visible_bytes",
",",
"clean_visible_count",
",",
"stream_ids",
"=",
"attempt_fetch",
"(",
"work_unit",
",",
"fpath",
")",
"if",
"si_count",
"!=",
"expected_si_count",
":",
"print",
"'retrying because si_count = %d != %d expected_si_count'",
"%",
"(",
"si_count",
",",
"expected_si_count",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"tries",
"+=",
"1",
"continue",
"else",
":",
"print",
"'succeeded in reading si_count = %d'",
"%",
"(",
"si_count",
",",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"output",
".",
"write",
"(",
"'%s\\t%d\\t%d\\t%d\\t%d\\t%s\\t%s\\n'",
"%",
"(",
"exc",
",",
"si_count",
",",
"serif_count",
",",
"clean_visible_bytes",
",",
"clean_visible_count",
",",
"work_unit",
".",
"key",
".",
"strip",
"(",
")",
",",
"','",
".",
"join",
"(",
"[",
"'%s|%s'",
"%",
"tup",
"for",
"tup",
"in",
"stream_ids",
"]",
")",
")",
")",
"break",
"except",
"Exception",
",",
"exc",
":",
"print",
"'broken?'",
"print",
"traceback",
".",
"format_exc",
"(",
"exc",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"tries",
"+=",
"1",
"output",
".",
"write",
"(",
"traceback",
".",
"format_exc",
"(",
"exc",
")",
")",
"output",
".",
"close",
"(",
")"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
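The row above captures a bounded-retry pattern: rejester_run keeps re-fetching until the observed stream-item count matches the count embedded in the chunk file name, giving up after max_tries. A minimal Python 3 sketch of the same loop, with a hypothetical fetch callable standing in for attempt_fetch:

def fetch_with_retry(fetch, expected_count, max_tries=20):
    # fetch is a hypothetical callable returning (error, item_count);
    # the real attempt_fetch returns a richer tuple.
    for attempt in range(1, max_tries + 1):
        error, count = fetch()
        if count == expected_count:
            return error, count
        print('retry %d: read %d items, expected %d'
              % (attempt, count, expected_count))
    raise RuntimeError('gave up after %d tries' % max_tries)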
test | attempt_fetch | attempt a fetch and iteration over a work_unit.key path in s3 | examples/verify_kba2014.py | def attempt_fetch(work_unit, fpath):
'''attempt a fetch and iteration over a work_unit.key path in s3
'''
url = 'http://s3.amazonaws.com/aws-publicdatasets/' + work_unit.key.strip()
## cheapest way to iterate over the corpus is a few stages of
## streamed child processes. Note that stderr needs to go
## separately to a file so that reading the stdin doesn't get
## blocked:
cmd = '(wget -O - %s | gpg --no-permission-warning --trust-model always --output - --decrypt - | xz --decompress) 2> %s-err' % (url, fpath)
print cmd
child = Popen(cmd, stdout=PIPE, shell=True)
print 'child launched'
sys.stdout.flush()
si_count = 0
serif_count = 0
exc = ''
stream_ids = list()
clean_visible_bytes = 0
clean_visible_count = 0
try:
for si in Chunk(file_obj=child.stdout):
print si.stream_id, si.abs_url
if si.body.language:
lang = si.body.language.code
else:
lang = ''
stream_ids.append((lang, si.stream_id))
if si.body.clean_visible:
clean_visible_count += 1
clean_visible_bytes += len(si.body.clean_visible)
si_count += 1
if 'serif' in si.body.sentences:
serif_count += 1
except Exception, exc:
exc = re.sub('\s+', ' ', str(exc)).strip()
child.terminate()
child.wait()
child.stdout.close()
return exc, si_count, serif_count, clean_visible_bytes, clean_visible_count, stream_ids | def attempt_fetch(work_unit, fpath):
'''attempt a fetch and iteration over a work_unit.key path in s3
'''
url = 'http://s3.amazonaws.com/aws-publicdatasets/' + work_unit.key.strip()
## cheapest way to iterate over the corpus is a few stages of
## streamed child processes. Note that stderr needs to go
## separately to a file so that reading the stdin doesn't get
## blocked:
cmd = '(wget -O - %s | gpg --no-permission-warning --trust-model always --output - --decrypt - | xz --decompress) 2> %s-err' % (url, fpath)
print cmd
child = Popen(cmd, stdout=PIPE, shell=True)
print 'child launched'
sys.stdout.flush()
si_count = 0
serif_count = 0
exc = ''
stream_ids = list()
clean_visible_bytes = 0
clean_visible_count = 0
try:
for si in Chunk(file_obj=child.stdout):
print si.stream_id, si.abs_url
if si.body.language:
lang = si.body.language.code
else:
lang = ''
stream_ids.append((lang, si.stream_id))
if si.body.clean_visible:
clean_visible_count += 1
clean_visible_bytes += len(si.body.clean_visible)
si_count += 1
if 'serif' in si.body.sentences:
serif_count += 1
except Exception, exc:
exc = re.sub('\s+', ' ', str(exc)).strip()
child.terminate()
child.wait()
child.stdout.close()
return exc, si_count, serif_count, clean_visible_bytes, clean_visible_count, stream_ids | [
"attempt",
"a",
"fetch",
"and",
"iteration",
"over",
"a",
"work_unit",
".",
"key",
"path",
"in",
"s3"
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/examples/verify_kba2014.py#L80-L121 | [
"def",
"attempt_fetch",
"(",
"work_unit",
",",
"fpath",
")",
":",
"url",
"=",
"'http://s3.amazonaws.com/aws-publicdatasets/'",
"+",
"work_unit",
".",
"key",
".",
"strip",
"(",
")",
"## cheapest way to iterate over the corpus is a few stages of",
"## streamed child processes. Note that stderr needs to go",
"## separately to a file so that reading the stdin doesn't get",
"## blocked:",
"cmd",
"=",
"'(wget -O - %s | gpg --no-permission-warning --trust-model always --output - --decrypt - | xz --decompress) 2> %s-err'",
"%",
"(",
"url",
",",
"fpath",
")",
"print",
"cmd",
"child",
"=",
"Popen",
"(",
"cmd",
",",
"stdout",
"=",
"PIPE",
",",
"shell",
"=",
"True",
")",
"print",
"'child launched'",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"si_count",
"=",
"0",
"serif_count",
"=",
"0",
"exc",
"=",
"''",
"stream_ids",
"=",
"list",
"(",
")",
"clean_visible_bytes",
"=",
"0",
"clean_visible_count",
"=",
"0",
"try",
":",
"for",
"si",
"in",
"Chunk",
"(",
"file_obj",
"=",
"child",
".",
"stdout",
")",
":",
"print",
"si",
".",
"stream_id",
",",
"si",
".",
"abs_url",
"if",
"si",
".",
"body",
".",
"language",
":",
"lang",
"=",
"si",
".",
"body",
".",
"language",
".",
"code",
"else",
":",
"lang",
"=",
"''",
"stream_ids",
".",
"append",
"(",
"(",
"lang",
",",
"si",
".",
"stream_id",
")",
")",
"if",
"si",
".",
"body",
".",
"clean_visible",
":",
"clean_visible_count",
"+=",
"1",
"clean_visible_bytes",
"+=",
"len",
"(",
"si",
".",
"body",
".",
"clean_visible",
")",
"si_count",
"+=",
"1",
"if",
"'serif'",
"in",
"si",
".",
"body",
".",
"sentences",
":",
"serif_count",
"+=",
"1",
"except",
"Exception",
",",
"exc",
":",
"exc",
"=",
"re",
".",
"sub",
"(",
"'\\s+'",
",",
"' '",
",",
"str",
"(",
"exc",
")",
")",
".",
"strip",
"(",
")",
"child",
".",
"terminate",
"(",
")",
"child",
".",
"wait",
"(",
")",
"child",
".",
"stdout",
".",
"close",
"(",
")",
"return",
"exc",
",",
"si_count",
",",
"serif_count",
",",
"clean_visible_bytes",
",",
"clean_visible_count",
",",
"stream_ids"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
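attempt_fetch streams the encrypted chunk through a shell pipeline (wget | gpg | xz) and iterates StreamItems straight off the child's stdout, with stderr redirected to a side file so that reading stdout never blocks. The same subprocess pattern in isolation, a sketch with plain line iteration standing in for the Chunk reader and an assumed /tmp error path:

import subprocess

def stream_command(cmd, err_path):
    # stderr goes to its own file so the stdout pipe cannot deadlock
    with open(err_path, 'wb') as err:
        child = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                 stderr=err, shell=True)
        try:
            for line in child.stdout:
                yield line
        finally:
            child.stdout.close()
            child.wait()

for line in stream_command('echo one; echo two', '/tmp/cmd-err'):
    print(line.decode().rstrip())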
test | get_file_lines | Return a list of non-empty lines from `file_path`. | adjspecies/__init__.py | def get_file_lines(file_name):
"""Return a list of non-empty lines from `file_path`."""
file_path = path.join(path.dirname(path.abspath(__file__)), file_name)
with open(file_path) as file_obj:
return [line for line in file_obj.read().splitlines() if line] | def get_file_lines(file_name):
"""Return a list of non-empty lines from `file_path`."""
file_path = path.join(path.dirname(path.abspath(__file__)), file_name)
with open(file_path) as file_obj:
return [line for line in file_obj.read().splitlines() if line] | [
"Return",
"a",
"list",
"of",
"non",
"-",
"empty",
"lines",
"from",
"file_path",
"."
] | hipikat/adjspecies | python | https://github.com/hipikat/adjspecies/blob/bffceceb08a868ea215f16dd341159d39ca75971/adjspecies/__init__.py#L25-L29 | [
"def",
"get_file_lines",
"(",
"file_name",
")",
":",
"file_path",
"=",
"path",
".",
"join",
"(",
"path",
".",
"dirname",
"(",
"path",
".",
"abspath",
"(",
"__file__",
")",
")",
",",
"file_name",
")",
"with",
"open",
"(",
"file_path",
")",
"as",
"file_obj",
":",
"return",
"[",
"line",
"for",
"line",
"in",
"file_obj",
".",
"read",
"(",
")",
".",
"splitlines",
"(",
")",
"if",
"line",
"]"
] | bffceceb08a868ea215f16dd341159d39ca75971 |
test | get_describers | Return a describer tuple in the form `(name, position)`,
where position is either 'prefix' or 'suffix'. | adjspecies/__init__.py | def get_describers():
"""
Return a describer tuple in the form `(name, position)`,
where position is either 'prefix' or 'suffix'.
"""
adjectives = map(lambda x: (x, 'prefix'), get_file_lines('adjectives.txt'))
animal_nouns = map(lambda x: (x, 'suffix'), get_file_lines('nouns.txt'))
return list(chain(adjectives, animal_nouns)) | def get_describers():
"""
Return a describer tuple in the form `(name, position)`,
where position is either 'prefix' or 'suffix'.
"""
adjectives = map(lambda x: (x, 'prefix'), get_file_lines('adjectives.txt'))
animal_nouns = map(lambda x: (x, 'suffix'), get_file_lines('nouns.txt'))
return list(chain(adjectives, animal_nouns)) | [
"Return",
"a",
"describer",
"tuple",
"in",
"the",
"form",
"(",
"name",
"position",
")",
"where",
"position",
"is",
"either",
"prefix",
"or",
"suffix",
"."
] | hipikat/adjspecies | python | https://github.com/hipikat/adjspecies/blob/bffceceb08a868ea215f16dd341159d39ca75971/adjspecies/__init__.py#L37-L44 | [
"def",
"get_describers",
"(",
")",
":",
"adjectives",
"=",
"map",
"(",
"lambda",
"x",
":",
"(",
"x",
",",
"'prefix'",
")",
",",
"get_file_lines",
"(",
"'adjectives.txt'",
")",
")",
"animal_nouns",
"=",
"map",
"(",
"lambda",
"x",
":",
"(",
"x",
",",
"'suffix'",
")",
",",
"get_file_lines",
"(",
"'nouns.txt'",
")",
")",
"return",
"list",
"(",
"chain",
"(",
"adjectives",
",",
"animal_nouns",
")",
")"
] | bffceceb08a868ea215f16dd341159d39ca75971 |
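get_describers tags every adjective as a prefix and every animal noun as a suffix, so the pairing code later knows on which side of the species each describer goes. The same tagging with inline stand-ins for the two word files (hypothetical data):

from itertools import chain

adjectives = ['wily', 'dapper']   # stand-in for adjectives.txt
animal_nouns = ['fox', 'owl']     # stand-in for nouns.txt
describers = list(chain(
    ((word, 'prefix') for word in adjectives),
    ((word, 'suffix') for word in animal_nouns),
))
# [('wily', 'prefix'), ('dapper', 'prefix'),
#  ('fox', 'suffix'), ('owl', 'suffix')]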
test | _random_adjspecies_pair | Return an ordered 2-tuple containing a species and a describer. | adjspecies/__init__.py | def _random_adjspecies_pair():
"""Return an ordered 2-tuple containing a species and a describer."""
describer, desc_position = random_describer()
if desc_position == 'prefix':
return (describer, random_species())
elif desc_position == 'suffix':
return (random_species(), describer) | def _random_adjspecies_pair():
"""Return an ordered 2-tuple containing a species and a describer."""
describer, desc_position = random_describer()
if desc_position == 'prefix':
return (describer, random_species())
elif desc_position == 'suffix':
return (random_species(), describer) | [
"Return",
"an",
"ordered",
"2",
"-",
"tuple",
"containing",
"a",
"species",
"and",
"a",
"describer",
"."
] | hipikat/adjspecies | python | https://github.com/hipikat/adjspecies/blob/bffceceb08a868ea215f16dd341159d39ca75971/adjspecies/__init__.py#L52-L58 | [
"def",
"_random_adjspecies_pair",
"(",
")",
":",
"describer",
",",
"desc_position",
"=",
"random_describer",
"(",
")",
"if",
"desc_position",
"==",
"'prefix'",
":",
"return",
"(",
"describer",
",",
"random_species",
"(",
")",
")",
"elif",
"desc_position",
"==",
"'suffix'",
":",
"return",
"(",
"random_species",
"(",
")",
",",
"describer",
")"
] | bffceceb08a868ea215f16dd341159d39ca75971 |
test | random_adjspecies_pair | Return an ordered 2-tuple containing a species and a describer.
The letter-count of the pair is guaranteed to not exceed `maxlen` if
it is given. If `prevent_stutter` is True, the last letter of the
first item of the pair will be different from the first letter of
the second item. | adjspecies/__init__.py | def random_adjspecies_pair(maxlen=None, prevent_stutter=True):
"""
Return an ordered 2-tuple containing a species and a describer.
The letter-count of the pair is guaranteed to not exceed `maxlen` if
it is given. If `prevent_stutter` is True, the last letter of the
first item of the pair will be different from the first letter of
the second item.
"""
while True:
pair = _random_adjspecies_pair()
if maxlen and len(''.join(pair)) > maxlen:
continue
if prevent_stutter and pair[0][-1] == pair[1][0]:
continue
return pair | def random_adjspecies_pair(maxlen=None, prevent_stutter=True):
"""
Return an ordered 2-tuple containing a species and a describer.
The letter-count of the pair is guaranteed to not exceed `maxlen` if
it is given. If `prevent_stutter` is True, the last letter of the
first item of the pair will be different from the first letter of
the second item.
"""
while True:
pair = _random_adjspecies_pair()
if maxlen and len(''.join(pair)) > maxlen:
continue
if prevent_stutter and pair[0][-1] == pair[1][0]:
continue
return pair | [
"Return",
"an",
"ordered",
"2",
"-",
"tuple",
"containing",
"a",
"species",
"and",
"a",
"describer",
".",
"The",
"letter",
"-",
"count",
"of",
"the",
"pair",
"is",
"guarantee",
"to",
"not",
"exceed",
"maxlen",
"if",
"it",
"is",
"given",
".",
"If",
"prevent_stutter",
"is",
"True",
"the",
"last",
"letter",
"of",
"the",
"first",
"item",
"of",
"the",
"pair",
"will",
"be",
"different",
"from",
"the",
"first",
"letter",
"of",
"the",
"second",
"item",
"."
] | hipikat/adjspecies | python | https://github.com/hipikat/adjspecies/blob/bffceceb08a868ea215f16dd341159d39ca75971/adjspecies/__init__.py#L61-L75 | [
"def",
"random_adjspecies_pair",
"(",
"maxlen",
"=",
"None",
",",
"prevent_stutter",
"=",
"True",
")",
":",
"while",
"True",
":",
"pair",
"=",
"_random_adjspecies_pair",
"(",
")",
"if",
"maxlen",
"and",
"len",
"(",
"''",
".",
"join",
"(",
"pair",
")",
")",
">",
"maxlen",
":",
"continue",
"if",
"prevent_stutter",
"and",
"pair",
"[",
"0",
"]",
"[",
"-",
"1",
"]",
"==",
"pair",
"[",
"1",
"]",
"[",
"0",
"]",
":",
"continue",
"return",
"pair"
] | bffceceb08a868ea215f16dd341159d39ca75971 |
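random_adjspecies_pair is a rejection sampler: it keeps drawing candidate pairs and discards any that exceed maxlen or whose halves share a letter at the seam. The loop in isolation, with a hypothetical draw supplying candidates (like the original, it never terminates if no candidate can pass):

import random

def sample_pair(draw, maxlen=None, prevent_stutter=True):
    while True:
        first, second = draw()
        if maxlen and len(first + second) > maxlen:
            continue  # too long: reject and redraw
        if prevent_stutter and first[-1] == second[0]:
            continue  # seam letters match: reject and redraw
        return first, second

draw = lambda: (random.choice(['red', 'shy']),
                random.choice(['fox', 'yak']))
print(sample_pair(draw, maxlen=6))  # e.g. ('red', 'fox')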
test | random_adjspecies | Return a random adjective/species, separated by `sep`. The keyword
arguments `maxlen` and `prevent_stutter` are the same as for
`random_adjspecies_pair`, but note that the maximum length argument is
not affected by the separator. | adjspecies/__init__.py | def random_adjspecies(sep='', maxlen=8, prevent_stutter=True):
"""
Return a random adjective/species, separated by `sep`. The keyword
arguments `maxlen` and `prevent_stutter` are the same as for
`random_adjspecies_pair`, but note that the maximum length argument is
not affected by the separator.
"""
pair = random_adjspecies_pair(maxlen, prevent_stutter)
return pair[0] + sep + pair[1] | def random_adjspecies(sep='', maxlen=8, prevent_stutter=True):
"""
Return a random adjective/species, separated by `sep`. The keyword
arguments `maxlen` and `prevent_stutter` are the same as for
`random_adjspecies_pair`, but note that the maximum length argument is
not affected by the separator.
"""
pair = random_adjspecies_pair(maxlen, prevent_stutter)
return pair[0] + sep + pair[1] | [
"Return",
"a",
"random",
"adjective",
"/",
"species",
"separated",
"by",
"sep",
".",
"The",
"keyword",
"arguments",
"maxlen",
"and",
"prevent_stutter",
"are",
"the",
"same",
"as",
"for",
"random_adjspecies_pair",
"but",
"note",
"that",
"the",
"maximum",
"length",
"argument",
"is",
"not",
"affected",
"by",
"the",
"separator",
"."
] | hipikat/adjspecies | python | https://github.com/hipikat/adjspecies/blob/bffceceb08a868ea215f16dd341159d39ca75971/adjspecies/__init__.py#L78-L86 | [
"def",
"random_adjspecies",
"(",
"sep",
"=",
"''",
",",
"maxlen",
"=",
"8",
",",
"prevent_stutter",
"=",
"True",
")",
":",
"pair",
"=",
"random_adjspecies_pair",
"(",
"maxlen",
",",
"prevent_stutter",
")",
"return",
"pair",
"[",
"0",
"]",
"+",
"sep",
"+",
"pair",
"[",
"1",
"]"
] | bffceceb08a868ea215f16dd341159d39ca75971 |
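Putting the three adjspecies rows together, typical usage looks like the sketch below, assuming the adjspecies package is importable; outputs are random, so the comments show made-up examples. Per the docstring, the separator does not count toward maxlen.

from adjspecies import random_adjspecies, random_adjspecies_pair

print(random_adjspecies())                # e.g. 'wilyfox'
print(random_adjspecies(sep='-'))         # e.g. 'dapper-owl'
print(random_adjspecies_pair(maxlen=10))  # e.g. ('sly', 'raccoon')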
test | morph | Morphological analysis for Japanese. | goolabs/commands.py | def morph(ctx, app_id, sentence_file, json_flag,
sentence, info_filter, pos_filter, request_id):
# type: (Context, unicode, Optional[IO], bool, unicode, unicode, unicode, unicode) -> None # NOQA
""" Morphological analysis for Japanese."""
app_id = clean_app_id(app_id)
sentence = clean_sentence(sentence, sentence_file)
if info_filter:
info_filter = info_filter.replace(',', '|')
if pos_filter:
pos_filter = pos_filter.replace(',', '|')
api = GoolabsAPI(app_id)
ret = api.morph(
sentence=sentence,
info_filter=info_filter,
pos_filter=pos_filter,
request_id=request_id,
)
if json_flag:
click.echo(format_json(api.response.json()))
return
for words in ret['word_list']:
for word in words:
click.echo(','.join(word)) | def morph(ctx, app_id, sentence_file, json_flag,
sentence, info_filter, pos_filter, request_id):
# type: (Context, unicode, Optional[IO], bool, unicode, unicode, unicode, unicode) -> None # NOQA
""" Morphological analysis for Japanese."""
app_id = clean_app_id(app_id)
sentence = clean_sentence(sentence, sentence_file)
if info_filter:
info_filter = info_filter.replace(',', '|')
if pos_filter:
pos_filter = pos_filter.replace(',', '|')
api = GoolabsAPI(app_id)
ret = api.morph(
sentence=sentence,
info_filter=info_filter,
pos_filter=pos_filter,
request_id=request_id,
)
if json_flag:
click.echo(format_json(api.response.json()))
return
for words in ret['word_list']:
for word in words:
click.echo(','.join(word)) | [
"Morphological",
"analysis",
"for",
"Japanese",
"."
] | tell-k/goolabs | python | https://github.com/tell-k/goolabs/blob/3b87d0409e55c71290158ad6d5e2d8bb9a338c46/goolabs/commands.py#L107-L135 | [
"def",
"morph",
"(",
"ctx",
",",
"app_id",
",",
"sentence_file",
",",
"json_flag",
",",
"sentence",
",",
"info_filter",
",",
"pos_filter",
",",
"request_id",
")",
":",
"# type: (Context, unicode, Optional[IO], bool, unicode, unicode, unicode, unicode) -> None # NOQA",
"app_id",
"=",
"clean_app_id",
"(",
"app_id",
")",
"sentence",
"=",
"clean_sentence",
"(",
"sentence",
",",
"sentence_file",
")",
"if",
"info_filter",
":",
"info_filter",
"=",
"info_filter",
".",
"replace",
"(",
"','",
",",
"'|'",
")",
"if",
"pos_filter",
":",
"pos_filter",
"=",
"pos_filter",
".",
"replace",
"(",
"','",
",",
"'|'",
")",
"api",
"=",
"GoolabsAPI",
"(",
"app_id",
")",
"ret",
"=",
"api",
".",
"morph",
"(",
"sentence",
"=",
"sentence",
",",
"info_filter",
"=",
"info_filter",
",",
"pos_filter",
"=",
"pos_filter",
",",
"request_id",
"=",
"request_id",
",",
")",
"if",
"json_flag",
":",
"click",
".",
"echo",
"(",
"format_json",
"(",
"api",
".",
"response",
".",
"json",
"(",
")",
")",
")",
"return",
"for",
"words",
"in",
"ret",
"[",
"'word_list'",
"]",
":",
"for",
"word",
"in",
"words",
":",
"click",
".",
"echo",
"(",
"','",
".",
"join",
"(",
"word",
")",
")"
] | 3b87d0409e55c71290158ad6d5e2d8bb9a338c46 |
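Every goolabs subcommand in this file follows the same shape: clean the app id and input, rewrite comma-separated filters as '|'-separated ones, call the API, then either dump the raw JSON response or print per-item rows. A condensed sketch of that shape, taking the call signature from the row above; the app id is a placeholder and a live network call would be needed:

from goolabs import GoolabsAPI

api = GoolabsAPI('your-app-id')  # placeholder credential
ret = api.morph(sentence=u'日本語を解析する',
                pos_filter=u'名詞|動詞')  # the CLI turns ',' into '|'
for words in ret['word_list']:
    for word in words:
        print(','.join(word))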
test | similarity | Scoring the similarity of two words. | goolabs/commands.py | def similarity(ctx, app_id, json_flag, query_pair, request_id):
# type: (Context, unicode, bool, List[unicode], unicode) -> None
""" Scoring the similarity of two words. """
app_id = clean_app_id(app_id)
api = GoolabsAPI(app_id)
ret = api.similarity(
query_pair=query_pair,
request_id=request_id
)
if json_flag:
click.echo(format_json(api.response.json()))
return
click.echo('{0:.16f}'.format(ret['score'])) | def similarity(ctx, app_id, json_flag, query_pair, request_id):
# type: (Context, unicode, bool, List[unicode], unicode) -> None
""" Scoring the similarity of two words. """
app_id = clean_app_id(app_id)
api = GoolabsAPI(app_id)
ret = api.similarity(
query_pair=query_pair,
request_id=request_id
)
if json_flag:
click.echo(format_json(api.response.json()))
return
click.echo('{0:.16f}'.format(ret['score'])) | [
"Scoring",
"the",
"similarity",
"of",
"two",
"words",
"."
] | tell-k/goolabs | python | https://github.com/tell-k/goolabs/blob/3b87d0409e55c71290158ad6d5e2d8bb9a338c46/goolabs/commands.py#L144-L160 | [
"def",
"similarity",
"(",
"ctx",
",",
"app_id",
",",
"json_flag",
",",
"query_pair",
",",
"request_id",
")",
":",
"# type: (Context, unicode, bool, List[unicode], unicode) -> None",
"app_id",
"=",
"clean_app_id",
"(",
"app_id",
")",
"api",
"=",
"GoolabsAPI",
"(",
"app_id",
")",
"ret",
"=",
"api",
".",
"similarity",
"(",
"query_pair",
"=",
"query_pair",
",",
"request_id",
"=",
"request_id",
")",
"if",
"json_flag",
":",
"click",
".",
"echo",
"(",
"format_json",
"(",
"api",
".",
"response",
".",
"json",
"(",
")",
")",
")",
"return",
"click",
".",
"echo",
"(",
"'{0:.16f}'",
".",
"format",
"(",
"ret",
"[",
"'score'",
"]",
")",
")"
] | 3b87d0409e55c71290158ad6d5e2d8bb9a338c46 |
test | hiragana | Convert the Japanese to Hiragana or Katakana. | goolabs/commands.py | def hiragana(ctx, app_id, sentence_file,
json_flag, sentence, output_type, request_id):
# type: (Context, unicode, Optional[IO], bool, unicode, unicode, unicode) -> None # NOQA
""" Convert the Japanese to Hiragana or Katakana. """
app_id = clean_app_id(app_id)
sentence = clean_sentence(sentence, sentence_file)
api = GoolabsAPI(app_id)
ret = api.hiragana(
sentence=sentence,
output_type=output_type,
request_id=request_id
)
if json_flag:
click.echo(format_json(api.response.json()))
return
click.echo(ret['converted']) | def hiragana(ctx, app_id, sentence_file,
json_flag, sentence, output_type, request_id):
# type: (Context, unicode, Optional[IO], bool, unicode, unicode, unicode) -> None # NOQA
""" Convert the Japanese to Hiragana or Katakana. """
app_id = clean_app_id(app_id)
sentence = clean_sentence(sentence, sentence_file)
api = GoolabsAPI(app_id)
ret = api.hiragana(
sentence=sentence,
output_type=output_type,
request_id=request_id
)
if json_flag:
click.echo(format_json(api.response.json()))
return
click.echo(ret['converted']) | [
"Convert",
"the",
"Japanese",
"to",
"Hiragana",
"or",
"Katakana",
"."
] | tell-k/goolabs | python | https://github.com/tell-k/goolabs/blob/3b87d0409e55c71290158ad6d5e2d8bb9a338c46/goolabs/commands.py#L172-L191 | [
"def",
"hiragana",
"(",
"ctx",
",",
"app_id",
",",
"sentence_file",
",",
"json_flag",
",",
"sentence",
",",
"output_type",
",",
"request_id",
")",
":",
"# type: (Context, unicode, Optional[IO], bool, unicode, unicode, unicode) -> None # NOQA",
"app_id",
"=",
"clean_app_id",
"(",
"app_id",
")",
"sentence",
"=",
"clean_sentence",
"(",
"sentence",
",",
"sentence_file",
")",
"api",
"=",
"GoolabsAPI",
"(",
"app_id",
")",
"ret",
"=",
"api",
".",
"hiragana",
"(",
"sentence",
"=",
"sentence",
",",
"output_type",
"=",
"output_type",
",",
"request_id",
"=",
"request_id",
")",
"if",
"json_flag",
":",
"click",
".",
"echo",
"(",
"format_json",
"(",
"api",
".",
"response",
".",
"json",
"(",
")",
")",
")",
"return",
"click",
".",
"echo",
"(",
"ret",
"[",
"'converted'",
"]",
")"
] | 3b87d0409e55c71290158ad6d5e2d8bb9a338c46 |
test | entity | Extract unique representation from sentence. | goolabs/commands.py | def entity(ctx, app_id, sentence_file,
json_flag, sentence, class_filter, request_id):
# type: (Context, unicode, Optional[IO], bool, unicode, unicode, unicode) -> None # NOQA
""" Extract unique representation from sentence. """
app_id = clean_app_id(app_id)
sentence = clean_sentence(sentence, sentence_file)
if class_filter:
class_filter = class_filter.replace(',', '|')
api = GoolabsAPI(app_id)
ret = api.entity(
sentence=sentence,
class_filter=class_filter,
request_id=request_id
)
if json_flag:
click.echo(format_json(api.response.json()))
return
for ne in ret['ne_list']:
click.echo(','.join(ne)) | def entity(ctx, app_id, sentence_file,
json_flag, sentence, class_filter, request_id):
# type: (Context, unicode, Optional[IO], bool, unicode, unicode, unicode) -> None # NOQA
""" Extract unique representation from sentence. """
app_id = clean_app_id(app_id)
sentence = clean_sentence(sentence, sentence_file)
if class_filter:
class_filter = class_filter.replace(',', '|')
api = GoolabsAPI(app_id)
ret = api.entity(
sentence=sentence,
class_filter=class_filter,
request_id=request_id
)
if json_flag:
click.echo(format_json(api.response.json()))
return
for ne in ret['ne_list']:
click.echo(','.join(ne)) | [
"Extract",
"unique",
"representation",
"from",
"sentence",
"."
] | tell-k/goolabs | python | https://github.com/tell-k/goolabs/blob/3b87d0409e55c71290158ad6d5e2d8bb9a338c46/goolabs/commands.py#L203-L226 | [
"def",
"entity",
"(",
"ctx",
",",
"app_id",
",",
"sentence_file",
",",
"json_flag",
",",
"sentence",
",",
"class_filter",
",",
"request_id",
")",
":",
"# type: (Context, unicode, Optional[IO], bool, unicode, unicode, unicode) -> None # NOQA",
"app_id",
"=",
"clean_app_id",
"(",
"app_id",
")",
"sentence",
"=",
"clean_sentence",
"(",
"sentence",
",",
"sentence_file",
")",
"if",
"class_filter",
":",
"class_filter",
"=",
"class_filter",
".",
"replace",
"(",
"','",
",",
"'|'",
")",
"api",
"=",
"GoolabsAPI",
"(",
"app_id",
")",
"ret",
"=",
"api",
".",
"entity",
"(",
"sentence",
"=",
"sentence",
",",
"class_filter",
"=",
"class_filter",
",",
"request_id",
"=",
"request_id",
")",
"if",
"json_flag",
":",
"click",
".",
"echo",
"(",
"format_json",
"(",
"api",
".",
"response",
".",
"json",
"(",
")",
")",
")",
"return",
"for",
"ne",
"in",
"ret",
"[",
"'ne_list'",
"]",
":",
"click",
".",
"echo",
"(",
"','",
".",
"join",
"(",
"ne",
")",
")"
] | 3b87d0409e55c71290158ad6d5e2d8bb9a338c46 |
test | shortsum | Summarize reviews into a short summary. | goolabs/commands.py | def shortsum(ctx, app_id, review_file,
json_flag, review, length, request_id):
# type: (Context, unicode, Optional[IO], bool, unicode, unicode, unicode) -> None # NOQA
"""Summarize reviews into a short summary."""
app_id = clean_app_id(app_id)
review_list = clean_review(review, review_file)
length_int = clean_length(length) # type: Optional[int]
api = GoolabsAPI(app_id)
ret = api.shortsum(
review_list=review_list,
length=length_int,
request_id=request_id,
)
if json_flag:
click.echo(format_json(api.response.json()))
return
click.echo(ret['summary']) | def shortsum(ctx, app_id, review_file,
json_flag, review, length, request_id):
# type: (Context, unicode, Optional[IO], bool, unicode, unicode, unicode) -> None # NOQA
"""Summarize reviews into a short summary."""
app_id = clean_app_id(app_id)
review_list = clean_review(review, review_file)
length_int = clean_length(length) # type: Optional[int]
api = GoolabsAPI(app_id)
ret = api.shortsum(
review_list=review_list,
length=length_int,
request_id=request_id,
)
if json_flag:
click.echo(format_json(api.response.json()))
return
click.echo(ret['summary']) | [
"Summarize",
"reviews",
"into",
"a",
"short",
"summary",
"."
] | tell-k/goolabs | python | https://github.com/tell-k/goolabs/blob/3b87d0409e55c71290158ad6d5e2d8bb9a338c46/goolabs/commands.py#L237-L257 | [
"def",
"shortsum",
"(",
"ctx",
",",
"app_id",
",",
"review_file",
",",
"json_flag",
",",
"review",
",",
"length",
",",
"request_id",
")",
":",
"# type: (Context, unicode, Optional[IO], bool, unicode, unicode, unicode) -> None # NOQA",
"app_id",
"=",
"clean_app_id",
"(",
"app_id",
")",
"review_list",
"=",
"clean_review",
"(",
"review",
",",
"review_file",
")",
"length_int",
"=",
"clean_length",
"(",
"length",
")",
"# type: Optional[int]",
"api",
"=",
"GoolabsAPI",
"(",
"app_id",
")",
"ret",
"=",
"api",
".",
"shortsum",
"(",
"review_list",
"=",
"review_list",
",",
"length",
"=",
"length_int",
",",
"request_id",
"=",
"request_id",
",",
")",
"if",
"json_flag",
":",
"click",
".",
"echo",
"(",
"format_json",
"(",
"api",
".",
"response",
".",
"json",
"(",
")",
")",
")",
"return",
"click",
".",
"echo",
"(",
"ret",
"[",
"'summary'",
"]",
")"
] | 3b87d0409e55c71290158ad6d5e2d8bb9a338c46 |
test | keyword | Extract "keywords" from an input document. | goolabs/commands.py | def keyword(ctx, app_id, body_file, json_flag,
title, body, max_num, forcus, request_id):
# type: (Context, unicode, Optional[IO], bool, unicode, unicode, int, unicode, unicode) -> None # NOQA
"""Extract "keywords" from an input document. """
app_id = clean_app_id(app_id)
body = clean_body(body, body_file)
api = GoolabsAPI(app_id)
ret = api.keyword(
title=title,
body=body,
max_num=max_num,
forcus=forcus,
request_id=request_id,
)
if json_flag:
click.echo(format_json(api.response.json()))
return
for k in ret['keywords']:
k = dict((key.encode('utf-8'), k[key]) for key in k.keys())
for keyword, score in six.iteritems(k):
click.echo(u'{0},{1}'.format(text(keyword), score)) | def keyword(ctx, app_id, body_file, json_flag,
title, body, max_num, forcus, request_id):
# type: (Context, unicode, Optional[IO], bool, unicode, unicode, int, unicode, unicode) -> None # NOQA
"""Extract "keywords" from an input document. """
app_id = clean_app_id(app_id)
body = clean_body(body, body_file)
api = GoolabsAPI(app_id)
ret = api.keyword(
title=title,
body=body,
max_num=max_num,
forcus=forcus,
request_id=request_id,
)
if json_flag:
click.echo(format_json(api.response.json()))
return
for k in ret['keywords']:
k = dict((key.encode('utf-8'), k[key]) for key in k.keys())
for keyword, score in six.iteritems(k):
click.echo(u'{0},{1}'.format(text(keyword), score)) | [
"Extract",
"keywords",
"from",
"an",
"input",
"document",
"."
] | tell-k/goolabs | python | https://github.com/tell-k/goolabs/blob/3b87d0409e55c71290158ad6d5e2d8bb9a338c46/goolabs/commands.py#L270-L294 | [
"def",
"keyword",
"(",
"ctx",
",",
"app_id",
",",
"body_file",
",",
"json_flag",
",",
"title",
",",
"body",
",",
"max_num",
",",
"forcus",
",",
"request_id",
")",
":",
"# type: (Context, unicode, Optional[IO], bool, unicode, unicode, int, unicode, unicode) -> None # NOQA",
"app_id",
"=",
"clean_app_id",
"(",
"app_id",
")",
"body",
"=",
"clean_body",
"(",
"body",
",",
"body_file",
")",
"api",
"=",
"GoolabsAPI",
"(",
"app_id",
")",
"ret",
"=",
"api",
".",
"keyword",
"(",
"title",
"=",
"title",
",",
"body",
"=",
"body",
",",
"max_num",
"=",
"max_num",
",",
"forcus",
"=",
"forcus",
",",
"request_id",
"=",
"request_id",
",",
")",
"if",
"json_flag",
":",
"click",
".",
"echo",
"(",
"format_json",
"(",
"api",
".",
"response",
".",
"json",
"(",
")",
")",
")",
"return",
"for",
"k",
"in",
"ret",
"[",
"'keywords'",
"]",
":",
"k",
"=",
"dict",
"(",
"(",
"key",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"k",
"[",
"key",
"]",
")",
"for",
"key",
"in",
"k",
".",
"keys",
"(",
")",
")",
"for",
"keyword",
",",
"score",
"in",
"six",
".",
"iteritems",
"(",
"k",
")",
":",
"click",
".",
"echo",
"(",
"u'{0},{1}'",
".",
"format",
"(",
"text",
"(",
"keyword",
")",
",",
"score",
")",
")"
] | 3b87d0409e55c71290158ad6d5e2d8bb9a338c46 |
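The inner loop of keyword re-encodes dict keys to UTF-8 bytes before formatting, a Python 2 workaround for mixed str/unicode handling. Under Python 3 the same output needs no re-encoding; a sketch with an assumed response shape:

ret = {'keywords': [{u'りんご': 0.42}, {u'ゴリラ': 0.31}]}  # assumed shape
for entry in ret['keywords']:
    for keyword, score in entry.items():
        print(u'{0},{1}'.format(keyword, score))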
test | chrono | Extract expression expressing date and time and normalize its value | goolabs/commands.py | def chrono(ctx, app_id, sentence_file,
json_flag, sentence, doc_time, request_id):
# type: (Context, unicode, Optional[IO], bool, unicode, unicode, unicode) -> None # NOQA
"""Extract expression expressing date and time and normalize its value """
app_id = clean_app_id(app_id)
sentence = clean_sentence(sentence, sentence_file)
api = GoolabsAPI(app_id)
ret = api.chrono(
sentence=sentence,
doc_time=doc_time,
request_id=request_id,
)
if json_flag:
click.echo(format_json(api.response.json()))
return
for pair in ret['datetime_list']:
click.echo(u'{0}: {1}'.format(text(pair[0]), pair[1])) | def chrono(ctx, app_id, sentence_file,
json_flag, sentence, doc_time, request_id):
# type: (Context, unicode, Optional[IO], bool, unicode, unicode, unicode) -> None # NOQA
"""Extract expression expressing date and time and normalize its value """
app_id = clean_app_id(app_id)
sentence = clean_sentence(sentence, sentence_file)
api = GoolabsAPI(app_id)
ret = api.chrono(
sentence=sentence,
doc_time=doc_time,
request_id=request_id,
)
if json_flag:
click.echo(format_json(api.response.json()))
return
for pair in ret['datetime_list']:
click.echo(u'{0}: {1}'.format(text(pair[0]), pair[1])) | [
"Extract",
"expression",
"expressing",
"date",
"and",
"time",
"and",
"normalize",
"its",
"value"
] | tell-k/goolabs | python | https://github.com/tell-k/goolabs/blob/3b87d0409e55c71290158ad6d5e2d8bb9a338c46/goolabs/commands.py#L305-L325 | [
"def",
"chrono",
"(",
"ctx",
",",
"app_id",
",",
"sentence_file",
",",
"json_flag",
",",
"sentence",
",",
"doc_time",
",",
"request_id",
")",
":",
"# type: (Context, unicode, Optional[IO], bool, unicode, unicode, unicode) -> None # NOQA",
"app_id",
"=",
"clean_app_id",
"(",
"app_id",
")",
"sentence",
"=",
"clean_sentence",
"(",
"sentence",
",",
"sentence_file",
")",
"api",
"=",
"GoolabsAPI",
"(",
"app_id",
")",
"ret",
"=",
"api",
".",
"chrono",
"(",
"sentence",
"=",
"sentence",
",",
"doc_time",
"=",
"doc_time",
",",
"request_id",
"=",
"request_id",
",",
")",
"if",
"json_flag",
":",
"click",
".",
"echo",
"(",
"format_json",
"(",
"api",
".",
"response",
".",
"json",
"(",
")",
")",
")",
"return",
"for",
"pair",
"in",
"ret",
"[",
"'datetime_list'",
"]",
":",
"click",
".",
"echo",
"(",
"u'{0}: {1}'",
".",
"format",
"(",
"text",
"(",
"pair",
"[",
"0",
"]",
")",
",",
"pair",
"[",
"1",
"]",
")",
")"
] | 3b87d0409e55c71290158ad6d5e2d8bb9a338c46 |
test | PipelineFactory.create | Create a pipeline stage.
Instantiates `stage` with `config`. This essentially
translates to ``stage(config)``, except that two keys from
`scp_config` are injected into the configuration:
``tmp_dir_path`` is an execution-specific directory from
combining the top-level ``tmp_dir_path`` configuration with
:attr:`tmp_dir_suffix`; and ``third_dir_path`` is the same
path from the top-level configuration. `stage` may be either
a callable returning the stage (e.g. its class), or its name
in the configuration.
`scp_config` is the configuration for the pipeline as a
whole, and is required. `config` is the configuration for
the stage; if it is :const:`None` then it is extracted
from `scp_config`.
If you already have a fully formed configuration block
and want to create a stage, you can call
.. code-block:: python
factory.registry[stage](stage_config)
In most cases if you have a stage class object and want to
instantiate it with its defaults you can call
.. code-block:: python
stage = stage_cls(stage_cls.default_config)
.. note:: This mirrors
:meth:`yakonfig.factory.AutoFactory.create`, with
some thought that this factory class might migrate
to using that as a base in the future.
:param stage: pipeline stage class, or its name in the registry
:param dict scp_config: configuration block for the pipeline
:param dict config: configuration block for the stage, or
:const:`None` to get it from `scp_config` | streamcorpus_pipeline/_pipeline.py | def create(self, stage, scp_config, config=None):
'''Create a pipeline stage.
Instantiates `stage` with `config`. This essentially
translates to ``stage(config)``, except that two keys from
`scp_config` are injected into the configuration:
``tmp_dir_path`` is an execution-specific directory from
combining the top-level ``tmp_dir_path`` configuration with
:attr:`tmp_dir_suffix`; and ``third_dir_path`` is the same
path from the top-level configuration. `stage` may be either
a callable returning the stage (e.g. its class), or its name
in the configuration.
`scp_config` is the configuration for the pipeline as a
whole, and is required. `config` is the configuration for
the stage; if it is :const:`None` then it is extracted
from `scp_config`.
If you already have a fully formed configuration block
and want to create a stage, you can call
.. code-block:: python
factory.registry[stage](stage_config)
In most cases if you have a stage class object and want to
instantiate it with its defaults you can call
.. code-block:: python
stage = stage_cls(stage_cls.default_config)
.. note:: This mirrors
:meth:`yakonfig.factory.AutoFactory.create`, with
some thought that this factory class might migrate
to using that as a base in the future.
:param stage: pipeline stage class, or its name in the registry
:param dict scp_config: configuration block for the pipeline
:param dict config: configuration block for the stage, or
:const:`None` to get it from `scp_config`
'''
# Figure out what we have for a stage and its name
if isinstance(stage, basestring):
stage_name = stage
stage_obj = self.registry[stage_name]
else:
stage_name = getattr(stage, 'config_name', stage.__name__)
stage_obj = stage
# Find the configuration; get a copy we can mutate
if config is None:
config = scp_config.get(stage_name, None)
if config is None:
config = getattr(stage_obj, 'default_config', {})
config = dict(config)
# Fill in more values
if self.tmp_dir_suffix is None:
config['tmp_dir_path'] = scp_config['tmp_dir_path']
else:
config['tmp_dir_path'] = os.path.join(scp_config['tmp_dir_path'],
self.tmp_dir_suffix)
config['third_dir_path'] = scp_config['third_dir_path']
return stage_obj(config) | def create(self, stage, scp_config, config=None):
'''Create a pipeline stage.
Instantiates `stage` with `config`. This essentially
translates to ``stage(config)``, except that two keys from
`scp_config` are injected into the configuration:
``tmp_dir_path`` is an execution-specific directory from
combining the top-level ``tmp_dir_path`` configuration with
:attr:`tmp_dir_suffix`; and ``third_dir_path`` is the same
path from the top-level configuration. `stage` may be either
a callable returning the stage (e.g. its class), or its name
in the configuration.
`scp_config` is the configuration for the pipeline as a
whole, and is required. `config` is the configuration for
the stage; if it is :const:`None` then it is extracted
from `scp_config`.
If you already have a fully formed configuration block
and want to create a stage, you can call
.. code-block:: python
factory.registry[stage](stage_config)
In most cases if you have a stage class object and want to
instantiate it with its defaults you can call
.. code-block:: python
stage = stage_cls(stage_cls.default_config)
.. note:: This mirrors
:meth:`yakonfig.factory.AutoFactory.create`, with
some thought that this factory class might migrate
to using that as a base in the future.
:param stage: pipeline stage class, or its name in the registry
:param dict scp_config: configuration block for the pipeline
:param dict config: configuration block for the stage, or
:const:`None` to get it from `scp_config`
'''
# Figure out what we have for a stage and its name
if isinstance(stage, basestring):
stage_name = stage
stage_obj = self.registry[stage_name]
else:
stage_name = getattr(stage, 'config_name', stage.__name__)
stage_obj = stage
# Find the configuration; get a copy we can mutate
if config is None:
config = scp_config.get(stage_name, None)
if config is None:
config = getattr(stage_obj, 'default_config', {})
config = dict(config)
# Fill in more values
if self.tmp_dir_suffix is None:
config['tmp_dir_path'] = scp_config['tmp_dir_path']
else:
config['tmp_dir_path'] = os.path.join(scp_config['tmp_dir_path'],
self.tmp_dir_suffix)
config['third_dir_path'] = scp_config['third_dir_path']
return stage_obj(config) | [
"Create",
"a",
"pipeline",
"stage",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_pipeline.py#L233-L299 | [
"def",
"create",
"(",
"self",
",",
"stage",
",",
"scp_config",
",",
"config",
"=",
"None",
")",
":",
"# Figure out what we have for a stage and its name",
"if",
"isinstance",
"(",
"stage",
",",
"basestring",
")",
":",
"stage_name",
"=",
"stage",
"stage_obj",
"=",
"self",
".",
"registry",
"[",
"stage_name",
"]",
"else",
":",
"stage_name",
"=",
"getattr",
"(",
"stage",
",",
"'config_name'",
",",
"stage",
".",
"__name__",
")",
"stage_obj",
"=",
"stage",
"# Find the configuration; get a copy we can mutate",
"if",
"config",
"is",
"None",
":",
"config",
"=",
"scp_config",
".",
"get",
"(",
"stage_name",
",",
"None",
")",
"if",
"config",
"is",
"None",
":",
"config",
"=",
"getattr",
"(",
"stage_obj",
",",
"'default_config'",
",",
"{",
"}",
")",
"config",
"=",
"dict",
"(",
"config",
")",
"# Fill in more values",
"if",
"self",
".",
"tmp_dir_suffix",
"is",
"None",
":",
"config",
"[",
"'tmp_dir_path'",
"]",
"=",
"scp_config",
"[",
"'tmp_dir_path'",
"]",
"else",
":",
"config",
"[",
"'tmp_dir_path'",
"]",
"=",
"os",
".",
"path",
".",
"join",
"(",
"scp_config",
"[",
"'tmp_dir_path'",
"]",
",",
"self",
".",
"tmp_dir_suffix",
")",
"config",
"[",
"'third_dir_path'",
"]",
"=",
"scp_config",
"[",
"'third_dir_path'",
"]",
"return",
"stage_obj",
"(",
"config",
")"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
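Per the docstring, create() resolves a stage in a fixed order: look the stage up by name in the registry if needed, take its config block from scp_config, fall back to the class's default_config, then inject the two directory paths before instantiating. A self-contained sketch of that order with a toy stage class (EchoStage, the registry contents, and the paths are all hypothetical):

import os

class EchoStage(object):
    default_config = {'verbose': False}  # hypothetical stage
    def __init__(self, config):
        self.config = config

registry = {'echo': EchoStage}
scp_config = {'tmp_dir_path': '/tmp/scp', 'third_dir_path': '/opt/third'}

name = 'echo'
cls = registry[name]
config = dict(scp_config.get(name) or cls.default_config)
config['tmp_dir_path'] = os.path.join(
    scp_config['tmp_dir_path'], 'run-0001')  # stands in for tmp_dir_suffix
config['third_dir_path'] = scp_config['third_dir_path']
stage = cls(config)
print(stage.config)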
test | PipelineFactory._init_stages | Create a list of indirect stages.
`name` should be the name of a config item that holds a list
of names of stages, for instance, ``writers``. This looks up
the names of those stages, then creates and returns the
corresponding list of stage objects. For instance, if the
config says
.. code-block:: yaml
incremental_transforms: [clean_html, clean_visible]
then calling ``self._init_stages(scp_config,
'incremental_transforms')`` will return a list of the two
named stage instances.
:param dict config: `streamcorpus_pipeline` configuration block
:param str name: name of the stage name list entry
:return: list of new stage instances | streamcorpus_pipeline/_pipeline.py | def _init_stages(self, config, name):
'''Create a list of indirect stages.
`name` should be the name of a config item that holds a list
of names of stages, for instance, ``writers``. This looks up
the names of those stages, then creates and returns the
corresponding list of stage objects. For instance, if the
config says
.. code-block:: yaml
incremental_transforms: [clean_html, clean_visible]
then calling ``self._init_stages(scp_config,
'incremental_transforms')`` will return a list of the two
named stage instances.
:param dict config: `streamcorpus_pipeline` configuration block
:param str name: name of the stage name list entry
:return: list of new stage instances
'''
if name not in config:
return []
return [self.create(stage, config) for stage in config[name]] | def _init_stages(self, config, name):
'''Create a list of indirect stages.
`name` should be the name of a config item that holds a list
of names of stages, for instance, ``writers``. This looks up
the names of those stages, then creates and returns the
corresponding list of stage objects. For instance, if the
config says
.. code-block:: yaml
incremental_transforms: [clean_html, clean_visible]
then calling ``self._init_stages(scp_config,
'incremental_transforms')`` will return a list of the two
named stage instances.
:param dict config: `streamcorpus_pipeline` configuration block
:param str name: name of the stage name list entry
:return: list of new stage instances
'''
if name not in config:
return []
return [self.create(stage, config) for stage in config[name]] | [
"Create",
"a",
"list",
"of",
"indirect",
"stages",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_pipeline.py#L326-L350 | [
"def",
"_init_stages",
"(",
"self",
",",
"config",
",",
"name",
")",
":",
"if",
"name",
"not",
"in",
"config",
":",
"return",
"[",
"]",
"return",
"[",
"self",
".",
"create",
"(",
"stage",
",",
"config",
")",
"for",
"stage",
"in",
"config",
"[",
"name",
"]",
"]"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
test | PipelineFactory._init_all_stages | Create stages that are used for the pipeline.
:param dict config: `streamcorpus_pipeline` configuration
:return: tuple of (reader, incremental transforms, batch
transforms, post-batch incremental transforms, writers,
temporary directory) | streamcorpus_pipeline/_pipeline.py | def _init_all_stages(self, config):
'''Create stages that are used for the pipeline.
:param dict config: `streamcorpus_pipeline` configuration
:return: tuple of (reader, incremental transforms, batch
transforms, post-batch incremental transforms, writers,
temporary directory)
'''
reader = self._init_stage(config, 'reader')
incremental_transforms = self._init_stages(
config, 'incremental_transforms')
batch_transforms = self._init_stages(config, 'batch_transforms')
post_batch_incremental_transforms = self._init_stages(
config, 'post_batch_incremental_transforms')
writers = self._init_stages(config, 'writers')
tmp_dir_path = os.path.join(config['tmp_dir_path'],
self.tmp_dir_suffix)
return (reader, incremental_transforms, batch_transforms,
post_batch_incremental_transforms, writers, tmp_dir_path) | def _init_all_stages(self, config):
'''Create stages that are used for the pipeline.
:param dict config: `streamcorpus_pipeline` configuration
:return: tuple of (reader, incremental transforms, batch
transforms, post-batch incremental transforms, writers,
temporary directory)
'''
reader = self._init_stage(config, 'reader')
incremental_transforms = self._init_stages(
config, 'incremental_transforms')
batch_transforms = self._init_stages(config, 'batch_transforms')
post_batch_incremental_transforms = self._init_stages(
config, 'post_batch_incremental_transforms')
writers = self._init_stages(config, 'writers')
tmp_dir_path = os.path.join(config['tmp_dir_path'],
self.tmp_dir_suffix)
return (reader, incremental_transforms, batch_transforms,
post_batch_incremental_transforms, writers, tmp_dir_path) | [
"Create",
"stages",
"that",
"are",
"used",
"for",
"the",
"pipeline",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_pipeline.py#L352-L371 | [
"def",
"_init_all_stages",
"(",
"self",
",",
"config",
")",
":",
"reader",
"=",
"self",
".",
"_init_stage",
"(",
"config",
",",
"'reader'",
")",
"incremental_transforms",
"=",
"self",
".",
"_init_stages",
"(",
"config",
",",
"'incremental_transforms'",
")",
"batch_transforms",
"=",
"self",
".",
"_init_stages",
"(",
"config",
",",
"'batch_transforms'",
")",
"post_batch_incremental_transforms",
"=",
"self",
".",
"_init_stages",
"(",
"config",
",",
"'post_batch_incremental_transforms'",
")",
"writers",
"=",
"self",
".",
"_init_stages",
"(",
"config",
",",
"'writers'",
")",
"tmp_dir_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"config",
"[",
"'tmp_dir_path'",
"]",
",",
"self",
".",
"tmp_dir_suffix",
")",
"return",
"(",
"reader",
",",
"incremental_transforms",
",",
"batch_transforms",
",",
"post_batch_incremental_transforms",
",",
"writers",
",",
"tmp_dir_path",
")"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
test | Pipeline._process_task | Process a :class:`coordinate.WorkUnit`.
The work unit's key is taken as the input file name. The
data should have ``start_count`` and ``start_chunk_time``
values, which are passed on to :meth:`run`.
:param work_unit: work unit to process
:paramtype work_unit: :class:`coordinate.WorkUnit`
:return: number of stream items processed | streamcorpus_pipeline/_pipeline.py | def _process_task(self, work_unit):
'''Process a :class:`coordinate.WorkUnit`.
The work unit's key is taken as the input file name. The
data should have ``start_count`` and ``start_chunk_time``
values, which are passed on to :meth:`run`.
:param work_unit: work unit to process
:paramtype work_unit: :class:`coordinate.WorkUnit`
:return: number of stream items processed
'''
self.work_unit = work_unit
i_str = work_unit.key
start_count = work_unit.data['start_count']
start_chunk_time = work_unit.data['start_chunk_time']
self.run(i_str, start_count, start_chunk_time) | def _process_task(self, work_unit):
'''Process a :class:`coordinate.WorkUnit`.
The work unit's key is taken as the input file name. The
data should have ``start_count`` and ``start_chunk_time``
values, which are passed on to :meth:`run`.
:param work_unit: work unit to process
:paramtype work_unit: :class:`coordinate.WorkUnit`
:return: number of stream items processed
'''
self.work_unit = work_unit
i_str = work_unit.key
start_count = work_unit.data['start_count']
start_chunk_time = work_unit.data['start_chunk_time']
self.run(i_str, start_count, start_chunk_time) | [
"Process",
"a",
":",
"class",
":",
"coordinate",
".",
"WorkUnit",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_pipeline.py#L496-L512 | [
"def",
"_process_task",
"(",
"self",
",",
"work_unit",
")",
":",
"self",
".",
"work_unit",
"=",
"work_unit",
"i_str",
"=",
"work_unit",
".",
"key",
"start_count",
"=",
"work_unit",
".",
"data",
"[",
"'start_count'",
"]",
"start_chunk_time",
"=",
"work_unit",
".",
"data",
"[",
"'start_chunk_time'",
"]",
"self",
".",
"run",
"(",
"i_str",
",",
"start_count",
",",
"start_chunk_time",
")"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
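_process_task only unpacks the work unit: its key names the input, and start_count/start_chunk_time ride along in .data. A toy stand-in for coordinate.WorkUnit shows the shape the method expects (class and key value are hypothetical):

import time

class ToyWorkUnit(object):
    # hypothetical stand-in for coordinate.WorkUnit
    def __init__(self, key, data):
        self.key = key
        self.data = data

wu = ToyWorkUnit('s3://bucket/chunk-000123.sc.xz.gpg',
                 {'start_count': 0, 'start_chunk_time': time.time()})
# pipeline._process_task(wu) then reduces to:
# pipeline.run(wu.key, wu.data['start_count'],
#              wu.data['start_chunk_time'])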
test | Pipeline.run | Run the pipeline.
This runs all of the steps described in the pipeline constructor,
reading from some input and writing to some output.
:param str i_str: name of the input file, or other reader-specific
description of where to get input
:param int start_count: index of the first stream item
:param int start_chunk_time: timestamp for the first stream item | streamcorpus_pipeline/_pipeline.py | def run(self, i_str, start_count=0, start_chunk_time=None):
'''Run the pipeline.
This runs all of the steps described in the pipeline constructor,
reading from some input and writing to some output.
:param str i_str: name of the input file, or other reader-specific
description of where to get input
:param int start_count: index of the first stream item
:param int start_chunk_time: timestamp for the first stream item
'''
try:
if not os.path.exists(self.tmp_dir_path):
os.makedirs(self.tmp_dir_path)
if start_chunk_time is None:
start_chunk_time = time.time()
## the reader returns generators of StreamItems
i_chunk = self.reader(i_str)
## t_path points to the currently in-progress temp chunk
t_path = None
## loop over all docs in the chunk processing and cutting
## smaller chunks if needed
len_clean_visible = 0
sources = set()
next_idx = 0
## how many have we input and actually done processing on?
input_item_count = 0
for si in i_chunk:
# TODO: break out a _process_stream_item function?
next_idx += 1
## yield to the gevent hub to allow other things to run
if gevent:
gevent.sleep(0)
## skip forward until we reach start_count
if next_idx <= start_count:
continue
if next_idx % self.rate_log_interval == 0:
## indexing is zero-based, so next_idx corresponds
## to length of list of SIs processed so far
elapsed = time.time() - start_chunk_time
if elapsed > 0:
rate = float(next_idx) / elapsed
logger.info('%d in %.1f --> %.1f per sec on '
'(pre-partial_commit) %s',
next_idx - start_count, elapsed, rate,
i_str)
if not self.t_chunk:
## make a temporary chunk at a temporary path
# (Lazy allocation after we've read an item that might get processed out to the new chunk file)
# TODO: make this EVEN LAZIER by not opening the t_chunk until inside _run_incremental_transforms when the first output si is ready
t_path = os.path.join(self.tmp_dir_path,
't_chunk-%s' % uuid.uuid4().hex)
self.t_chunk = streamcorpus.Chunk(path=t_path, mode='wb')
assert self.t_chunk.message == streamcorpus.StreamItem_v0_3_0, self.t_chunk.message
# TODO: a set of incremental transforms is equivalent
# to a batch transform. Make the pipeline explicitly
# configurable as such:
#
# batch_transforms: [[incr set 1], batch op, [incr set 2], ...]
#
# OR: for some list of transforms (mixed incremental
# and batch) pipeline can detect and batchify as needed
## incremental transforms populate t_chunk
## let the incremental transforms destroy the si by
## returning None
si = self._run_incremental_transforms(
si, self.incremental_transforms)
## insist that every chunk has only one source string
if si:
sources.add(si.source)
if self.assert_single_source and len(sources) != 1:
raise InvalidStreamItem(
'stream item %r had source %r, not %r '
'(set assert_single_source: false to suppress)' %
(si.stream_id, si.source, sources))
if si and si.body and si.body.clean_visible:
len_clean_visible += len(si.body.clean_visible)
## log binned clean_visible lengths, for quick stats estimates
#logger.debug('len(si.body.clean_visible)=%d' % int(10 * int(math.floor(float(len(si.body.clean_visible)) / 2**10)/10)))
#logger.debug('len(si.body.clean_visible)=%d' % len(si.body.clean_visible))
if ((self.output_chunk_max_count is not None and
len(self.t_chunk) == self.output_chunk_max_count)):
logger.info('reached output_chunk_max_count (%d) at: %d',
len(self.t_chunk), next_idx)
self._process_output_chunk(
start_count, next_idx, sources, i_str, t_path)
start_count = next_idx
elif (self.output_max_clean_visible_bytes is not None and
len_clean_visible >=
self.output_chunk_max_clean_visible_bytes):
logger.info(
'reached output_chunk_max_clean_visible_bytes '
'(%d) at: %d',
self.output_chunk_max_clean_visible_bytes,
len_clean_visible)
len_clean_visible = 0
self._process_output_chunk(
start_count, next_idx, sources, i_str, t_path)
start_count = next_idx
input_item_count += 1
if (((self.input_item_limit is not None) and
(input_item_count > self.input_item_limit))):
break
if self.t_chunk is not None:
self._process_output_chunk(
start_count, next_idx, sources, i_str, t_path)
## return how many stream items we processed
return next_idx
finally:
if self.t_chunk is not None:
self.t_chunk.close()
for transform in self.batch_transforms:
transform.shutdown()
if self.cleanup_tmp_files:
rmtree(self.tmp_dir_path) | def run(self, i_str, start_count=0, start_chunk_time=None):
'''Run the pipeline.
This runs all of the steps described in the pipeline constructor,
reading from some input and writing to some output.
:param str i_str: name of the input file, or other reader-specific
description of where to get input
:param int start_count: index of the first stream item
:param int start_chunk_time: timestamp for the first stream item
'''
try:
if not os.path.exists(self.tmp_dir_path):
os.makedirs(self.tmp_dir_path)
if start_chunk_time is None:
start_chunk_time = time.time()
## the reader returns generators of StreamItems
i_chunk = self.reader(i_str)
## t_path points to the currently in-progress temp chunk
t_path = None
## loop over all docs in the chunk processing and cutting
## smaller chunks if needed
len_clean_visible = 0
sources = set()
next_idx = 0
## how many have we input and actually done processing on?
input_item_count = 0
for si in i_chunk:
# TODO: break out a _process_stream_item function?
next_idx += 1
## yield to the gevent hub to allow other things to run
if gevent:
gevent.sleep(0)
## skip forward until we reach start_count
if next_idx <= start_count:
continue
if next_idx % self.rate_log_interval == 0:
## indexing is zero-based, so next_idx corresponds
## to length of list of SIs processed so far
elapsed = time.time() - start_chunk_time
if elapsed > 0:
rate = float(next_idx) / elapsed
logger.info('%d in %.1f --> %.1f per sec on '
'(pre-partial_commit) %s',
next_idx - start_count, elapsed, rate,
i_str)
if not self.t_chunk:
## make a temporary chunk at a temporary path
# (Lazy allocation after we've read an item that might get processed out to the new chunk file)
# TODO: make this EVEN LAZIER by not opening the t_chunk until inside _run_incremental_transforms when the first output si is ready
t_path = os.path.join(self.tmp_dir_path,
't_chunk-%s' % uuid.uuid4().hex)
self.t_chunk = streamcorpus.Chunk(path=t_path, mode='wb')
assert self.t_chunk.message == streamcorpus.StreamItem_v0_3_0, self.t_chunk.message
# TODO: a set of incremental transforms is equivalent
# to a batch transform. Make the pipeline explicitly
# configurable as such:
#
# batch_transforms: [[incr set 1], batch op, [incr set 2], ...]
#
# OR: for some list of transforms (mixed incremental
# and batch) pipeline can detect and batchify as needed
## incremental transforms populate t_chunk
## let the incremental transforms destroy the si by
## returning None
si = self._run_incremental_transforms(
si, self.incremental_transforms)
## insist that every chunk has only one source string
if si:
sources.add(si.source)
if self.assert_single_source and len(sources) != 1:
raise InvalidStreamItem(
'stream item %r had source %r, not %r '
'(set assert_single_source: false to suppress)' %
(si.stream_id, si.source, sources))
if si and si.body and si.body.clean_visible:
len_clean_visible += len(si.body.clean_visible)
## log binned clean_visible lengths, for quick stats estimates
#logger.debug('len(si.body.clean_visible)=%d' % int(10 * int(math.floor(float(len(si.body.clean_visible)) / 2**10)/10)))
#logger.debug('len(si.body.clean_visible)=%d' % len(si.body.clean_visible))
if ((self.output_chunk_max_count is not None and
len(self.t_chunk) == self.output_chunk_max_count)):
logger.info('reached output_chunk_max_count (%d) at: %d',
len(self.t_chunk), next_idx)
self._process_output_chunk(
start_count, next_idx, sources, i_str, t_path)
start_count = next_idx
elif (self.output_max_clean_visible_bytes is not None and
len_clean_visible >=
self.output_chunk_max_clean_visible_bytes):
logger.info(
'reached output_chunk_max_clean_visible_bytes '
'(%d) at: %d',
self.output_chunk_max_clean_visible_bytes,
len_clean_visible)
len_clean_visible = 0
self._process_output_chunk(
start_count, next_idx, sources, i_str, t_path)
start_count = next_idx
input_item_count += 1
if (((self.input_item_limit is not None) and
(input_item_count > self.input_item_limit))):
break
if self.t_chunk is not None:
self._process_output_chunk(
start_count, next_idx, sources, i_str, t_path)
## return how many stream items we processed
return next_idx
finally:
if self.t_chunk is not None:
self.t_chunk.close()
for transform in self.batch_transforms:
transform.shutdown()
if self.cleanup_tmp_files:
rmtree(self.tmp_dir_path) | [
"Run",
"the",
"pipeline",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_pipeline.py#L514-L650 | [
"def",
"run",
"(",
"self",
",",
"i_str",
",",
"start_count",
"=",
"0",
",",
"start_chunk_time",
"=",
"None",
")",
":",
"try",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"tmp_dir_path",
")",
":",
"os",
".",
"makedirs",
"(",
"self",
".",
"tmp_dir_path",
")",
"if",
"start_chunk_time",
"is",
"None",
":",
"start_chunk_time",
"=",
"time",
".",
"time",
"(",
")",
"## the reader returns generators of StreamItems",
"i_chunk",
"=",
"self",
".",
"reader",
"(",
"i_str",
")",
"## t_path points to the currently in-progress temp chunk",
"t_path",
"=",
"None",
"## loop over all docs in the chunk processing and cutting",
"## smaller chunks if needed",
"len_clean_visible",
"=",
"0",
"sources",
"=",
"set",
"(",
")",
"next_idx",
"=",
"0",
"## how many have we input and actually done processing on?",
"input_item_count",
"=",
"0",
"for",
"si",
"in",
"i_chunk",
":",
"# TODO: break out a _process_stream_item function?",
"next_idx",
"+=",
"1",
"## yield to the gevent hub to allow other things to run",
"if",
"gevent",
":",
"gevent",
".",
"sleep",
"(",
"0",
")",
"## skip forward until we reach start_count",
"if",
"next_idx",
"<=",
"start_count",
":",
"continue",
"if",
"next_idx",
"%",
"self",
".",
"rate_log_interval",
"==",
"0",
":",
"## indexing is zero-based, so next_idx corresponds",
"## to length of list of SIs processed so far",
"elapsed",
"=",
"time",
".",
"time",
"(",
")",
"-",
"start_chunk_time",
"if",
"elapsed",
">",
"0",
":",
"rate",
"=",
"float",
"(",
"next_idx",
")",
"/",
"elapsed",
"logger",
".",
"info",
"(",
"'%d in %.1f --> %.1f per sec on '",
"'(pre-partial_commit) %s'",
",",
"next_idx",
"-",
"start_count",
",",
"elapsed",
",",
"rate",
",",
"i_str",
")",
"if",
"not",
"self",
".",
"t_chunk",
":",
"## make a temporary chunk at a temporary path",
"# (Lazy allocation after we've read an item that might get processed out to the new chunk file)",
"# TODO: make this EVEN LAZIER by not opening the t_chunk until inside _run_incremental_transforms whe the first output si is ready",
"t_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"tmp_dir_path",
",",
"'t_chunk-%s'",
"%",
"uuid",
".",
"uuid4",
"(",
")",
".",
"hex",
")",
"self",
".",
"t_chunk",
"=",
"streamcorpus",
".",
"Chunk",
"(",
"path",
"=",
"t_path",
",",
"mode",
"=",
"'wb'",
")",
"assert",
"self",
".",
"t_chunk",
".",
"message",
"==",
"streamcorpus",
".",
"StreamItem_v0_3_0",
",",
"self",
".",
"t_chunk",
".",
"message",
"# TODO: a set of incremental transforms is equivalent",
"# to a batch transform. Make the pipeline explicitly",
"# configurable as such:",
"#",
"# batch_transforms: [[incr set 1], batch op, [incr set 2], ...]",
"#",
"# OR: for some list of transforms (mixed incremental",
"# and batch) pipeline can detect and batchify as needed",
"## incremental transforms populate t_chunk",
"## let the incremental transforms destroy the si by",
"## returning None",
"si",
"=",
"self",
".",
"_run_incremental_transforms",
"(",
"si",
",",
"self",
".",
"incremental_transforms",
")",
"## insist that every chunk has only one source string",
"if",
"si",
":",
"sources",
".",
"add",
"(",
"si",
".",
"source",
")",
"if",
"self",
".",
"assert_single_source",
"and",
"len",
"(",
"sources",
")",
"!=",
"1",
":",
"raise",
"InvalidStreamItem",
"(",
"'stream item %r had source %r, not %r '",
"'(set assert_single_source: false to suppress)'",
"%",
"(",
"si",
".",
"stream_id",
",",
"si",
".",
"source",
",",
"sources",
")",
")",
"if",
"si",
"and",
"si",
".",
"body",
"and",
"si",
".",
"body",
".",
"clean_visible",
":",
"len_clean_visible",
"+=",
"len",
"(",
"si",
".",
"body",
".",
"clean_visible",
")",
"## log binned clean_visible lengths, for quick stats estimates",
"#logger.debug('len(si.body.clean_visible)=%d' % int(10 * int(math.floor(float(len(si.body.clean_visible)) / 2**10)/10)))",
"#logger.debug('len(si.body.clean_visible)=%d' % len(si.body.clean_visible))",
"if",
"(",
"(",
"self",
".",
"output_chunk_max_count",
"is",
"not",
"None",
"and",
"len",
"(",
"self",
".",
"t_chunk",
")",
"==",
"self",
".",
"output_chunk_max_count",
")",
")",
":",
"logger",
".",
"info",
"(",
"'reached output_chunk_max_count (%d) at: %d'",
",",
"len",
"(",
"self",
".",
"t_chunk",
")",
",",
"next_idx",
")",
"self",
".",
"_process_output_chunk",
"(",
"start_count",
",",
"next_idx",
",",
"sources",
",",
"i_str",
",",
"t_path",
")",
"start_count",
"=",
"next_idx",
"elif",
"(",
"self",
".",
"output_max_clean_visible_bytes",
"is",
"not",
"None",
"and",
"len_clean_visible",
">=",
"self",
".",
"output_chunk_max_clean_visible_bytes",
")",
":",
"logger",
".",
"info",
"(",
"'reached output_chunk_max_clean_visible_bytes '",
"'(%d) at: %d'",
",",
"self",
".",
"output_chunk_max_clean_visible_bytes",
",",
"len_clean_visible",
")",
"len_clean_visible",
"=",
"0",
"self",
".",
"_process_output_chunk",
"(",
"start_count",
",",
"next_idx",
",",
"sources",
",",
"i_str",
",",
"t_path",
")",
"start_count",
"=",
"next_idx",
"input_item_count",
"+=",
"1",
"if",
"(",
"(",
"(",
"self",
".",
"input_item_limit",
"is",
"not",
"None",
")",
"and",
"(",
"input_item_count",
">",
"self",
".",
"input_item_limit",
")",
")",
")",
":",
"break",
"if",
"self",
".",
"t_chunk",
"is",
"not",
"None",
":",
"self",
".",
"_process_output_chunk",
"(",
"start_count",
",",
"next_idx",
",",
"sources",
",",
"i_str",
",",
"t_path",
")",
"## return how many stream items we processed",
"return",
"next_idx",
"finally",
":",
"if",
"self",
".",
"t_chunk",
"is",
"not",
"None",
":",
"self",
".",
"t_chunk",
".",
"close",
"(",
")",
"for",
"transform",
"in",
"self",
".",
"batch_transforms",
":",
"transform",
".",
"shutdown",
"(",
")",
"if",
"self",
".",
"cleanup_tmp_files",
":",
"rmtree",
"(",
"self",
".",
"tmp_dir_path",
")"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
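The cutting loop above interleaves two limits. Restated in isolation as a sketch (the standalone function and its name are hypothetical; the real limits live on the Pipeline instance):

    def should_cut_chunk(chunk_len, clean_visible_bytes, max_count, max_bytes):
        # count limit: the open chunk holds output_chunk_max_count items
        if max_count is not None and chunk_len == max_count:
            return True
        # byte limit: accumulated clean_visible bytes reached
        # output_chunk_max_clean_visible_bytes; the counter resets after a cut
        if max_bytes is not None and clean_visible_bytes >= max_bytes:
            return True
        return False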
test | Pipeline._process_output_chunk | for the current output chunk (which should be open):
1. run batch transforms
2. run post-batch incremental transforms
3. run 'writers' to load-out the data to files or other storage
return list of paths that writers wrote to | streamcorpus_pipeline/_pipeline.py | def _process_output_chunk(self, start_count, next_idx, sources, i_str,
t_path):
'''
for the current output chunk (which should be open):
1. run batch transforms
2. run post-batch incremental transforms
3. run 'writers' to load-out the data to files or other storage
return list of paths that writers wrote to
'''
if not self.t_chunk:
# nothing to do
return []
self.t_chunk.close()
# gather the paths as the writers run
o_paths = None
if len(self.t_chunk) > 0:
# only batch transform and load if the chunk
# isn't empty, which can happen when filtering
# with stages like "find"
# batch transforms act on the whole chunk in-place
logger.info('running batch transforms on %d StreamItems',
len(self.t_chunk))
self._run_batch_transforms(t_path)
self._maybe_run_post_batch_incremental_transforms(t_path)
# only proceed if above transforms left us with something
    if (self.t_chunk) and (len(self.t_chunk) > 0):
o_paths = self._run_writers(start_count, next_idx, sources,
i_str, t_path)
# we're now officially done with the chunk
self.t_chunk = None
# If we wrote some paths, update the data dictionary of outputs
if self.work_unit and o_paths:
old_o_paths = self.work_unit.data.get('output', [])
o_paths = old_o_paths + o_paths
self.work_unit.data['start_count'] = next_idx
self.work_unit.data['output'] = o_paths
self.work_unit.update() | def _process_output_chunk(self, start_count, next_idx, sources, i_str,
t_path):
'''
for the current output chunk (which should be open):
1. run batch transforms
2. run post-batch incremental transforms
3. run 'writers' to load-out the data to files or other storage
return list of paths that writers wrote to
'''
if not self.t_chunk:
# nothing to do
return []
self.t_chunk.close()
# gather the paths as the writers run
o_paths = None
if len(self.t_chunk) > 0:
# only batch transform and load if the chunk
# isn't empty, which can happen when filtering
# with stages like "find"
# batch transforms act on the whole chunk in-place
logger.info('running batch transforms on %d StreamItems',
len(self.t_chunk))
self._run_batch_transforms(t_path)
self._maybe_run_post_batch_incremental_transforms(t_path)
# only proceed if above transforms left us with something
    if (self.t_chunk) and (len(self.t_chunk) > 0):
o_paths = self._run_writers(start_count, next_idx, sources,
i_str, t_path)
# we're now officially done with the chunk
self.t_chunk = None
# If we wrote some paths, update the data dictionary of outputs
if self.work_unit and o_paths:
old_o_paths = self.work_unit.data.get('output', [])
o_paths = old_o_paths + o_paths
self.work_unit.data['start_count'] = next_idx
self.work_unit.data['output'] = o_paths
self.work_unit.update() | [
"for",
"the",
"current",
"output",
"chunk",
"(",
"which",
"should",
"be",
"open",
")",
":",
"1",
".",
"run",
"batch",
"transforms",
"2",
".",
"run",
"post",
"-",
"batch",
"incremental",
"transforms",
"3",
".",
"run",
"writers",
"to",
"load",
"-",
"out",
"the",
"data",
"to",
"files",
"or",
"other",
"storage",
"return",
"list",
"of",
"paths",
"that",
"writers",
"wrote",
"to"
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_pipeline.py#L652-L694 | [
"def",
"_process_output_chunk",
"(",
"self",
",",
"start_count",
",",
"next_idx",
",",
"sources",
",",
"i_str",
",",
"t_path",
")",
":",
"if",
"not",
"self",
".",
"t_chunk",
":",
"# nothing to do",
"return",
"[",
"]",
"self",
".",
"t_chunk",
".",
"close",
"(",
")",
"# gather the paths as the writers run",
"o_paths",
"=",
"None",
"if",
"len",
"(",
"self",
".",
"t_chunk",
")",
">",
"0",
":",
"# only batch transform and load if the chunk",
"# isn't empty, which can happen when filtering",
"# with stages like \"find\"",
"# batch transforms act on the whole chunk in-place",
"logger",
".",
"info",
"(",
"'running batch transforms on %d StreamItems'",
",",
"len",
"(",
"self",
".",
"t_chunk",
")",
")",
"self",
".",
"_run_batch_transforms",
"(",
"t_path",
")",
"self",
".",
"_maybe_run_post_batch_incremental_transforms",
"(",
"t_path",
")",
"# only proceed if above transforms left us with something",
"if",
"(",
"self",
".",
"t_chunk",
")",
"and",
"(",
"len",
"(",
"self",
".",
"t_chunk",
")",
">=",
"0",
")",
":",
"o_paths",
"=",
"self",
".",
"_run_writers",
"(",
"start_count",
",",
"next_idx",
",",
"sources",
",",
"i_str",
",",
"t_path",
")",
"# we're now officially done with the chunk",
"self",
".",
"t_chunk",
"=",
"None",
"# If we wrote some paths, update the data dictionary of outputs",
"if",
"self",
".",
"work_unit",
"and",
"o_paths",
":",
"old_o_paths",
"=",
"self",
".",
"work_unit",
".",
"data",
".",
"get",
"(",
"'output'",
",",
"[",
"]",
")",
"o_paths",
"=",
"old_o_paths",
"+",
"o_paths",
"self",
".",
"work_unit",
".",
"data",
"[",
"'start_count'",
"]",
"=",
"next_idx",
"self",
".",
"work_unit",
".",
"data",
"[",
"'output'",
"]",
"=",
"o_paths",
"self",
".",
"work_unit",
".",
"update",
"(",
")"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
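Stripped of bookkeeping, the happy path of _process_output_chunk reduces to the sequence below; this is a simplified restatement assuming a non-empty open chunk, not an alternative implementation:

    def process_output_chunk_sketch(pipeline, start_count, next_idx, sources,
                                    i_str, t_path):
        pipeline.t_chunk.close()                         # flush the temp chunk
        pipeline._run_batch_transforms(t_path)           # step 1
        pipeline._maybe_run_post_batch_incremental_transforms(t_path)  # step 2
        o_paths = pipeline._run_writers(start_count, next_idx,
                                        sources, i_str, t_path)        # step 3
        pipeline.t_chunk = None                          # chunk officially done
        return o_paths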
test | Pipeline._run_writers | Run all of the writers over some intermediate chunk.
:param int start_count: index of the first item
:param int next_idx: index of the next item (after the last
item in this chunk)
:param list sources: source strings included in this chunk
(usually only one source)
:param str i_str: name of input file or other input
:param str t_path: location of intermediate chunk on disk
:return: list of output file paths or other outputs | streamcorpus_pipeline/_pipeline.py | def _run_writers(self, start_count, next_idx, sources, i_str, t_path):
'''Run all of the writers over some intermediate chunk.
:param int start_count: index of the first item
:param int next_idx: index of the next item (after the last
item in this chunk)
:param list sources: source strings included in this chunk
(usually only one source)
:param str i_str: name of input file or other input
:param str t_path: location of intermediate chunk on disk
:return: list of output file paths or other outputs
'''
# writers put the chunk somewhere, and could delete it
name_info = dict(
first=start_count,
# num and md5 computed in each writers
source=sources.pop(),
)
all_o_paths = []
for writer in self.writers:
logger.debug('running %r on %r: %r', writer, i_str, name_info)
o_paths = writer(t_path, name_info, i_str)
logger.debug('loaded (%d, %d) of %r into %r',
start_count, next_idx - 1, i_str, o_paths)
all_o_paths += o_paths
return all_o_paths | def _run_writers(self, start_count, next_idx, sources, i_str, t_path):
'''Run all of the writers over some intermediate chunk.
:param int start_count: index of the first item
:param int next_idx: index of the next item (after the last
item in this chunk)
:param list sources: source strings included in this chunk
(usually only one source)
:param str i_str: name of input file or other input
:param str t_path: location of intermediate chunk on disk
:return: list of output file paths or other outputs
'''
# writers put the chunk somewhere, and could delete it
name_info = dict(
first=start_count,
# num and md5 computed in each writers
source=sources.pop(),
)
all_o_paths = []
for writer in self.writers:
logger.debug('running %r on %r: %r', writer, i_str, name_info)
o_paths = writer(t_path, name_info, i_str)
logger.debug('loaded (%d, %d) of %r into %r',
start_count, next_idx - 1, i_str, o_paths)
all_o_paths += o_paths
return all_o_paths | [
"Run",
"all",
"of",
"the",
"writers",
"over",
"some",
"intermediate",
"chunk",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_pipeline.py#L718-L745 | [
"def",
"_run_writers",
"(",
"self",
",",
"start_count",
",",
"next_idx",
",",
"sources",
",",
"i_str",
",",
"t_path",
")",
":",
"# writers put the chunk somewhere, and could delete it",
"name_info",
"=",
"dict",
"(",
"first",
"=",
"start_count",
",",
"# num and md5 computed in each writers",
"source",
"=",
"sources",
".",
"pop",
"(",
")",
",",
")",
"all_o_paths",
"=",
"[",
"]",
"for",
"writer",
"in",
"self",
".",
"writers",
":",
"logger",
".",
"debug",
"(",
"'running %r on %r: %r'",
",",
"writer",
",",
"i_str",
",",
"name_info",
")",
"o_paths",
"=",
"writer",
"(",
"t_path",
",",
"name_info",
",",
"i_str",
")",
"logger",
".",
"debug",
"(",
"'loaded (%d, %d) of %r into %r'",
",",
"start_count",
",",
"next_idx",
"-",
"1",
",",
"i_str",
",",
"o_paths",
")",
"all_o_paths",
"+=",
"o_paths",
"return",
"all_o_paths"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
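As the call site above shows, a writer is any callable taking (t_path, name_info, i_str) and returning a list of output paths. A minimal do-nothing writer against that inferred protocol (the name and behavior are hypothetical):

    import logging
    logger = logging.getLogger(__name__)

    def null_writer(t_path, name_info, i_str):
        # name_info arrives with 'first' and 'source' filled in; each writer
        # computes 'num' and 'md5' itself if its name format needs them
        logger.info('skipping chunk %s built from %s', t_path, i_str)
        return []  # produced no output paths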
test | Pipeline._run_incremental_transforms | Run transforms on stream item.
Item may be discarded by some transform.
Writes successful items out to current self.t_chunk
Returns transformed item or None. | streamcorpus_pipeline/_pipeline.py | def _run_incremental_transforms(self, si, transforms):
'''
Run transforms on stream item.
Item may be discarded by some transform.
Writes successful items out to current self.t_chunk
Returns transformed item or None.
'''
## operate each transform on this one StreamItem
for transform in transforms:
try:
stream_id = si.stream_id
si_new = transform(si, context=self.context)
if si_new is None:
logger.warn('transform %r deleted %s abs_url=%r',
transform, stream_id, si and si.abs_url)
return None
si = si_new
except TransformGivingUp:
## do nothing
logger.info('transform %r giving up on %r',
transform, si.stream_id)
except Exception, exc:
logger.critical(
'transform %r failed on %r from i_str=%r abs_url=%r',
transform, si and si.stream_id, self.context.get('i_str'),
si and si.abs_url, exc_info=True)
assert si is not None
## expect to always have a stream_time
if not si.stream_time:
raise InvalidStreamItem('empty stream_time: %s' % si)
if si.stream_id is None:
raise InvalidStreamItem('empty stream_id: %r' % si)
## put the StreamItem into the output
if type(si) != streamcorpus.StreamItem_v0_3_0:
raise InvalidStreamItem('incorrect stream item object %r' %
type(si))
self.t_chunk.add(si)
return si | def _run_incremental_transforms(self, si, transforms):
'''
Run transforms on stream item.
Item may be discarded by some transform.
Writes successful items out to current self.t_chunk
Returns transformed item or None.
'''
## operate each transform on this one StreamItem
for transform in transforms:
try:
stream_id = si.stream_id
si_new = transform(si, context=self.context)
if si_new is None:
logger.warn('transform %r deleted %s abs_url=%r',
transform, stream_id, si and si.abs_url)
return None
si = si_new
except TransformGivingUp:
## do nothing
logger.info('transform %r giving up on %r',
transform, si.stream_id)
except Exception, exc:
logger.critical(
'transform %r failed on %r from i_str=%r abs_url=%r',
transform, si and si.stream_id, self.context.get('i_str'),
si and si.abs_url, exc_info=True)
assert si is not None
## expect to always have a stream_time
if not si.stream_time:
raise InvalidStreamItem('empty stream_time: %s' % si)
if si.stream_id is None:
raise InvalidStreamItem('empty stream_id: %r' % si)
## put the StreamItem into the output
if type(si) != streamcorpus.StreamItem_v0_3_0:
raise InvalidStreamItem('incorrect stream item object %r' %
type(si))
self.t_chunk.add(si)
return si | [
"Run",
"transforms",
"on",
"stream",
"item",
".",
"Item",
"may",
"be",
"discarded",
"by",
"some",
"transform",
".",
"Writes",
"successful",
"items",
"out",
"to",
"current",
"self",
".",
"t_chunk",
"Returns",
"transformed",
"item",
"or",
"None",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_pipeline.py#L747-L791 | [
"def",
"_run_incremental_transforms",
"(",
"self",
",",
"si",
",",
"transforms",
")",
":",
"## operate each transform on this one StreamItem",
"for",
"transform",
"in",
"transforms",
":",
"try",
":",
"stream_id",
"=",
"si",
".",
"stream_id",
"si_new",
"=",
"transform",
"(",
"si",
",",
"context",
"=",
"self",
".",
"context",
")",
"if",
"si_new",
"is",
"None",
":",
"logger",
".",
"warn",
"(",
"'transform %r deleted %s abs_url=%r'",
",",
"transform",
",",
"stream_id",
",",
"si",
"and",
"si",
".",
"abs_url",
")",
"return",
"None",
"si",
"=",
"si_new",
"except",
"TransformGivingUp",
":",
"## do nothing",
"logger",
".",
"info",
"(",
"'transform %r giving up on %r'",
",",
"transform",
",",
"si",
".",
"stream_id",
")",
"except",
"Exception",
",",
"exc",
":",
"logger",
".",
"critical",
"(",
"'transform %r failed on %r from i_str=%r abs_url=%r'",
",",
"transform",
",",
"si",
"and",
"si",
".",
"stream_id",
",",
"self",
".",
"context",
".",
"get",
"(",
"'i_str'",
")",
",",
"si",
"and",
"si",
".",
"abs_url",
",",
"exc_info",
"=",
"True",
")",
"assert",
"si",
"is",
"not",
"None",
"## expect to always have a stream_time",
"if",
"not",
"si",
".",
"stream_time",
":",
"raise",
"InvalidStreamItem",
"(",
"'empty stream_time: %s'",
"%",
"si",
")",
"if",
"si",
".",
"stream_id",
"is",
"None",
":",
"raise",
"InvalidStreamItem",
"(",
"'empty stream_id: %r'",
"%",
"si",
")",
"## put the StreamItem into the output",
"if",
"type",
"(",
"si",
")",
"!=",
"streamcorpus",
".",
"StreamItem_v0_3_0",
":",
"raise",
"InvalidStreamItem",
"(",
"'incorrect stream item object %r'",
"%",
"type",
"(",
"si",
")",
")",
"self",
".",
"t_chunk",
".",
"add",
"(",
"si",
")",
"return",
"si"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
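The transform protocol implied by this loop: a callable of (si, context=...) that returns the (possibly modified) StreamItem, returns None to delete it, or raises TransformGivingUp to leave it untouched. A minimal transform against that protocol (hypothetical name and filtering rule):

    def drop_items_without_raw(si, context=None):
        # returning None tells the loop above to discard this StreamItem
        if not si.body or not si.body.raw:
            return None
        return si  # surviving items are added to self.t_chunk by the caller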
test | get_name_info | takes a chunk blob and obtains the date_hour, md5, num
makes fields:
i_str
input_fname
input_md5 - parsed from input filename if it contains '-%(md5)s-'
md5
num
epoch_ticks
target_names
doc_ids_8
date_hour
rand8
date_now
time_now
date_time_now | streamcorpus_pipeline/_get_name_info.py | def get_name_info(chunk_path, assert_one_date_hour=False, i_str=None,
chunk_type=Chunk):
'''
takes a chunk blob and obtains the date_hour, md5, num
makes fields:
i_str
input_fname
input_md5 - parsed from input filename if it contains '-%(md5)s-'
md5
num
epoch_ticks
target_names
doc_ids_8
date_hour
rand8
date_now
time_now
date_time_now
'''
assert i_str is not None, 'must provide i_str as keyword arg'
name_info = dict()
if i_str:
name_info['i_str'] = i_str
else:
name_info['i_str'] = ''
i_fname = i_str.split('/')[-1]
i_fname = i_fname.split('.')[0] ## strip off .sc[.xz[.gpg]]
name_info['input_fname'] = i_fname
input_md5s = []
for part in i_fname.split('-'):
if len(part) == 32 and is_hex_32.match(part):
input_md5s.append(part)
name_info['input_md5'] = '-'.join(input_md5s)
# TODO: return a dict-like object that does the expensive
# calculation lazily, the name format might not even need that
# value.
ch = chunk_type(path=chunk_path, mode='rb')
date_hours = set()
target_names = set()
doc_ids = set()
epoch_ticks = None
count = 0
try:
for si in ch:
if chunk_type is Chunk:
if epoch_ticks is None:
epoch_ticks = si.stream_time.epoch_ticks
date_hours.add( si.stream_time.zulu_timestamp[:13] )
doc_ids.add( si.doc_id )
for annotator_id, ratings in si.ratings.items():
for rating in ratings:
target_name = rating.target.target_id.split('/')[-1]
target_names.add( target_name )
count += 1
except Exception, exc:
logger.critical('failed to iter over chunk', exc_info=True)
## create the md5 property, so we can use it in the filename
if hasattr(ch, 'md5_hexdigest'):
name_info['md5'] = ch.md5_hexdigest
else:
try:
data = open(chunk_path).read()
name_info['md5'] = hashlib.md5(data).hexdigest()
except Exception, exc:
logger.critical('failed to compute md5', exc_info=True)
name_info['md5'] = 'broken'
name_info['num'] = count
name_info['epoch_ticks'] = epoch_ticks
name_info['target_names'] = '-'.join( target_names )
name_info['doc_ids_8'] = '-'.join( [di[:8] for di in doc_ids] )
if chunk_type is Chunk:
if assert_one_date_hour:
assert len(date_hours) == 1, \
                'got a chunk with other than one date_hour! ' + \
repr(date_hours)
if len(date_hours) > 0:
date_hour = list(date_hours)[0]
date_hour = date_hour.replace('T', '-')
else:
assert count == 0, (date_hours, count)
date_hour = None
name_info['date_hour'] = date_hour
else:
name_info['date_hour'] = 'NO-DATE-HOUR-FOR-FC'
# TODO: in future lazy evaluation world, rand8 should return a
# different value every time it is accessed so that a format could
# be 'foo-{rand8}{rand8}'
name_info['rand8'] = '%08x' % (random.randint(0, 0x7fffffff),)
name_info['date_now'] = datetime.datetime.utcnow().strftime('%Y-%m-%d')
name_info['time_now'] = datetime.datetime.utcnow().strftime('%H-%M-%S')
name_info['date_time_now'] = datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S')
return name_info | def get_name_info(chunk_path, assert_one_date_hour=False, i_str=None,
chunk_type=Chunk):
'''
takes a chunk blob and obtains the date_hour, md5, num
makes fields:
i_str
input_fname
input_md5 - parsed from input filename if it contains '-%(md5)s-'
md5
num
epoch_ticks
target_names
doc_ids_8
date_hour
rand8
date_now
time_now
date_time_now
'''
assert i_str is not None, 'must provide i_str as keyword arg'
name_info = dict()
if i_str:
name_info['i_str'] = i_str
else:
name_info['i_str'] = ''
i_fname = i_str.split('/')[-1]
i_fname = i_fname.split('.')[0] ## strip off .sc[.xz[.gpg]]
name_info['input_fname'] = i_fname
input_md5s = []
for part in i_fname.split('-'):
if len(part) == 32 and is_hex_32.match(part):
input_md5s.append(part)
name_info['input_md5'] = '-'.join(input_md5s)
# TODO: return a dict-like object that does the expensive
# calculation lazily, the name format might not even need that
# value.
ch = chunk_type(path=chunk_path, mode='rb')
date_hours = set()
target_names = set()
doc_ids = set()
epoch_ticks = None
count = 0
try:
for si in ch:
if chunk_type is Chunk:
if epoch_ticks is None:
epoch_ticks = si.stream_time.epoch_ticks
date_hours.add( si.stream_time.zulu_timestamp[:13] )
doc_ids.add( si.doc_id )
for annotator_id, ratings in si.ratings.items():
for rating in ratings:
target_name = rating.target.target_id.split('/')[-1]
target_names.add( target_name )
count += 1
except Exception, exc:
logger.critical('failed to iter over chunk', exc_info=True)
## create the md5 property, so we can use it in the filename
if hasattr(ch, 'md5_hexdigest'):
name_info['md5'] = ch.md5_hexdigest
else:
try:
data = open(chunk_path).read()
name_info['md5'] = hashlib.md5(data).hexdigest()
except Exception, exc:
logger.critical('failed to compute md5', exc_info=True)
name_info['md5'] = 'broken'
name_info['num'] = count
name_info['epoch_ticks'] = epoch_ticks
name_info['target_names'] = '-'.join( target_names )
name_info['doc_ids_8'] = '-'.join( [di[:8] for di in doc_ids] )
if chunk_type is Chunk:
if assert_one_date_hour:
assert len(date_hours) == 1, \
                'got a chunk with other than one date_hour! ' + \
repr(date_hours)
if len(date_hours) > 0:
date_hour = list(date_hours)[0]
date_hour = date_hour.replace('T', '-')
else:
assert count == 0, (date_hours, count)
date_hour = None
name_info['date_hour'] = date_hour
else:
name_info['date_hour'] = 'NO-DATE-HOUR-FOR-FC'
# TODO: in future lazy evaluation world, rand8 should return a
# different value every time it is accessed so that a format could
# be 'foo-{rand8}{rand8}'
name_info['rand8'] = '%08x' % (random.randint(0, 0x7fffffff),)
name_info['date_now'] = datetime.datetime.utcnow().strftime('%Y-%m-%d')
name_info['time_now'] = datetime.datetime.utcnow().strftime('%H-%M-%S')
name_info['date_time_now'] = datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S')
return name_info | [
"takes",
"a",
"chunk",
"blob",
"and",
"obtains",
"the",
"date_hour",
"md5",
"num"
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_get_name_info.py#L19-L123 | [
"def",
"get_name_info",
"(",
"chunk_path",
",",
"assert_one_date_hour",
"=",
"False",
",",
"i_str",
"=",
"None",
",",
"chunk_type",
"=",
"Chunk",
")",
":",
"assert",
"i_str",
"is",
"not",
"None",
",",
"'must provide i_str as keyword arg'",
"name_info",
"=",
"dict",
"(",
")",
"if",
"i_str",
":",
"name_info",
"[",
"'i_str'",
"]",
"=",
"i_str",
"else",
":",
"name_info",
"[",
"'i_str'",
"]",
"=",
"''",
"i_fname",
"=",
"i_str",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"i_fname",
"=",
"i_fname",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"## strip off .sc[.xz[.gpg]]",
"name_info",
"[",
"'input_fname'",
"]",
"=",
"i_fname",
"input_md5s",
"=",
"[",
"]",
"for",
"part",
"in",
"i_fname",
".",
"split",
"(",
"'-'",
")",
":",
"if",
"len",
"(",
"part",
")",
"==",
"32",
"and",
"is_hex_32",
".",
"match",
"(",
"part",
")",
":",
"input_md5s",
".",
"append",
"(",
"part",
")",
"name_info",
"[",
"'input_md5'",
"]",
"=",
"'-'",
".",
"join",
"(",
"input_md5s",
")",
"# TODO: return a dict-like object that does the expensive",
"# calculation lazily, the name format might not even need that",
"# value.",
"ch",
"=",
"chunk_type",
"(",
"path",
"=",
"chunk_path",
",",
"mode",
"=",
"'rb'",
")",
"date_hours",
"=",
"set",
"(",
")",
"target_names",
"=",
"set",
"(",
")",
"doc_ids",
"=",
"set",
"(",
")",
"epoch_ticks",
"=",
"None",
"count",
"=",
"0",
"try",
":",
"for",
"si",
"in",
"ch",
":",
"if",
"chunk_type",
"is",
"Chunk",
":",
"if",
"epoch_ticks",
"is",
"None",
":",
"epoch_ticks",
"=",
"si",
".",
"stream_time",
".",
"epoch_ticks",
"date_hours",
".",
"add",
"(",
"si",
".",
"stream_time",
".",
"zulu_timestamp",
"[",
":",
"13",
"]",
")",
"doc_ids",
".",
"add",
"(",
"si",
".",
"doc_id",
")",
"for",
"annotator_id",
",",
"ratings",
"in",
"si",
".",
"ratings",
".",
"items",
"(",
")",
":",
"for",
"rating",
"in",
"ratings",
":",
"target_name",
"=",
"rating",
".",
"target",
".",
"target_id",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"target_names",
".",
"add",
"(",
"target_name",
")",
"count",
"+=",
"1",
"except",
"Exception",
",",
"exc",
":",
"logger",
".",
"critical",
"(",
"'failed to iter over chunk'",
",",
"exc_info",
"=",
"True",
")",
"## create the md5 property, so we can use it in the filename",
"if",
"hasattr",
"(",
"ch",
",",
"'md5_hexdigest'",
")",
":",
"name_info",
"[",
"'md5'",
"]",
"=",
"ch",
".",
"md5_hexdigest",
"else",
":",
"try",
":",
"data",
"=",
"open",
"(",
"chunk_path",
")",
".",
"read",
"(",
")",
"name_info",
"[",
"'md5'",
"]",
"=",
"hashlib",
".",
"md5",
"(",
"data",
")",
".",
"hexdigest",
"(",
")",
"except",
"Exception",
",",
"exc",
":",
"logger",
".",
"critical",
"(",
"'failed to compute md5'",
",",
"exc_info",
"=",
"True",
")",
"name_info",
"[",
"'md5'",
"]",
"=",
"'broken'",
"name_info",
"[",
"'num'",
"]",
"=",
"count",
"name_info",
"[",
"'epoch_ticks'",
"]",
"=",
"epoch_ticks",
"name_info",
"[",
"'target_names'",
"]",
"=",
"'-'",
".",
"join",
"(",
"target_names",
")",
"name_info",
"[",
"'doc_ids_8'",
"]",
"=",
"'-'",
".",
"join",
"(",
"[",
"di",
"[",
":",
"8",
"]",
"for",
"di",
"in",
"doc_ids",
"]",
")",
"if",
"chunk_type",
"is",
"Chunk",
":",
"if",
"assert_one_date_hour",
":",
"assert",
"len",
"(",
"date_hours",
")",
"==",
"1",
",",
"'got a chunk with other than one data_hour! '",
"+",
"repr",
"(",
"date_hours",
")",
"if",
"len",
"(",
"date_hours",
")",
">",
"0",
":",
"date_hour",
"=",
"list",
"(",
"date_hours",
")",
"[",
"0",
"]",
"date_hour",
"=",
"date_hour",
".",
"replace",
"(",
"'T'",
",",
"'-'",
")",
"else",
":",
"assert",
"count",
"==",
"0",
",",
"(",
"date_hours",
",",
"count",
")",
"date_hour",
"=",
"None",
"name_info",
"[",
"'date_hour'",
"]",
"=",
"date_hour",
"else",
":",
"name_info",
"[",
"'date_hour'",
"]",
"=",
"'NO-DATE-HOUR-FOR-FC'",
"# TODO: in future lazy evaluation world, rand8 should return a",
"# different value every time it is accessed so that a format could",
"# be 'foo-{rand8}{rand8}'",
"name_info",
"[",
"'rand8'",
"]",
"=",
"'%08x'",
"%",
"(",
"random",
".",
"randint",
"(",
"0",
",",
"0x7fffffff",
")",
",",
")",
"name_info",
"[",
"'date_now'",
"]",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
".",
"strftime",
"(",
"'%Y-%m-%d'",
")",
"name_info",
"[",
"'time_now'",
"]",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
".",
"strftime",
"(",
"'%H-%M-%S'",
")",
"name_info",
"[",
"'date_time_now'",
"]",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
".",
"strftime",
"(",
"'%Y-%m-%d-%H-%M-%S'",
")",
"return",
"name_info"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
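The returned dict plugs directly into %-style name templates; a sketch of typical use with a hypothetical chunk path and input name:

    from streamcorpus_pipeline._get_name_info import get_name_info

    name_info = get_name_info('/tmp/t_chunk-deadbeef.sc',
                              i_str='raw/news-0-abc.sc.xz')
    # date_hour can be None for an empty chunk; assume at least one item here
    o_fname = '%(date_hour)s-%(input_fname)s-%(num)d-%(md5)s.sc.xz' % name_info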
test | replace_config | Replace the top-level pipeline configurable object.
This investigates a number of sources, including
`external_stages_path` and `external_stages_modules` configuration
and `streamcorpus_pipeline.stages` entry points, and uses these to
find the actual :data:`sub_modules` for
:mod:`streamcorpus_pipeline`. | streamcorpus_pipeline/config.py | def replace_config(config, name):
'''Replace the top-level pipeline configurable object.
This investigates a number of sources, including
`external_stages_path` and `external_stages_modules` configuration
and `streamcorpus_pipeline.stages` entry points, and uses these to
find the actual :data:`sub_modules` for
:mod:`streamcorpus_pipeline`.
'''
global static_stages
if static_stages is None:
static_stages = PipelineStages()
stages = static_stages
if 'external_stages_path' in config:
path = config['external_stages_path']
if not os.path.isabs(path) and config.get('root_path'):
path = os.path.join(config['root_path'], path)
try:
            stages.load_external_stages(path)
except IOError:
return streamcorpus_pipeline # let check_config re-raise this
if 'external_stages_modules' in config:
for mod in config['external_stages_modules']:
try:
stages.load_module_stages(mod)
except ImportError:
return streamcorpus_pipeline # let check_config re-raise this
else:
stages = static_stages
new_sub_modules = set(stage
for stage in stages.itervalues()
if hasattr(stage, 'config_name'))
return NewSubModules(streamcorpus_pipeline, new_sub_modules) | def replace_config(config, name):
'''Replace the top-level pipeline configurable object.
This investigates a number of sources, including
`external_stages_path` and `external_stages_modules` configuration
and `streamcorpus_pipeline.stages` entry points, and uses these to
find the actual :data:`sub_modules` for
:mod:`streamcorpus_pipeline`.
'''
global static_stages
if static_stages is None:
static_stages = PipelineStages()
stages = static_stages
if 'external_stages_path' in config:
path = config['external_stages_path']
if not os.path.isabs(path) and config.get('root_path'):
path = os.path.join(config['root_path'], path)
try:
            stages.load_external_stages(path)
except IOError:
return streamcorpus_pipeline # let check_config re-raise this
if 'external_stages_modules' in config:
for mod in config['external_stages_modules']:
try:
stages.load_module_stages(mod)
except ImportError:
return streamcorpus_pipeline # let check_config re-raise this
else:
stages = static_stages
new_sub_modules = set(stage
for stage in stages.itervalues()
if hasattr(stage, 'config_name'))
return NewSubModules(streamcorpus_pipeline, new_sub_modules) | [
"Replace",
"the",
"top",
"-",
"level",
"pipeline",
"configurable",
"object",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/config.py#L62-L96 | [
"def",
"replace_config",
"(",
"config",
",",
"name",
")",
":",
"global",
"static_stages",
"if",
"static_stages",
"is",
"None",
":",
"static_stages",
"=",
"PipelineStages",
"(",
")",
"stages",
"=",
"static_stages",
"if",
"'external_stages_path'",
"in",
"config",
":",
"path",
"=",
"config",
"[",
"'external_stages_path'",
"]",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"path",
")",
"and",
"config",
".",
"get",
"(",
"'root_path'",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"config",
"[",
"'root_path'",
"]",
",",
"path",
")",
"try",
":",
"stages",
".",
"load_external_stages",
"(",
"config",
"[",
"'external_stages_path'",
"]",
")",
"except",
"IOError",
":",
"return",
"streamcorpus_pipeline",
"# let check_config re-raise this",
"if",
"'external_stages_modules'",
"in",
"config",
":",
"for",
"mod",
"in",
"config",
"[",
"'external_stages_modules'",
"]",
":",
"try",
":",
"stages",
".",
"load_module_stages",
"(",
"mod",
")",
"except",
"ImportError",
":",
"return",
"streamcorpus_pipeline",
"# let check_config re-raise this",
"else",
":",
"stages",
"=",
"static_stages",
"new_sub_modules",
"=",
"set",
"(",
"stage",
"for",
"stage",
"in",
"stages",
".",
"itervalues",
"(",
")",
"if",
"hasattr",
"(",
"stage",
",",
"'config_name'",
")",
")",
"return",
"NewSubModules",
"(",
"streamcorpus_pipeline",
",",
"new_sub_modules",
")"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
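A configuration block exercising both discovery paths might look like the following sketch; the root path, file name, and module name are illustrative only:

    from streamcorpus_pipeline.config import replace_config

    config = {
        'root_path': '/srv/pipeline',                   # optional
        'external_stages_path': 'stages/my_stages.py',  # resolved against root_path
        'external_stages_modules': ['mycorp.pipeline_stages'],
    }
    top_level = replace_config(config, 'streamcorpus_pipeline')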
test | make_app | Make a WSGI app that has all the HTTPie pieces baked in. | httpony/application.py | def make_app():
"""Make a WSGI app that has all the HTTPie pieces baked in."""
env = Environment()
# STDIN is ignored because HTTPony runs a server that doesn't care.
# Additionally, it is needed or else pytest blows up.
args = parser.parse_args(args=['/', '--ignore-stdin'], env=env)
args.output_options = 'HB' # Output only requests.
server = 'HTTPony/{0}'.format(__version__)
def application(environ, start_response):
# The WSGI server puts content length and type in the environment
# even when not provided with the request. Drop them if they are empty.
if environ.get('CONTENT_LENGTH') == '':
del environ['CONTENT_LENGTH']
if environ.get('CONTENT_TYPE') == '':
del environ['CONTENT_TYPE']
wrequest = WerkzeugRequest(environ)
data = wrequest.get_data()
request = Request(
method=wrequest.method,
url=wrequest.url,
headers=wrequest.headers,
data=data,
)
prepared = request.prepare()
stream = streams.build_output_stream(
args, env, prepared, response=None,
output_options=args.output_options)
streams.write_stream(stream, env.stdout, env.stdout_isatty)
# When there is data in the request, give the next one breathing room.
if data:
print("\n", file=env.stdout)
# Make dreams come true.
response = Response(headers={'Server': server})
return response(environ, start_response)
return application | def make_app():
"""Make a WSGI app that has all the HTTPie pieces baked in."""
env = Environment()
# STDIN is ignored because HTTPony runs a server that doesn't care.
# Additionally, it is needed or else pytest blows up.
args = parser.parse_args(args=['/', '--ignore-stdin'], env=env)
args.output_options = 'HB' # Output only requests.
server = 'HTTPony/{0}'.format(__version__)
def application(environ, start_response):
# The WSGI server puts content length and type in the environment
# even when not provided with the request. Drop them if they are empty.
if environ.get('CONTENT_LENGTH') == '':
del environ['CONTENT_LENGTH']
if environ.get('CONTENT_TYPE') == '':
del environ['CONTENT_TYPE']
wrequest = WerkzeugRequest(environ)
data = wrequest.get_data()
request = Request(
method=wrequest.method,
url=wrequest.url,
headers=wrequest.headers,
data=data,
)
prepared = request.prepare()
stream = streams.build_output_stream(
args, env, prepared, response=None,
output_options=args.output_options)
streams.write_stream(stream, env.stdout, env.stdout_isatty)
# When there is data in the request, give the next one breathing room.
if data:
print("\n", file=env.stdout)
# Make dreams come true.
response = Response(headers={'Server': server})
return response(environ, start_response)
return application | [
"Make",
"a",
"WSGI",
"app",
"that",
"has",
"all",
"the",
"HTTPie",
"pieces",
"baked",
"in",
"."
] | mblayman/httpony | python | https://github.com/mblayman/httpony/blob/5af404d647a8dac8a043b64ea09882589b3b5247/httpony/application.py#L15-L55 | [
"def",
"make_app",
"(",
")",
":",
"env",
"=",
"Environment",
"(",
")",
"# STDIN is ignored because HTTPony runs a server that doesn't care.",
"# Additionally, it is needed or else pytest blows up.",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"args",
"=",
"[",
"'/'",
",",
"'--ignore-stdin'",
"]",
",",
"env",
"=",
"env",
")",
"args",
".",
"output_options",
"=",
"'HB'",
"# Output only requests.",
"server",
"=",
"'HTTPony/{0}'",
".",
"format",
"(",
"__version__",
")",
"def",
"application",
"(",
"environ",
",",
"start_response",
")",
":",
"# The WSGI server puts content length and type in the environment",
"# even when not provided with the request. Drop them if they are empty.",
"if",
"environ",
".",
"get",
"(",
"'CONTENT_LENGTH'",
")",
"==",
"''",
":",
"del",
"environ",
"[",
"'CONTENT_LENGTH'",
"]",
"if",
"environ",
".",
"get",
"(",
"'CONTENT_TYPE'",
")",
"==",
"''",
":",
"del",
"environ",
"[",
"'CONTENT_TYPE'",
"]",
"wrequest",
"=",
"WerkzeugRequest",
"(",
"environ",
")",
"data",
"=",
"wrequest",
".",
"get_data",
"(",
")",
"request",
"=",
"Request",
"(",
"method",
"=",
"wrequest",
".",
"method",
",",
"url",
"=",
"wrequest",
".",
"url",
",",
"headers",
"=",
"wrequest",
".",
"headers",
",",
"data",
"=",
"data",
",",
")",
"prepared",
"=",
"request",
".",
"prepare",
"(",
")",
"stream",
"=",
"streams",
".",
"build_output_stream",
"(",
"args",
",",
"env",
",",
"prepared",
",",
"response",
"=",
"None",
",",
"output_options",
"=",
"args",
".",
"output_options",
")",
"streams",
".",
"write_stream",
"(",
"stream",
",",
"env",
".",
"stdout",
",",
"env",
".",
"stdout_isatty",
")",
"# When there is data in the request, give the next one breathing room.",
"if",
"data",
":",
"print",
"(",
"\"\\n\"",
",",
"file",
"=",
"env",
".",
"stdout",
")",
"# Make dreams come true.",
"response",
"=",
"Response",
"(",
"headers",
"=",
"{",
"'Server'",
":",
"server",
"}",
")",
"return",
"response",
"(",
"environ",
",",
"start_response",
")",
"return",
"application"
] | 5af404d647a8dac8a043b64ea09882589b3b5247 |
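Because make_app() returns a plain WSGI callable, any WSGI server can host it; for example, with the standard library's wsgiref (host and port are arbitrary):

    from wsgiref.simple_server import make_server
    from httpony.application import make_app

    app = make_app()
    make_server('127.0.0.1', 8000, app).serve_forever()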
test | make_chains_with_names | assemble in-doc coref chains by mapping equiv_id to tokens and
their cleansed name strings
:param sentences: iterator over token generators
:returns dict:
keys are equiv_ids,
values are tuple(set of cleansed name strings, set of tokens) | streamcorpus_pipeline/_taggers.py | def make_chains_with_names(sentences):
'''
assemble in-doc coref chains by mapping equiv_id to tokens and
their cleansed name strings
:param sentences: iterator over token generators
:returns dict:
keys are equiv_ids,
        values are tuple(set of cleansed name strings, set of tokens)
'''
## if an equiv_id is -1, then the token is classified into some
    ## entity_type but has no other tokens in its chain.  We don't
## want these all lumped together, so we give them distinct "fake"
## equiv_id other than -1 -- counting negatively to avoid
## collisions with "real" equiv_ids
fake_equiv_ids = -2
## use a default dictionary
equiv_ids = collections.defaultdict(lambda: (set(), set()))
for tagger_id, sents in sentences.items():
for sent in sents:
for tok in sent.tokens:
if tok.entity_type is not None:
## get an appropriate equiv_id
if tok.equiv_id == -1:
eqid = fake_equiv_ids
fake_equiv_ids -= 1
else:
eqid = tok.equiv_id
## store the name parts initially as a set
equiv_ids[eqid][0].add(cleanse(tok.token.decode('utf8')))
## carry a *reference* to the entire Token object
equiv_ids[eqid][1].add(tok)
return equiv_ids | def make_chains_with_names(sentences):
'''
assemble in-doc coref chains by mapping equiv_id to tokens and
their cleansed name strings
:param sentences: iterator over token generators
:returns dict:
keys are equiv_ids,
        values are tuple(set of cleansed name strings, set of tokens)
'''
## if an equiv_id is -1, then the token is classified into some
    ## entity_type but has no other tokens in its chain.  We don't
## want these all lumped together, so we give them distinct "fake"
## equiv_id other than -1 -- counting negatively to avoid
## collisions with "real" equiv_ids
fake_equiv_ids = -2
## use a default dictionary
equiv_ids = collections.defaultdict(lambda: (set(), set()))
for tagger_id, sents in sentences.items():
for sent in sents:
for tok in sent.tokens:
if tok.entity_type is not None:
## get an appropriate equiv_id
if tok.equiv_id == -1:
eqid = fake_equiv_ids
fake_equiv_ids -= 1
else:
eqid = tok.equiv_id
## store the name parts initially as a set
equiv_ids[eqid][0].add(cleanse(tok.token.decode('utf8')))
## carry a *reference* to the entire Token object
equiv_ids[eqid][1].add(tok)
return equiv_ids | [
"assemble",
"in",
"-",
"doc",
"coref",
"chains",
"by",
"mapping",
"equiv_id",
"to",
"tokens",
"and",
"their",
"cleansed",
"name",
"strings"
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_taggers.py#L39-L76 | [
"def",
"make_chains_with_names",
"(",
"sentences",
")",
":",
"## if an equiv_id is -1, then the token is classified into some",
"## entity_type but has not other tokens in its chain. We don't",
"## want these all lumped together, so we give them distinct \"fake\"",
"## equiv_id other than -1 -- counting negatively to avoid",
"## collisions with \"real\" equiv_ids",
"fake_equiv_ids",
"=",
"-",
"2",
"## use a default dictionary",
"equiv_ids",
"=",
"collections",
".",
"defaultdict",
"(",
"lambda",
":",
"(",
"set",
"(",
")",
",",
"set",
"(",
")",
")",
")",
"for",
"tagger_id",
",",
"sents",
"in",
"sentences",
".",
"items",
"(",
")",
":",
"for",
"sent",
"in",
"sents",
":",
"for",
"tok",
"in",
"sent",
".",
"tokens",
":",
"if",
"tok",
".",
"entity_type",
"is",
"not",
"None",
":",
"## get an appropriate equiv_id",
"if",
"tok",
".",
"equiv_id",
"==",
"-",
"1",
":",
"eqid",
"=",
"fake_equiv_ids",
"fake_equiv_ids",
"-=",
"1",
"else",
":",
"eqid",
"=",
"tok",
".",
"equiv_id",
"## store the name parts initially as a set",
"equiv_ids",
"[",
"eqid",
"]",
"[",
"0",
"]",
".",
"add",
"(",
"cleanse",
"(",
"tok",
".",
"token",
".",
"decode",
"(",
"'utf8'",
")",
")",
")",
"## carry a *reference* to the entire Token object",
"equiv_ids",
"[",
"eqid",
"]",
"[",
"1",
"]",
".",
"add",
"(",
"tok",
")",
"return",
"equiv_ids"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
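The mapping pairs each chain id with a set of cleansed name strings and a set of Token references; a small sketch of consuming it, assuming si is an already-tagged StreamItem:

    from streamcorpus_pipeline._taggers import make_chains_with_names

    equiv_ids = make_chains_with_names(si.body.sentences)
    for eqid, (names, chain_tokens) in equiv_ids.items():
        # eqids below -1 are the synthetic ids given to singleton chains
        summary = (eqid, sorted(names), len(chain_tokens))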
test | ALL_mentions | For each name string in the target_mentions list, searches through
all chain_mentions looking for any cleansed Token.token that
contains the name. Returns True only if all of the target_mention
strings appeared as substrings of at least one cleansed
Token.token. Otherwise, returns False.
:type target_mentions: list of basestring
:type chain_mentions: list of basestring
:returns bool: | streamcorpus_pipeline/_taggers.py | def ALL_mentions(target_mentions, chain_mentions):
'''
For each name string in the target_mentions list, searches through
all chain_mentions looking for any cleansed Token.token that
contains the name. Returns True only if all of the target_mention
strings appeared as substrings of at least one cleansed
Token.token. Otherwise, returns False.
:type target_mentions: list of basestring
:type chain_mentions: list of basestring
:returns bool:
'''
found_all = True
for name in target_mentions:
found_one = False
for chain_ment in chain_mentions:
if name in chain_ment:
found_one = True
break
if not found_one:
found_all = False
break
return found_all | def ALL_mentions(target_mentions, chain_mentions):
'''
For each name string in the target_mentions list, searches through
all chain_mentions looking for any cleansed Token.token that
contains the name. Returns True only if all of the target_mention
strings appeared as substrings of at least one cleansed
Token.token. Otherwise, returns False.
:type target_mentions: list of basestring
:type chain_mentions: list of basestring
:returns bool:
'''
found_all = True
for name in target_mentions:
found_one = False
for chain_ment in chain_mentions:
if name in chain_ment:
found_one = True
break
if not found_one:
found_all = False
break
return found_all | [
"For",
"each",
"name",
"string",
"in",
"the",
"target_mentions",
"list",
"searches",
"through",
"all",
"chain_mentions",
"looking",
"for",
"any",
"cleansed",
"Token",
".",
"token",
"that",
"contains",
"the",
"name",
".",
"Returns",
"True",
"only",
"if",
"all",
"of",
"the",
"target_mention",
"strings",
"appeared",
"as",
"substrings",
"of",
"at",
"least",
"one",
"cleansed",
"Token",
".",
"token",
".",
"Otherwise",
"returns",
"False",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_taggers.py#L78-L101 | [
"def",
"ALL_mentions",
"(",
"target_mentions",
",",
"chain_mentions",
")",
":",
"found_all",
"=",
"True",
"for",
"name",
"in",
"target_mentions",
":",
"found_one",
"=",
"False",
"for",
"chain_ment",
"in",
"chain_mentions",
":",
"if",
"name",
"in",
"chain_ment",
":",
"found_one",
"=",
"True",
"break",
"if",
"not",
"found_one",
":",
"found_all",
"=",
"False",
"break",
"return",
"found_all"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
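The substring test means partial token matches count; two illustrative calls on made-up cleansed strings:

    from streamcorpus_pipeline._taggers import ALL_mentions

    assert ALL_mentions(['john', 'smith'], ['johnathan', 'smithson'])   # both names found
    assert not ALL_mentions(['john', 'smith'], ['johnathan', 'jones'])  # 'smith' missing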
test | ANY_MULTI_TOKEN_mentions | For each name string (potentially consisting of multiple tokens) in the
target_mentions list, searches through all chain_mentions looking for any
cleansed Token.token that contains all the tokens in the name. Returns
True if any single target_mention string has all of its tokens appear as
substrings of at least one cleansed Token.token. Otherwise, returns False.
:type target_mentions: list of basestring
:type chain_mentions: list of basestring
:returns bool: | streamcorpus_pipeline/_taggers.py | def ANY_MULTI_TOKEN_mentions(multi_token_target_mentions, chain_mentions):
'''
For each name string (potentially consisting of multiple tokens) in the
target_mentions list, searches through all chain_mentions looking for any
cleansed Token.token that contains all the tokens in the name. Returns
True only if all of the target_mention strings appeared as substrings of at
least one cleansed Token.token. Otherwise, returns False.
:type target_mentions: list of basestring
:type chain_mentions: list of basestring
:returns bool:
'''
for multi_token_name in multi_token_target_mentions:
if ALL_mentions(multi_token_name.split(), chain_mentions):
return True
return False | def ANY_MULTI_TOKEN_mentions(multi_token_target_mentions, chain_mentions):
'''
For each name string (potentially consisting of multiple tokens) in the
target_mentions list, searches through all chain_mentions looking for any
cleansed Token.token that contains all the tokens in the name. Returns
True only if all of the target_mention strings appeared as substrings of at
least one cleansed Token.token. Otherwise, returns False.
:type target_mentions: list of basestring
:type chain_mentions: list of basestring
:returns bool:
'''
for multi_token_name in multi_token_target_mentions:
if ALL_mentions(multi_token_name.split(), chain_mentions):
return True
return False | [
"For",
"each",
"name",
"string",
"(",
"potentially",
"consisting",
"of",
"multiple",
"tokens",
")",
"in",
"the",
"target_mentions",
"list",
"searches",
"through",
"all",
"chain_mentions",
"looking",
"for",
"any",
"cleansed",
"Token",
".",
"token",
"that",
"contains",
"all",
"the",
"tokens",
"in",
"the",
"name",
".",
"Returns",
"True",
"only",
"if",
"all",
"of",
"the",
"target_mention",
"strings",
"appeared",
"as",
"substrings",
"of",
"at",
"least",
"one",
"cleansed",
"Token",
".",
"token",
".",
"Otherwise",
"returns",
"False",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_taggers.py#L104-L120 | [
"def",
"ANY_MULTI_TOKEN_mentions",
"(",
"multi_token_target_mentions",
",",
"chain_mentions",
")",
":",
"for",
"multi_token_name",
"in",
"multi_token_target_mentions",
":",
"if",
"ALL_mentions",
"(",
"multi_token_name",
".",
"split",
"(",
")",
",",
"chain_mentions",
")",
":",
"return",
"True",
"return",
"False"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
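Each multi-token name is split on whitespace and delegated to ALL_mentions; one fully-matching name suffices. Illustrative calls on made-up strings:

    from streamcorpus_pipeline._taggers import ANY_MULTI_TOKEN_mentions

    assert ANY_MULTI_TOKEN_mentions(['john smith'], ['johnathan', 'smithson'])
    assert not ANY_MULTI_TOKEN_mentions(['john smith'], ['johnathan', 'jones'])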
test | ANY_mentions | For each name string in the target_mentions list, searches through
all chain_mentions looking for any cleansed Token.token that
contains the name. Returns True if any of the target_mention
strings appeared as substrings of any cleansed Token.token.
Otherwise, returns False.
:type target_mentions: list of basestring
:type chain_mentions: list of basestring
:returns bool: | streamcorpus_pipeline/_taggers.py | def ANY_mentions(target_mentions, chain_mentions):
'''
For each name string in the target_mentions list, searches through
all chain_mentions looking for any cleansed Token.token that
contains the name. Returns True if any of the target_mention
strings appeared as substrings of any cleansed Token.token.
Otherwise, returns False.
:type target_mentions: list of basestring
:type chain_mentions: list of basestring
:returns bool:
'''
for name in target_mentions:
for chain_ment in chain_mentions:
if name in chain_ment:
return True
return False | def ANY_mentions(target_mentions, chain_mentions):
'''
For each name string in the target_mentions list, searches through
all chain_mentions looking for any cleansed Token.token that
contains the name. Returns True if any of the target_mention
strings appeared as substrings of any cleansed Token.token.
Otherwise, returns False.
:type target_mentions: list of basestring
:type chain_mentions: list of basestring
:returns bool:
'''
for name in target_mentions:
for chain_ment in chain_mentions:
if name in chain_ment:
return True
return False | [
"For",
"each",
"name",
"string",
"in",
"the",
"target_mentions",
"list",
"searches",
"through",
"all",
"chain_mentions",
"looking",
"for",
"any",
"cleansed",
"Token",
".",
"token",
"that",
"contains",
"the",
"name",
".",
"Returns",
"True",
"if",
"any",
"of",
"the",
"target_mention",
"strings",
"appeared",
"as",
"substrings",
"of",
"any",
"cleansed",
"Token",
".",
"token",
".",
"Otherwise",
"returns",
"False",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_taggers.py#L123-L140 | [
"def",
"ANY_mentions",
"(",
"target_mentions",
",",
"chain_mentions",
")",
":",
"for",
"name",
"in",
"target_mentions",
":",
"for",
"chain_ment",
"in",
"chain_mentions",
":",
"if",
"name",
"in",
"chain_ment",
":",
"return",
"True",
"return",
"False"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
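In contrast to ALL_mentions, a single substring hit anywhere is enough here:

    from streamcorpus_pipeline._taggers import ANY_mentions

    assert ANY_mentions(['smith', 'nomatch'], ['smithson'])  # 'smith' hits
    assert not ANY_mentions(['smith'], ['jones'])            # no hit at all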
test | names_in_chains | Convert doc-level Rating object into a Label, and add that Label
to all Token in all coref chains identified by
aligner_data["chain_selector"]
:param stream_item: document that has a doc-level Rating to translate into token-level Labels.
:param aligner_data: dict containing:
chain_selector: ALL, ANY, or ANY_MULTI_TOKEN
annotator_id: string to find at stream_item.Ratings[i].annotator.annotator_id
If chain_selector==ALL, then only apply Label to chains in which
all of the Rating.mentions strings appear as substrings within at
least one of the Token.token strings.
If chain_selector==ANY, then apply Label to chains in which any of
the Rating.mentions strings appear as a substring within at least
one of the Token.token strings.
If chain_selector==ANY_MULTI_TOKEN, then apply Label to chains in which all
the names in any of the Rating.mentions strings appear as a substring within at least
one of the Token.token strings. | streamcorpus_pipeline/_taggers.py | def names_in_chains(stream_item, aligner_data):
'''
Convert doc-level Rating object into a Label, and add that Label
to all Token in all coref chains identified by
aligner_data["chain_selector"]
:param stream_item: document that has a doc-level Rating to translate into token-level Labels.
:param aligner_data: dict containing:
        chain_selector: ALL, ANY, or ANY_MULTI_TOKEN
annotator_id: string to find at stream_item.Ratings[i].annotator.annotator_id
If chain_selector==ALL, then only apply Label to chains in which
all of the Rating.mentions strings appear as substrings within at
least one of the Token.token strings.
If chain_selector==ANY, then apply Label to chains in which any of
the Rating.mentions strings appear as a substring within at least
one of the Token.token strings.
If chain_selector==ANY_MULTI_TOKEN, then apply Label to chains in which all
the names in any of the Rating.mentions strings appear as a substring within at least
one of the Token.token strings.
'''
chain_selector = aligner_data.get('chain_selector', '')
assert chain_selector in _CHAIN_SELECTORS, \
'chain_selector: %r not in %r' % (chain_selector, _CHAIN_SELECTORS.keys())
## convert chain_selector to a function
chain_selector = _CHAIN_SELECTORS[chain_selector]
## make inverted index equiv_id --> (names, tokens)
equiv_ids = make_chains_with_names( stream_item.body.sentences )
required_annotator_id = aligner_data.get('annotator_id')
for annotator_id, ratings in stream_item.ratings.items():
if (required_annotator_id is not None) and (annotator_id != required_annotator_id):
continue
else:
for rating in ratings:
label = Label(annotator=rating.annotator,
target=rating.target)
for eqid, (chain_mentions, chain_tokens) in equiv_ids.items():
if chain_selector(rating.mentions, chain_mentions):
## apply the label
for tok in chain_tokens:
add_annotation(tok, label) | def names_in_chains(stream_item, aligner_data):
'''
Convert doc-level Rating object into a Label, and add that Label
to all Token in all coref chains identified by
aligner_data["chain_selector"]
:param stream_item: document that has a doc-level Rating to translate into token-level Labels.
:param aligner_data: dict containing:
        chain_selector: ALL, ANY, or ANY_MULTI_TOKEN
annotator_id: string to find at stream_item.Ratings[i].annotator.annotator_id
If chain_selector==ALL, then only apply Label to chains in which
all of the Rating.mentions strings appear as substrings within at
least one of the Token.token strings.
If chain_selector==ANY, then apply Label to chains in which any of
the Rating.mentions strings appear as a substring within at least
one of the Token.token strings.
If chain_selector==ANY_MULTI_TOKEN, then apply Label to chains in which all
the names in any of the Rating.mentions strings appear as a substring within at least
one of the Token.token strings.
'''
chain_selector = aligner_data.get('chain_selector', '')
assert chain_selector in _CHAIN_SELECTORS, \
'chain_selector: %r not in %r' % (chain_selector, _CHAIN_SELECTORS.keys())
## convert chain_selector to a function
chain_selector = _CHAIN_SELECTORS[chain_selector]
## make inverted index equiv_id --> (names, tokens)
equiv_ids = make_chains_with_names( stream_item.body.sentences )
required_annotator_id = aligner_data.get('annotator_id')
for annotator_id, ratings in stream_item.ratings.items():
if (required_annotator_id is not None) and (annotator_id != required_annotator_id):
continue
else:
for rating in ratings:
label = Label(annotator=rating.annotator,
target=rating.target)
for eqid, (chain_mentions, chain_tokens) in equiv_ids.items():
if chain_selector(rating.mentions, chain_mentions):
## apply the label
for tok in chain_tokens:
add_annotation(tok, label) | [
"Convert",
"doc",
"-",
"level",
"Rating",
"object",
"into",
"a",
"Label",
"and",
"add",
"that",
"Label",
"to",
"all",
"Token",
"in",
"all",
"coref",
"chains",
"identified",
"by",
"aligner_data",
"[",
"chain_selector",
"]"
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_taggers.py#L148-L195 | [
"def",
"names_in_chains",
"(",
"stream_item",
",",
"aligner_data",
")",
":",
"chain_selector",
"=",
"aligner_data",
".",
"get",
"(",
"'chain_selector'",
",",
"''",
")",
"assert",
"chain_selector",
"in",
"_CHAIN_SELECTORS",
",",
"'chain_selector: %r not in %r'",
"%",
"(",
"chain_selector",
",",
"_CHAIN_SELECTORS",
".",
"keys",
"(",
")",
")",
"## convert chain_selector to a function",
"chain_selector",
"=",
"_CHAIN_SELECTORS",
"[",
"chain_selector",
"]",
"## make inverted index equiv_id --> (names, tokens)",
"equiv_ids",
"=",
"make_chains_with_names",
"(",
"stream_item",
".",
"body",
".",
"sentences",
")",
"required_annotator_id",
"=",
"aligner_data",
".",
"get",
"(",
"'annotator_id'",
")",
"for",
"annotator_id",
",",
"ratings",
"in",
"stream_item",
".",
"ratings",
".",
"items",
"(",
")",
":",
"if",
"(",
"required_annotator_id",
"is",
"not",
"None",
")",
"and",
"(",
"annotator_id",
"!=",
"required_annotator_id",
")",
":",
"continue",
"else",
":",
"for",
"rating",
"in",
"ratings",
":",
"label",
"=",
"Label",
"(",
"annotator",
"=",
"rating",
".",
"annotator",
",",
"target",
"=",
"rating",
".",
"target",
")",
"for",
"eqid",
",",
"(",
"chain_mentions",
",",
"chain_tokens",
")",
"in",
"equiv_ids",
".",
"items",
"(",
")",
":",
"if",
"chain_selector",
"(",
"rating",
".",
"mentions",
",",
"chain_mentions",
")",
":",
"## apply the label",
"for",
"tok",
"in",
"chain_tokens",
":",
"add_annotation",
"(",
"tok",
",",
"label",
")"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
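Note: the _CHAIN_SELECTORS table that names_in_chains looks up is defined elsewhere in _taggers.py and is not part of this row. As a minimal sketch only, predicates with the semantics described in the docstring could look like the following; the function names and the plain substring tests are assumptions for illustration, not the pipeline's actual implementation.

def _all_selector(mentions, chain_mentions):
    ## ALL: every rating mention must appear as a substring of at
    ## least one name observed in the chain
    return all(any(m in name for name in chain_mentions)
               for m in mentions)

def _any_selector(mentions, chain_mentions):
    ## ANY: some rating mention appears as a substring of at least
    ## one chain name
    return any(any(m in name for name in chain_mentions)
               for m in mentions)

def _any_multi_token_selector(mentions, chain_mentions):
    ## ANY_MULTI_TOKEN: for some mention, every space-separated name
    ## in that mention appears as a substring of some chain name
    return any(all(any(part in name for name in chain_mentions)
                   for part in m.split(' '))
               for m in mentions)

_CHAIN_SELECTORS = {
    'ALL': _all_selector,
    'ANY': _any_selector,
    'ANY_MULTI_TOKEN': _any_multi_token_selector,
}

With this shape, names_in_chains stays agnostic about the matching policy: it resolves the configured string to a predicate once, then calls it per chain.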
test | look_ahead_match | iterate through all tokens looking for matches of cleansed tokens
or token regexes, skipping tokens left empty by cleansing and
coping with Token objects that produce multiple space-separated
strings when cleansed. Yields tokens that match. | streamcorpus_pipeline/_taggers.py | def look_ahead_match(rating, tokens):
'''iterate through all tokens looking for matches of cleansed tokens
or token regexes, skipping tokens left empty by cleansing and
coping with Token objects that produce multiple space-separated
strings when cleansed. Yields tokens that match.
'''
## this ensures that all cleansed tokens are non-zero length
all_mregexes = []
for m in rating.mentions:
mregexes = []
mpatterns = m.decode('utf8').split(' ')
for mpat in mpatterns:
            if mpat.startswith('ur"^') and mpat.endswith('$"'): # is a regex literal
## chop out the meat of the regex so we can reconstitute it below
mpat = mpat[4:-2]
else:
mpat = cleanse(mpat)
if mpat:
## make a unicode raw string
## https://docs.python.org/2/reference/lexical_analysis.html#string-literals
mpat = ur'^%s$' % mpat
logger.debug('look_ahead_match compiling regex: %s', mpat)
mregexes.append(re.compile(mpat, re.UNICODE | re.IGNORECASE))
if not mregexes:
logger.warn('got empty cleansed mention: %r\nrating=%r' % (m, rating))
all_mregexes.append(mregexes)
## now that we have all_mregexes, go through all the tokens
for i in range(len(tokens)):
for mregexes in all_mregexes:
if mregexes[0].match(tokens[i][0][0]):
## found the start of a possible match, so iterate
## through the tuples of cleansed strings for each
## Token while stepping through the cleansed strings
## for this mention.
m_j = 1
i_j = 0
last_token_matched = 0
matched = True
while m_j < len(mregexes):
i_j += 1
if i_j == len(tokens[i + last_token_matched][0]):
i_j = 0
last_token_matched += 1
if i + last_token_matched == len(tokens):
matched = False
break
target_token = tokens[i + last_token_matched][0][i_j]
## this next line is the actual string comparison
if mregexes[m_j].match(target_token):
m_j += 1
elif target_token == '':
continue
else:
matched = False
break
if matched:
## yield each matched token only once
toks = set()
for j in xrange(last_token_matched + 1):
toks.add(tokens[i + j][1])
for tok in toks:
yield tok | def look_ahead_match(rating, tokens):
'''iterate through all tokens looking for matches of cleansed tokens
or token regexes, skipping tokens left empty by cleansing and
coping with Token objects that produce multiple space-separated
strings when cleansed. Yields tokens that match.
'''
## this ensures that all cleansed tokens are non-zero length
all_mregexes = []
for m in rating.mentions:
mregexes = []
mpatterns = m.decode('utf8').split(' ')
for mpat in mpatterns:
            if mpat.startswith('ur"^') and mpat.endswith('$"'): # is a regex literal
## chop out the meat of the regex so we can reconstitute it below
mpat = mpat[4:-2]
else:
mpat = cleanse(mpat)
if mpat:
## make a unicode raw string
## https://docs.python.org/2/reference/lexical_analysis.html#string-literals
mpat = ur'^%s$' % mpat
logger.debug('look_ahead_match compiling regex: %s', mpat)
mregexes.append(re.compile(mpat, re.UNICODE | re.IGNORECASE))
if not mregexes:
logger.warn('got empty cleansed mention: %r\nrating=%r' % (m, rating))
all_mregexes.append(mregexes)
## now that we have all_mregexes, go through all the tokens
for i in range(len(tokens)):
for mregexes in all_mregexes:
if mregexes[0].match(tokens[i][0][0]):
## found the start of a possible match, so iterate
## through the tuples of cleansed strings for each
## Token while stepping through the cleansed strings
## for this mention.
m_j = 1
i_j = 0
last_token_matched = 0
matched = True
while m_j < len(mregexes):
i_j += 1
if i_j == len(tokens[i + last_token_matched][0]):
i_j = 0
last_token_matched += 1
if i + last_token_matched == len(tokens):
matched = False
break
target_token = tokens[i + last_token_matched][0][i_j]
## this next line is the actual string comparison
if mregexes[m_j].match(target_token):
m_j += 1
elif target_token == '':
continue
else:
matched = False
break
if matched:
## yield each matched token only once
toks = set()
for j in xrange(last_token_matched + 1):
toks.add(tokens[i + j][1])
for tok in toks:
yield tok | [
"iterate",
"through",
"all",
"tokens",
"looking",
"for",
"matches",
"of",
"cleansed",
"tokens",
"or",
"token",
"regexes",
"skipping",
"tokens",
"left",
"empty",
"by",
"cleansing",
"and",
"coping",
"with",
"Token",
"objects",
"that",
"produce",
"multiple",
"space",
"-",
"separated",
"strings",
"when",
"cleansed",
".",
"Yields",
"tokens",
"that",
"match",
"."
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_taggers.py#L348-L413 | [
"def",
"look_ahead_match",
"(",
"rating",
",",
"tokens",
")",
":",
"## this ensures that all cleansed tokens are non-zero length",
"all_mregexes",
"=",
"[",
"]",
"for",
"m",
"in",
"rating",
".",
"mentions",
":",
"mregexes",
"=",
"[",
"]",
"mpatterns",
"=",
"m",
".",
"decode",
"(",
"'utf8'",
")",
".",
"split",
"(",
"' '",
")",
"for",
"mpat",
"in",
"mpatterns",
":",
"if",
"mpat",
".",
"startswith",
"(",
"'ur\"^'",
")",
"and",
"mpat",
".",
"endswith",
"(",
"'$\"'",
")",
":",
"# is not regex",
"## chop out the meat of the regex so we can reconstitute it below",
"mpat",
"=",
"mpat",
"[",
"4",
":",
"-",
"2",
"]",
"else",
":",
"mpat",
"=",
"cleanse",
"(",
"mpat",
")",
"if",
"mpat",
":",
"## make a unicode raw string",
"## https://docs.python.org/2/reference/lexical_analysis.html#string-literals",
"mpat",
"=",
"ur'^%s$'",
"%",
"mpat",
"logger",
".",
"debug",
"(",
"'look_ahead_match compiling regex: %s'",
",",
"mpat",
")",
"mregexes",
".",
"append",
"(",
"re",
".",
"compile",
"(",
"mpat",
",",
"re",
".",
"UNICODE",
"|",
"re",
".",
"IGNORECASE",
")",
")",
"if",
"not",
"mregexes",
":",
"logger",
".",
"warn",
"(",
"'got empty cleansed mention: %r\\nrating=%r'",
"%",
"(",
"m",
",",
"rating",
")",
")",
"all_mregexes",
".",
"append",
"(",
"mregexes",
")",
"## now that we have all_mregexes, go through all the tokens",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"tokens",
")",
")",
":",
"for",
"mregexes",
"in",
"all_mregexes",
":",
"if",
"mregexes",
"[",
"0",
"]",
".",
"match",
"(",
"tokens",
"[",
"i",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
")",
":",
"## found the start of a possible match, so iterate",
"## through the tuples of cleansed strings for each",
"## Token while stepping through the cleansed strings",
"## for this mention.",
"m_j",
"=",
"1",
"i_j",
"=",
"0",
"last_token_matched",
"=",
"0",
"matched",
"=",
"True",
"while",
"m_j",
"<",
"len",
"(",
"mregexes",
")",
":",
"i_j",
"+=",
"1",
"if",
"i_j",
"==",
"len",
"(",
"tokens",
"[",
"i",
"+",
"last_token_matched",
"]",
"[",
"0",
"]",
")",
":",
"i_j",
"=",
"0",
"last_token_matched",
"+=",
"1",
"if",
"i",
"+",
"last_token_matched",
"==",
"len",
"(",
"tokens",
")",
":",
"matched",
"=",
"False",
"break",
"target_token",
"=",
"tokens",
"[",
"i",
"+",
"last_token_matched",
"]",
"[",
"0",
"]",
"[",
"i_j",
"]",
"## this next line is the actual string comparison",
"if",
"mregexes",
"[",
"m_j",
"]",
".",
"match",
"(",
"target_token",
")",
":",
"m_j",
"+=",
"1",
"elif",
"target_token",
"==",
"''",
":",
"continue",
"else",
":",
"matched",
"=",
"False",
"break",
"if",
"matched",
":",
"## yield each matched token only once",
"toks",
"=",
"set",
"(",
")",
"for",
"j",
"in",
"xrange",
"(",
"last_token_matched",
"+",
"1",
")",
":",
"toks",
".",
"add",
"(",
"tokens",
"[",
"i",
"+",
"j",
"]",
"[",
"1",
"]",
")",
"for",
"tok",
"in",
"toks",
":",
"yield",
"tok"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
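look_ahead_match's first loop turns each mention into a list of anchored regexes: a space-separated piece either reuses an embedded ur"^...$" regex literal or is cleansed into a plain word. A standalone sketch of that step follows; the lowercasing cleanse stand-in is an assumption, since the pipeline's real cleanse() helper does more normalization.

import re

def mention_to_regexes(mention, cleanse=lambda s: s.strip().lower()):
    regexes = []
    for mpat in mention.split(' '):
        if mpat.startswith('ur"^') and mpat.endswith('$"'):
            ## reuse the body of an embedded regex literal
            mpat = mpat[4:-2]
        else:
            ## plain word: cleanse it and match it whole
            mpat = cleanse(mpat)
        if mpat:
            regexes.append(re.compile('^%s$' % mpat,
                                      re.UNICODE | re.IGNORECASE))
    return regexes

## e.g. mention_to_regexes('John Smith') yields two anchored,
## case-insensitive patterns, one per word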
test | multi_token_match | iterate through tokens looking for near-exact matches to strings
in si.ratings...mentions | streamcorpus_pipeline/_taggers.py | def multi_token_match(stream_item, aligner_data):
'''
iterate through tokens looking for near-exact matches to strings
in si.ratings...mentions
'''
tagger_id = _get_tagger_id(stream_item, aligner_data)
sentences = stream_item.body.sentences.get(tagger_id)
if not sentences:
return
## construct a list of tuples, where the first part of each tuple
## is a tuple of cleansed strings, and the second part is the
## Token object from which it came.
tokens = map(lambda tok: (cleanse(tok.token.decode('utf8')).split(' '), tok),
itertools.chain(*[sent.tokens for sent in sentences]))
required_annotator_id = aligner_data['annotator_id']
for annotator_id, ratings in stream_item.ratings.items():
if (required_annotator_id is None) or (annotator_id == required_annotator_id):
for rating in ratings:
label = Label(annotator=rating.annotator,
target=rating.target)
num_tokens_matched = 0
for tok in look_ahead_match(rating, tokens):
if aligner_data.get('update_labels'):
tok.labels.pop(annotator_id, None)
add_annotation(tok, label)
num_tokens_matched += 1
if num_tokens_matched == 0:
logger.warning('multi_token_match didn\'t actually match '
'entity %r in stream_id %r',
rating.target.target_id,
stream_item.stream_id)
else:
logger.debug('matched %d tokens for %r in %r',
num_tokens_matched, rating.target.target_id,
stream_item.stream_id) | def multi_token_match(stream_item, aligner_data):
'''
iterate through tokens looking for near-exact matches to strings
in si.ratings...mentions
'''
tagger_id = _get_tagger_id(stream_item, aligner_data)
sentences = stream_item.body.sentences.get(tagger_id)
if not sentences:
return
## construct a list of tuples, where the first part of each tuple
## is a tuple of cleansed strings, and the second part is the
## Token object from which it came.
tokens = map(lambda tok: (cleanse(tok.token.decode('utf8')).split(' '), tok),
itertools.chain(*[sent.tokens for sent in sentences]))
required_annotator_id = aligner_data['annotator_id']
for annotator_id, ratings in stream_item.ratings.items():
if (required_annotator_id is None) or (annotator_id == required_annotator_id):
for rating in ratings:
label = Label(annotator=rating.annotator,
target=rating.target)
num_tokens_matched = 0
for tok in look_ahead_match(rating, tokens):
if aligner_data.get('update_labels'):
tok.labels.pop(annotator_id, None)
add_annotation(tok, label)
num_tokens_matched += 1
if num_tokens_matched == 0:
logger.warning('multi_token_match didn\'t actually match '
'entity %r in stream_id %r',
rating.target.target_id,
stream_item.stream_id)
else:
logger.debug('matched %d tokens for %r in %r',
num_tokens_matched, rating.target.target_id,
stream_item.stream_id) | [
"iterate",
"through",
"tokens",
"looking",
"for",
"near",
"-",
"exact",
"matches",
"to",
"strings",
"in",
"si",
".",
"ratings",
"...",
"mentions"
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_taggers.py#L415-L451 | [
"def",
"multi_token_match",
"(",
"stream_item",
",",
"aligner_data",
")",
":",
"tagger_id",
"=",
"_get_tagger_id",
"(",
"stream_item",
",",
"aligner_data",
")",
"sentences",
"=",
"stream_item",
".",
"body",
".",
"sentences",
".",
"get",
"(",
"tagger_id",
")",
"if",
"not",
"sentences",
":",
"return",
"## construct a list of tuples, where the first part of each tuple",
"## is a tuple of cleansed strings, and the second part is the",
"## Token object from which it came.",
"tokens",
"=",
"map",
"(",
"lambda",
"tok",
":",
"(",
"cleanse",
"(",
"tok",
".",
"token",
".",
"decode",
"(",
"'utf8'",
")",
")",
".",
"split",
"(",
"' '",
")",
",",
"tok",
")",
",",
"itertools",
".",
"chain",
"(",
"*",
"[",
"sent",
".",
"tokens",
"for",
"sent",
"in",
"sentences",
"]",
")",
")",
"required_annotator_id",
"=",
"aligner_data",
"[",
"'annotator_id'",
"]",
"for",
"annotator_id",
",",
"ratings",
"in",
"stream_item",
".",
"ratings",
".",
"items",
"(",
")",
":",
"if",
"(",
"required_annotator_id",
"is",
"None",
")",
"or",
"(",
"annotator_id",
"==",
"required_annotator_id",
")",
":",
"for",
"rating",
"in",
"ratings",
":",
"label",
"=",
"Label",
"(",
"annotator",
"=",
"rating",
".",
"annotator",
",",
"target",
"=",
"rating",
".",
"target",
")",
"num_tokens_matched",
"=",
"0",
"for",
"tok",
"in",
"look_ahead_match",
"(",
"rating",
",",
"tokens",
")",
":",
"if",
"aligner_data",
".",
"get",
"(",
"'update_labels'",
")",
":",
"tok",
".",
"labels",
".",
"pop",
"(",
"annotator_id",
",",
"None",
")",
"add_annotation",
"(",
"tok",
",",
"label",
")",
"num_tokens_matched",
"+=",
"1",
"if",
"num_tokens_matched",
"==",
"0",
":",
"logger",
".",
"warning",
"(",
"'multi_token_match didn\\'t actually match '",
"'entity %r in stream_id %r'",
",",
"rating",
".",
"target",
".",
"target_id",
",",
"stream_item",
".",
"stream_id",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"'matched %d tokens for %r in %r'",
",",
"num_tokens_matched",
",",
"rating",
".",
"target",
".",
"target_id",
",",
"stream_item",
".",
"stream_id",
")"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
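The tokens argument that multi_token_match hands to look_ahead_match is a flat list of (cleansed_parts, token) pairs across all sentences. Below is a toy illustration of that shape; FakeToken and the lowercasing cleanse are stand-ins invented for this example, not streamcorpus types.

import itertools

class FakeToken(object):
    def __init__(self, token):
        self.token = token
        self.labels = {}

def build_pairs(sentences, cleanse=lambda s: s.lower()):
    ## one (list of cleansed strings, token) pair per token, in
    ## document order, mirroring the map/chain in multi_token_match
    return [(cleanse(tok.token).split(' '), tok)
            for tok in itertools.chain(*sentences)]

sents = [[FakeToken(u'John'), FakeToken(u'Smith')], [FakeToken(u'He')]]
pairs = build_pairs(sents)
## pairs[0][0] == [u'john']; pairs[0][1] is the first FakeToken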
test | TaggerBatchTransform.make_ner_file | run tagger as a child process to get XML output | streamcorpus_pipeline/_taggers.py | def make_ner_file(self, clean_visible_path, ner_xml_path):
        '''run tagger as a child process to get XML output'''
if self.template is None:
raise exceptions.NotImplementedError('''
Subclasses must specify a class property "template" that provides
command string format for running a tagger. It should take
%(tagger_root_path)s as the path from the config file,
%(clean_visible_path)s as the input XML file, and %(ner_xml_path)s as
the output path to create.
''')
tagger_config = dict(
tagger_root_path=self.config['tagger_root_path'],
clean_visible_path=clean_visible_path,
ner_xml_path=ner_xml_path)
        ## get a java_heap_size, or default to the empty string
tagger_config['java_heap_size'] = self.config.get('java_heap_size', '')
cmd = self.template % tagger_config
start_time = time.time()
## make sure we are using as little memory as possible
gc.collect()
try:
self._child = subprocess.Popen(cmd, stderr=subprocess.PIPE, shell=True)
except OSError, exc:
msg = traceback.format_exc(exc)
msg += make_memory_info_msg(clean_visible_path, ner_xml_path)
raise PipelineOutOfMemory(msg)
s_out, errors = self._child.communicate()
if not self._child.returncode == 0:
if 'java.lang.OutOfMemoryError' in errors:
msg = errors + make_memory_info_msg(clean_visible_path, ner_xml_path)
raise PipelineOutOfMemory(msg)
elif self._child.returncode == 137:
msg = 'tagger returncode = 137\n' + errors
msg += make_memory_info_msg(clean_visible_path, ner_xml_path)
# maybe get a tail of /var/log/messages
raise PipelineOutOfMemory(msg)
elif 'Exception' in errors:
raise PipelineBaseException(errors)
else:
raise PipelineBaseException('tagger exited with %r' % self._child.returncode)
elapsed = time.time() - start_time
logger.info('finished tagging in %.1f seconds' % elapsed)
return elapsed | def make_ner_file(self, clean_visible_path, ner_xml_path):
        '''run tagger as a child process to get XML output'''
if self.template is None:
raise exceptions.NotImplementedError('''
Subclasses must specify a class property "template" that provides
command string format for running a tagger. It should take
%(tagger_root_path)s as the path from the config file,
%(clean_visible_path)s as the input XML file, and %(ner_xml_path)s as
the output path to create.
''')
tagger_config = dict(
tagger_root_path=self.config['tagger_root_path'],
clean_visible_path=clean_visible_path,
ner_xml_path=ner_xml_path)
        ## get a java_heap_size, or default to the empty string
tagger_config['java_heap_size'] = self.config.get('java_heap_size', '')
cmd = self.template % tagger_config
start_time = time.time()
## make sure we are using as little memory as possible
gc.collect()
try:
self._child = subprocess.Popen(cmd, stderr=subprocess.PIPE, shell=True)
except OSError, exc:
msg = traceback.format_exc(exc)
msg += make_memory_info_msg(clean_visible_path, ner_xml_path)
raise PipelineOutOfMemory(msg)
s_out, errors = self._child.communicate()
if not self._child.returncode == 0:
if 'java.lang.OutOfMemoryError' in errors:
msg = errors + make_memory_info_msg(clean_visible_path, ner_xml_path)
raise PipelineOutOfMemory(msg)
elif self._child.returncode == 137:
msg = 'tagger returncode = 137\n' + errors
msg += make_memory_info_msg(clean_visible_path, ner_xml_path)
# maybe get a tail of /var/log/messages
raise PipelineOutOfMemory(msg)
elif 'Exception' in errors:
raise PipelineBaseException(errors)
else:
raise PipelineBaseException('tagger exited with %r' % self._child.returncode)
elapsed = time.time() - start_time
logger.info('finished tagging in %.1f seconds' % elapsed)
return elapsed | [
"run",
"tagger",
"a",
"child",
"process",
"to",
"get",
"XML",
"output"
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_taggers.py#L619-L664 | [
"def",
"make_ner_file",
"(",
"self",
",",
"clean_visible_path",
",",
"ner_xml_path",
")",
":",
"if",
"self",
".",
"template",
"is",
"None",
":",
"raise",
"exceptions",
".",
"NotImplementedError",
"(",
"'''\nSubclasses must specify a class property \"template\" that provides\ncommand string format for running a tagger. It should take\n%(tagger_root_path)s as the path from the config file,\n%(clean_visible_path)s as the input XML file, and %(ner_xml_path)s as\nthe output path to create.\n'''",
")",
"tagger_config",
"=",
"dict",
"(",
"tagger_root_path",
"=",
"self",
".",
"config",
"[",
"'tagger_root_path'",
"]",
",",
"clean_visible_path",
"=",
"clean_visible_path",
",",
"ner_xml_path",
"=",
"ner_xml_path",
")",
"## get a java_heap_size or default to 1GB",
"tagger_config",
"[",
"'java_heap_size'",
"]",
"=",
"self",
".",
"config",
".",
"get",
"(",
"'java_heap_size'",
",",
"''",
")",
"cmd",
"=",
"self",
".",
"template",
"%",
"tagger_config",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"## make sure we are using as little memory as possible",
"gc",
".",
"collect",
"(",
")",
"try",
":",
"self",
".",
"_child",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
",",
"shell",
"=",
"True",
")",
"except",
"OSError",
",",
"exc",
":",
"msg",
"=",
"traceback",
".",
"format_exc",
"(",
"exc",
")",
"msg",
"+=",
"make_memory_info_msg",
"(",
"clean_visible_path",
",",
"ner_xml_path",
")",
"raise",
"PipelineOutOfMemory",
"(",
"msg",
")",
"s_out",
",",
"errors",
"=",
"self",
".",
"_child",
".",
"communicate",
"(",
")",
"if",
"not",
"self",
".",
"_child",
".",
"returncode",
"==",
"0",
":",
"if",
"'java.lang.OutOfMemoryError'",
"in",
"errors",
":",
"msg",
"=",
"errors",
"+",
"make_memory_info_msg",
"(",
"clean_visible_path",
",",
"ner_xml_path",
")",
"raise",
"PipelineOutOfMemory",
"(",
"msg",
")",
"elif",
"self",
".",
"_child",
".",
"returncode",
"==",
"137",
":",
"msg",
"=",
"'tagger returncode = 137\\n'",
"+",
"errors",
"msg",
"+=",
"make_memory_info_msg",
"(",
"clean_visible_path",
",",
"ner_xml_path",
")",
"# maybe get a tail of /var/log/messages",
"raise",
"PipelineOutOfMemory",
"(",
"msg",
")",
"elif",
"'Exception'",
"in",
"errors",
":",
"raise",
"PipelineBaseException",
"(",
"errors",
")",
"else",
":",
"raise",
"PipelineBaseException",
"(",
"'tagger exited with %r'",
"%",
"self",
".",
"_child",
".",
"returncode",
")",
"elapsed",
"=",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
"logger",
".",
"info",
"(",
"'finished tagging in %.1f seconds'",
"%",
"elapsed",
")",
"return",
"elapsed"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
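The template contract that make_ner_file enforces can be seen in miniature below. The java command line and all paths here are invented for illustration; each real tagger subclass supplies its own template string with these %(...)s slots.

import subprocess

## hypothetical subclass template -- not a real tagger's command line
template = ('java -Xmx%(java_heap_size)s '
            '-jar %(tagger_root_path)s/tagger.jar '
            '%(clean_visible_path)s %(ner_xml_path)s')

cmd = template % dict(tagger_root_path='/opt/tagger',
                      java_heap_size='1g',
                      clean_visible_path='doc.clean_visible.xml',
                      ner_xml_path='doc.ner.xml')

child = subprocess.Popen(cmd, stderr=subprocess.PIPE, shell=True)
_, errors = child.communicate()
if child.returncode != 0:
    ## the real method distinguishes OOM (returncode 137 or a
    ## java.lang.OutOfMemoryError in stderr) from other failures;
    ## this sketch just fails loudly
    raise RuntimeError('tagger exited with %r: %s'
                       % (child.returncode, errors))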
test | TaggerBatchTransform.align_chunk_with_ner | iterate through ner_xml_path to fuse with i_chunk into o_chunk | streamcorpus_pipeline/_taggers.py | def align_chunk_with_ner(self, ner_xml_path, i_chunk, o_chunk):
''' iterate through ner_xml_path to fuse with i_chunk into o_chunk '''
## prepare to iterate over the input chunk
input_iter = i_chunk.__iter__()
all_ner = xml.dom.minidom.parse(open(ner_xml_path))
## this converts our UTF-8 data into unicode strings, so when
## we want to compute byte offsets or construct tokens, we
## must .encode('utf8')
for ner_dom in all_ner.getElementsByTagName('FILENAME'):
#for stream_id, raw_ner in files(open(ner_xml_path).read().decode('utf8')):
stream_item = input_iter.next()
## get stream_id out of the XML
stream_id = ner_dom.attributes.get('stream_id').value
if stream_item.stream_id is None:
assert not stream_id, 'out of sync: None != %r' % stream_id
logger.critical('si.stream_id is None... ignoring')
continue
assert stream_id and stream_id == stream_item.stream_id, \
'%s != %s' % (stream_id, stream_item.stream_id)
if not stream_item.body:
## the XML better have had an empty clean_visible too...
#assert not ner_dom....something
continue
tagging = Tagging()
tagging.tagger_id = self.tagger_id # pylint: disable=E1101
'''
## get this one file out of its FILENAME tags
tagged_doc_parts = list(files(ner_dom.toxml()))
if not tagged_doc_parts:
continue
tagged_doc = tagged_doc_parts[0][1]
## hack
hope_original = make_clean_visible(tagged_doc, '')
open(ner_xml_path + '-clean', 'wb').write(hope_original.encode('utf-8'))
print ner_xml_path + '-clean'
'''
#tagging.raw_tagging = tagged_doc
tagging.generation_time = streamcorpus.make_stream_time()
stream_item.body.taggings[self.tagger_id] = tagging # pylint: disable=E1101
## could consume lots of memory here by instantiating everything
sentences, relations, attributes = self.get_sentences(ner_dom)
stream_item.body.sentences[self.tagger_id] = sentences # pylint: disable=E1101
stream_item.body.relations[self.tagger_id] = relations # pylint: disable=E1101
stream_item.body.attributes[self.tagger_id] = attributes # pylint: disable=E1101
logger.debug('finished aligning tokens %s' % stream_item.stream_id)
'''
for num, sent in enumerate(sentences):
for tok in sent.tokens:
print '%d\t%d\t%s' % (num, tok.offsets[OffsetType.LINES].first, repr(tok.token))
'''
if 'align_labels_by' in self.config and self.config['align_labels_by']:
assert 'aligner_data' in self.config, 'config missing "aligner_data"'
aligner = AlignmentStrategies[ self.config['align_labels_by'] ]
aligner( stream_item, self.config['aligner_data'] )
## forcibly collect dereferenced objects
gc.collect()
try:
o_chunk.add(stream_item)
except MemoryError, exc:
msg = traceback.format_exc(exc)
msg += make_memory_info_msg()
logger.critical(msg)
raise PipelineOutOfMemory(msg)
## all done, so close the o_chunk
try:
o_chunk.close()
logger.info('finished chunk for %r' % ner_xml_path)
except MemoryError, exc:
msg = traceback.format_exc(exc)
msg += make_memory_info_msg()
logger.critical(msg)
raise PipelineOutOfMemory(msg) | def align_chunk_with_ner(self, ner_xml_path, i_chunk, o_chunk):
''' iterate through ner_xml_path to fuse with i_chunk into o_chunk '''
## prepare to iterate over the input chunk
input_iter = i_chunk.__iter__()
all_ner = xml.dom.minidom.parse(open(ner_xml_path))
## this converts our UTF-8 data into unicode strings, so when
## we want to compute byte offsets or construct tokens, we
## must .encode('utf8')
for ner_dom in all_ner.getElementsByTagName('FILENAME'):
#for stream_id, raw_ner in files(open(ner_xml_path).read().decode('utf8')):
stream_item = input_iter.next()
## get stream_id out of the XML
stream_id = ner_dom.attributes.get('stream_id').value
if stream_item.stream_id is None:
assert not stream_id, 'out of sync: None != %r' % stream_id
logger.critical('si.stream_id is None... ignoring')
continue
assert stream_id and stream_id == stream_item.stream_id, \
'%s != %s' % (stream_id, stream_item.stream_id)
if not stream_item.body:
## the XML better have had an empty clean_visible too...
#assert not ner_dom....something
continue
tagging = Tagging()
tagging.tagger_id = self.tagger_id # pylint: disable=E1101
'''
## get this one file out of its FILENAME tags
tagged_doc_parts = list(files(ner_dom.toxml()))
if not tagged_doc_parts:
continue
tagged_doc = tagged_doc_parts[0][1]
## hack
hope_original = make_clean_visible(tagged_doc, '')
open(ner_xml_path + '-clean', 'wb').write(hope_original.encode('utf-8'))
print ner_xml_path + '-clean'
'''
#tagging.raw_tagging = tagged_doc
tagging.generation_time = streamcorpus.make_stream_time()
stream_item.body.taggings[self.tagger_id] = tagging # pylint: disable=E1101
## could consume lots of memory here by instantiating everything
sentences, relations, attributes = self.get_sentences(ner_dom)
stream_item.body.sentences[self.tagger_id] = sentences # pylint: disable=E1101
stream_item.body.relations[self.tagger_id] = relations # pylint: disable=E1101
stream_item.body.attributes[self.tagger_id] = attributes # pylint: disable=E1101
logger.debug('finished aligning tokens %s' % stream_item.stream_id)
'''
for num, sent in enumerate(sentences):
for tok in sent.tokens:
print '%d\t%d\t%s' % (num, tok.offsets[OffsetType.LINES].first, repr(tok.token))
'''
if 'align_labels_by' in self.config and self.config['align_labels_by']:
assert 'aligner_data' in self.config, 'config missing "aligner_data"'
aligner = AlignmentStrategies[ self.config['align_labels_by'] ]
aligner( stream_item, self.config['aligner_data'] )
## forcibly collect dereferenced objects
gc.collect()
try:
o_chunk.add(stream_item)
except MemoryError, exc:
msg = traceback.format_exc(exc)
msg += make_memory_info_msg()
logger.critical(msg)
raise PipelineOutOfMemory(msg)
## all done, so close the o_chunk
try:
o_chunk.close()
logger.info('finished chunk for %r' % ner_xml_path)
except MemoryError, exc:
msg = traceback.format_exc(exc)
msg += make_memory_info_msg()
logger.critical(msg)
raise PipelineOutOfMemory(msg) | [
"iterate",
"through",
"ner_xml_path",
"to",
"fuse",
"with",
"i_chunk",
"into",
"o_chunk"
] | trec-kba/streamcorpus-pipeline | python | https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_taggers.py#L669-L757 | [
"def",
"align_chunk_with_ner",
"(",
"self",
",",
"ner_xml_path",
",",
"i_chunk",
",",
"o_chunk",
")",
":",
"## prepare to iterate over the input chunk",
"input_iter",
"=",
"i_chunk",
".",
"__iter__",
"(",
")",
"all_ner",
"=",
"xml",
".",
"dom",
".",
"minidom",
".",
"parse",
"(",
"open",
"(",
"ner_xml_path",
")",
")",
"## this converts our UTF-8 data into unicode strings, so when",
"## we want to compute byte offsets or construct tokens, we",
"## must .encode('utf8')",
"for",
"ner_dom",
"in",
"all_ner",
".",
"getElementsByTagName",
"(",
"'FILENAME'",
")",
":",
"#for stream_id, raw_ner in files(open(ner_xml_path).read().decode('utf8')):",
"stream_item",
"=",
"input_iter",
".",
"next",
"(",
")",
"## get stream_id out of the XML",
"stream_id",
"=",
"ner_dom",
".",
"attributes",
".",
"get",
"(",
"'stream_id'",
")",
".",
"value",
"if",
"stream_item",
".",
"stream_id",
"is",
"None",
":",
"assert",
"not",
"stream_id",
",",
"'out of sync: None != %r'",
"%",
"stream_id",
"logger",
".",
"critical",
"(",
"'si.stream_id is None... ignoring'",
")",
"continue",
"assert",
"stream_id",
"and",
"stream_id",
"==",
"stream_item",
".",
"stream_id",
",",
"'%s != %s'",
"%",
"(",
"stream_id",
",",
"stream_item",
".",
"stream_id",
")",
"if",
"not",
"stream_item",
".",
"body",
":",
"## the XML better have had an empty clean_visible too...",
"#assert not ner_dom....something",
"continue",
"tagging",
"=",
"Tagging",
"(",
")",
"tagging",
".",
"tagger_id",
"=",
"self",
".",
"tagger_id",
"# pylint: disable=E1101",
"'''\n ## get this one file out of its FILENAME tags\n tagged_doc_parts = list(files(ner_dom.toxml()))\n if not tagged_doc_parts:\n continue\n\n tagged_doc = tagged_doc_parts[0][1]\n\n ## hack\n hope_original = make_clean_visible(tagged_doc, '')\n open(ner_xml_path + '-clean', 'wb').write(hope_original.encode('utf-8'))\n print ner_xml_path + '-clean'\n '''",
"#tagging.raw_tagging = tagged_doc",
"tagging",
".",
"generation_time",
"=",
"streamcorpus",
".",
"make_stream_time",
"(",
")",
"stream_item",
".",
"body",
".",
"taggings",
"[",
"self",
".",
"tagger_id",
"]",
"=",
"tagging",
"# pylint: disable=E1101",
"## could consume lots of memory here by instantiating everything",
"sentences",
",",
"relations",
",",
"attributes",
"=",
"self",
".",
"get_sentences",
"(",
"ner_dom",
")",
"stream_item",
".",
"body",
".",
"sentences",
"[",
"self",
".",
"tagger_id",
"]",
"=",
"sentences",
"# pylint: disable=E1101",
"stream_item",
".",
"body",
".",
"relations",
"[",
"self",
".",
"tagger_id",
"]",
"=",
"relations",
"# pylint: disable=E1101",
"stream_item",
".",
"body",
".",
"attributes",
"[",
"self",
".",
"tagger_id",
"]",
"=",
"attributes",
"# pylint: disable=E1101",
"logger",
".",
"debug",
"(",
"'finished aligning tokens %s'",
"%",
"stream_item",
".",
"stream_id",
")",
"'''\n for num, sent in enumerate(sentences):\n for tok in sent.tokens:\n print '%d\\t%d\\t%s' % (num, tok.offsets[OffsetType.LINES].first, repr(tok.token))\n '''",
"if",
"'align_labels_by'",
"in",
"self",
".",
"config",
"and",
"self",
".",
"config",
"[",
"'align_labels_by'",
"]",
":",
"assert",
"'aligner_data'",
"in",
"self",
".",
"config",
",",
"'config missing \"aligner_data\"'",
"aligner",
"=",
"AlignmentStrategies",
"[",
"self",
".",
"config",
"[",
"'align_labels_by'",
"]",
"]",
"aligner",
"(",
"stream_item",
",",
"self",
".",
"config",
"[",
"'aligner_data'",
"]",
")",
"## forcibly collect dereferenced objects",
"gc",
".",
"collect",
"(",
")",
"try",
":",
"o_chunk",
".",
"add",
"(",
"stream_item",
")",
"except",
"MemoryError",
",",
"exc",
":",
"msg",
"=",
"traceback",
".",
"format_exc",
"(",
"exc",
")",
"msg",
"+=",
"make_memory_info_msg",
"(",
")",
"logger",
".",
"critical",
"(",
"msg",
")",
"raise",
"PipelineOutOfMemory",
"(",
"msg",
")",
"## all done, so close the o_chunk",
"try",
":",
"o_chunk",
".",
"close",
"(",
")",
"logger",
".",
"info",
"(",
"'finished chunk for %r'",
"%",
"ner_xml_path",
")",
"except",
"MemoryError",
",",
"exc",
":",
"msg",
"=",
"traceback",
".",
"format_exc",
"(",
"exc",
")",
"msg",
"+=",
"make_memory_info_msg",
"(",
")",
"logger",
".",
"critical",
"(",
"msg",
")",
"raise",
"PipelineOutOfMemory",
"(",
"msg",
")"
] | 8bb82ea1beb83c6b40ed03fa1659df2897c2292a |
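align_chunk_with_ner walks one FILENAME element per document in the tagger's XML output and pairs it, in order, with the next StreamItem from the input chunk, checking the stream_id attribute to detect drift. A bare-bones sketch of that walk, with an illustrative file name:

import xml.dom.minidom

all_ner = xml.dom.minidom.parse(open('doc.ner.xml'))
for ner_dom in all_ner.getElementsByTagName('FILENAME'):
    stream_id = ner_dom.attributes.get('stream_id').value
    ## in the pipeline this element is consumed alongside the next
    ## StreamItem, and its sentences/relations/attributes are parsed
    ## out of the DOM and attached to stream_item.body
    print(stream_id)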