column                      type            range
repository_name             stringlengths   7 - 55
func_path_in_repository     stringlengths   4 - 223
func_name                   stringlengths   1 - 134
whole_func_string           stringlengths   75 - 104k
language                    stringclasses   1 value
func_code_string            stringlengths   75 - 104k
func_code_tokens            listlengths     19 - 28.4k
func_documentation_string   stringlengths   1 - 46.9k
func_documentation_tokens   listlengths     1 - 1.97k
split_name                  stringclasses   1 value
func_code_url               stringlengths   87 - 315
IceflowRE/unidown
unidown/plugin/a_plugin.py
APlugin.get_plugins
def get_plugins() -> Dict[str, pkg_resources.EntryPoint]: """ Get all available plugins for unidown. :return: plugin name list :rtype: Dict[str, ~pkg_resources.EntryPoint] """ return {entry.name: entry for entry in pkg_resources.iter_entry_points('unidown.plugin')}
python
def get_plugins() -> Dict[str, pkg_resources.EntryPoint]: """ Get all available plugins for unidown. :return: plugin name list :rtype: Dict[str, ~pkg_resources.EntryPoint] """ return {entry.name: entry for entry in pkg_resources.iter_entry_points('unidown.plugin')}
[ "def", "get_plugins", "(", ")", "->", "Dict", "[", "str", ",", "pkg_resources", ".", "EntryPoint", "]", ":", "return", "{", "entry", ".", "name", ":", "entry", "for", "entry", "in", "pkg_resources", ".", "iter_entry_points", "(", "'unidown.plugin'", ")", "}" ]
Get all available plugins for unidown. :return: plugin name list :rtype: Dict[str, ~pkg_resources.EntryPoint]
[ "Get", "all", "available", "plugins", "for", "unidown", "." ]
train
https://github.com/IceflowRE/unidown/blob/2a6f82ab780bb825668bfc55b67c11c4f72ec05c/unidown/plugin/a_plugin.py#L440-L447
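A minimal usage sketch for the APlugin.get_plugins entry above; it assumes unidown is installed and that at least one plugin distribution has registered itself under the 'unidown.plugin' entry point group, so the printed names depend entirely on what is installed.

    from unidown.plugin.a_plugin import APlugin

    # map of plugin name -> pkg_resources.EntryPoint for everything registered
    # under the 'unidown.plugin' entry point group
    plugins = APlugin.get_plugins()
    for name, entry_point in plugins.items():
        print(name, entry_point.module_name)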
HiPERCAM/hcam_widgets
hcam_widgets/astro.py
_equation_of_time
def _equation_of_time(t): """ Find the difference between apparent and mean solar time Parameters ---------- t : `~astropy.time.Time` times (array) Returns ---------- ret1 : `~astropy.units.Quantity` the equation of time """ # Julian centuries since J2000.0 T = (t - Time("J2000")).to(u.year).value / 100 # obliquity of ecliptic (Meeus 1998, eq 22.2) poly_pars = (84381.448, 46.8150, 0.00059, 0.001813) eps = u.Quantity(polyval(T, poly_pars), u.arcsec) y = np.tan(eps/2)**2 # Sun's mean longitude (Meeus 1998, eq 25.2) poly_pars = (280.46646, 36000.76983, 0.0003032) L0 = u.Quantity(polyval(T, poly_pars), u.deg) # Sun's mean anomaly (Meeus 1998, eq 25.3) poly_pars = (357.52911, 35999.05029, 0.0001537) M = u.Quantity(polyval(T, poly_pars), u.deg) # eccentricity of Earth's orbit (Meeus 1998, eq 25.4) poly_pars = (0.016708634, -0.000042037, -0.0000001267) e = polyval(T, poly_pars) # equation of time, radians (Meeus 1998, eq 28.3) eot = (y * np.sin(2*L0) - 2*e*np.sin(M) + 4*e*y*np.sin(M)*np.cos(2*L0) - 0.5*y**2 * np.sin(4*L0) - 5*e**2 * np.sin(2*M)/4) * u.rad return eot.to(u.hourangle)
python
def _equation_of_time(t): """ Find the difference between apparent and mean solar time Parameters ---------- t : `~astropy.time.Time` times (array) Returns ---------- ret1 : `~astropy.units.Quantity` the equation of time """ # Julian centuries since J2000.0 T = (t - Time("J2000")).to(u.year).value / 100 # obliquity of ecliptic (Meeus 1998, eq 22.2) poly_pars = (84381.448, 46.8150, 0.00059, 0.001813) eps = u.Quantity(polyval(T, poly_pars), u.arcsec) y = np.tan(eps/2)**2 # Sun's mean longitude (Meeus 1998, eq 25.2) poly_pars = (280.46646, 36000.76983, 0.0003032) L0 = u.Quantity(polyval(T, poly_pars), u.deg) # Sun's mean anomaly (Meeus 1998, eq 25.3) poly_pars = (357.52911, 35999.05029, 0.0001537) M = u.Quantity(polyval(T, poly_pars), u.deg) # eccentricity of Earth's orbit (Meeus 1998, eq 25.4) poly_pars = (0.016708634, -0.000042037, -0.0000001267) e = polyval(T, poly_pars) # equation of time, radians (Meeus 1998, eq 28.3) eot = (y * np.sin(2*L0) - 2*e*np.sin(M) + 4*e*y*np.sin(M)*np.cos(2*L0) - 0.5*y**2 * np.sin(4*L0) - 5*e**2 * np.sin(2*M)/4) * u.rad return eot.to(u.hourangle)
[ "def", "_equation_of_time", "(", "t", ")", ":", "# Julian centuries since J2000.0", "T", "=", "(", "t", "-", "Time", "(", "\"J2000\"", ")", ")", ".", "to", "(", "u", ".", "year", ")", ".", "value", "/", "100", "# obliquity of ecliptic (Meeus 1998, eq 22.2)", "poly_pars", "=", "(", "84381.448", ",", "46.8150", ",", "0.00059", ",", "0.001813", ")", "eps", "=", "u", ".", "Quantity", "(", "polyval", "(", "T", ",", "poly_pars", ")", ",", "u", ".", "arcsec", ")", "y", "=", "np", ".", "tan", "(", "eps", "/", "2", ")", "**", "2", "# Sun's mean longitude (Meeus 1998, eq 25.2)", "poly_pars", "=", "(", "280.46646", ",", "36000.76983", ",", "0.0003032", ")", "L0", "=", "u", ".", "Quantity", "(", "polyval", "(", "T", ",", "poly_pars", ")", ",", "u", ".", "deg", ")", "# Sun's mean anomaly (Meeus 1998, eq 25.3)", "poly_pars", "=", "(", "357.52911", ",", "35999.05029", ",", "0.0001537", ")", "M", "=", "u", ".", "Quantity", "(", "polyval", "(", "T", ",", "poly_pars", ")", ",", "u", ".", "deg", ")", "# eccentricity of Earth's orbit (Meeus 1998, eq 25.4)", "poly_pars", "=", "(", "0.016708634", ",", "-", "0.000042037", ",", "-", "0.0000001267", ")", "e", "=", "polyval", "(", "T", ",", "poly_pars", ")", "# equation of time, radians (Meeus 1998, eq 28.3)", "eot", "=", "(", "y", "*", "np", ".", "sin", "(", "2", "*", "L0", ")", "-", "2", "*", "e", "*", "np", ".", "sin", "(", "M", ")", "+", "4", "*", "e", "*", "y", "*", "np", ".", "sin", "(", "M", ")", "*", "np", ".", "cos", "(", "2", "*", "L0", ")", "-", "0.5", "*", "y", "**", "2", "*", "np", ".", "sin", "(", "4", "*", "L0", ")", "-", "5", "*", "e", "**", "2", "*", "np", ".", "sin", "(", "2", "*", "M", ")", "/", "4", ")", "*", "u", ".", "rad", "return", "eot", ".", "to", "(", "u", ".", "hourangle", ")" ]
Find the difference between apparent and mean solar time Parameters ---------- t : `~astropy.time.Time` times (array) Returns ---------- ret1 : `~astropy.units.Quantity` the equation of time
[ "Find", "the", "difference", "between", "apparent", "and", "mean", "solar", "time" ]
train
https://github.com/HiPERCAM/hcam_widgets/blob/7219f0d96dd3a8ebe3139c7f542a72c02d02fce8/hcam_widgets/astro.py#L15-L53
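As a quick check of the _equation_of_time helper above, one might evaluate it for a single date; this sketch assumes the private function is importable from hcam_widgets.astro alongside its astropy dependencies. Early November is chosen because the equation of time is then near its positive extreme of roughly a quarter of an hour.

    import astropy.units as u
    from astropy.time import Time
    from hcam_widgets.astro import _equation_of_time

    eot = _equation_of_time(Time("2020-11-03"))   # Quantity in hourangle
    # 1 hourangle corresponds to 1 hour of time, so multiply by 60 for minutes
    print(eot.value * 60)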
HiPERCAM/hcam_widgets
hcam_widgets/astro.py
_astropy_time_from_LST
def _astropy_time_from_LST(t, LST, location, prev_next): """ Convert a Local Sidereal Time to an astropy Time object. The local time is related to the LST through the RA of the Sun. This routine uses this relationship to convert a LST to an astropy time object. Returns ------- ret1 : `~astropy.time.Time` time corresponding to LST """ # now we need to figure out time to return from LST raSun = coord.get_sun(t).ra # calculate Greenwich Apparent Solar Time, which we will use as ~UTC for now with warnings.catch_warnings(): warnings.simplefilter('ignore') # ignore astropy deprecation warnings lon = location.longitude solarTime = LST - raSun + 12*u.hourangle - lon # assume this is on the same day as supplied time, and fix later first_guess = Time( u.d*int(t.mjd) + u.hour*solarTime.wrap_at('360d').hour, format='mjd' ) # Equation of time is difference between GAST and UTC eot = _equation_of_time(first_guess) first_guess = first_guess - u.hour * eot.value if prev_next == 'next': # if 'next', we want time to be greater than given time mask = first_guess < t rise_set_time = first_guess + mask * u.sday else: # if 'previous', we want time to be less than given time mask = first_guess > t rise_set_time = first_guess - mask * u.sday return rise_set_time
python
def _astropy_time_from_LST(t, LST, location, prev_next): """ Convert a Local Sidereal Time to an astropy Time object. The local time is related to the LST through the RA of the Sun. This routine uses this relationship to convert a LST to an astropy time object. Returns ------- ret1 : `~astropy.time.Time` time corresponding to LST """ # now we need to figure out time to return from LST raSun = coord.get_sun(t).ra # calculate Greenwich Apparent Solar Time, which we will use as ~UTC for now with warnings.catch_warnings(): warnings.simplefilter('ignore') # ignore astropy deprecation warnings lon = location.longitude solarTime = LST - raSun + 12*u.hourangle - lon # assume this is on the same day as supplied time, and fix later first_guess = Time( u.d*int(t.mjd) + u.hour*solarTime.wrap_at('360d').hour, format='mjd' ) # Equation of time is difference between GAST and UTC eot = _equation_of_time(first_guess) first_guess = first_guess - u.hour * eot.value if prev_next == 'next': # if 'next', we want time to be greater than given time mask = first_guess < t rise_set_time = first_guess + mask * u.sday else: # if 'previous', we want time to be less than given time mask = first_guess > t rise_set_time = first_guess - mask * u.sday return rise_set_time
[ "def", "_astropy_time_from_LST", "(", "t", ",", "LST", ",", "location", ",", "prev_next", ")", ":", "# now we need to figure out time to return from LST", "raSun", "=", "coord", ".", "get_sun", "(", "t", ")", ".", "ra", "# calculate Greenwich Apparent Solar Time, which we will use as ~UTC for now", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "'ignore'", ")", "# ignore astropy deprecation warnings", "lon", "=", "location", ".", "longitude", "solarTime", "=", "LST", "-", "raSun", "+", "12", "*", "u", ".", "hourangle", "-", "lon", "# assume this is on the same day as supplied time, and fix later", "first_guess", "=", "Time", "(", "u", ".", "d", "*", "int", "(", "t", ".", "mjd", ")", "+", "u", ".", "hour", "*", "solarTime", ".", "wrap_at", "(", "'360d'", ")", ".", "hour", ",", "format", "=", "'mjd'", ")", "# Equation of time is difference between GAST and UTC", "eot", "=", "_equation_of_time", "(", "first_guess", ")", "first_guess", "=", "first_guess", "-", "u", ".", "hour", "*", "eot", ".", "value", "if", "prev_next", "==", "'next'", ":", "# if 'next', we want time to be greater than given time", "mask", "=", "first_guess", "<", "t", "rise_set_time", "=", "first_guess", "+", "mask", "*", "u", ".", "sday", "else", ":", "# if 'previous', we want time to be less than given time", "mask", "=", "first_guess", ">", "t", "rise_set_time", "=", "first_guess", "-", "mask", "*", "u", ".", "sday", "return", "rise_set_time" ]
Convert a Local Sidereal Time to an astropy Time object. The local time is related to the LST through the RA of the Sun. This routine uses this relationship to convert a LST to an astropy time object. Returns ------- ret1 : `~astropy.time.Time` time corresponding to LST
[ "Convert", "a", "Local", "Sidereal", "Time", "to", "an", "astropy", "Time", "object", "." ]
train
https://github.com/HiPERCAM/hcam_widgets/blob/7219f0d96dd3a8ebe3139c7f542a72c02d02fce8/hcam_widgets/astro.py#L56-L97
HiPERCAM/hcam_widgets
hcam_widgets/astro.py
_rise_set_trig
def _rise_set_trig(t, target, location, prev_next, rise_set): """ Crude time at next rise/set of ``target`` using spherical trig. This method is ~15 times faster than `_calcriseset`, and inherently does *not* take the atmosphere into account. The time returned should not be used in calculations; the purpose of this routine is to supply a guess to `_calcriseset`. Parameters ---------- t : `~astropy.time.Time` or other (see below) Time of observation. This will be passed in as the first argument to the `~astropy.time.Time` initializer, so it can be anything that `~astropy.time.Time` will accept (including a `~astropy.time.Time` object) target : `~astropy.coordinates.SkyCoord` Position of target or multiple positions of that target at multiple times (if target moves, like the Sun) location : `~astropy.coordinates.EarthLocation` Observatory location prev_next : str - either 'previous' or 'next' Test next rise/set or previous rise/set rise_set : str - either 'rising' or 'setting' Compute prev/next rise or prev/next set Returns ------- ret1 : `~astropy.time.Time` Time of rise/set """ dec = target.transform_to(coord.ICRS).dec with warnings.catch_warnings(): warnings.simplefilter('ignore') # ignore astropy deprecation warnings lat = location.latitude cosHA = -np.tan(dec)*np.tan(lat.radian) # find the absolute value of the hour Angle HA = coord.Longitude(np.fabs(np.arccos(cosHA))) # if rise, HA is -ve and vice versa if rise_set == 'rising': HA = -HA # LST = HA + RA LST = HA + target.ra return _astropy_time_from_LST(t, LST, location, prev_next)
python
def _rise_set_trig(t, target, location, prev_next, rise_set): """ Crude time at next rise/set of ``target`` using spherical trig. This method is ~15 times faster than `_calcriseset`, and inherently does *not* take the atmosphere into account. The time returned should not be used in calculations; the purpose of this routine is to supply a guess to `_calcriseset`. Parameters ---------- t : `~astropy.time.Time` or other (see below) Time of observation. This will be passed in as the first argument to the `~astropy.time.Time` initializer, so it can be anything that `~astropy.time.Time` will accept (including a `~astropy.time.Time` object) target : `~astropy.coordinates.SkyCoord` Position of target or multiple positions of that target at multiple times (if target moves, like the Sun) location : `~astropy.coordinates.EarthLocation` Observatory location prev_next : str - either 'previous' or 'next' Test next rise/set or previous rise/set rise_set : str - either 'rising' or 'setting' Compute prev/next rise or prev/next set Returns ------- ret1 : `~astropy.time.Time` Time of rise/set """ dec = target.transform_to(coord.ICRS).dec with warnings.catch_warnings(): warnings.simplefilter('ignore') # ignore astropy deprecation warnings lat = location.latitude cosHA = -np.tan(dec)*np.tan(lat.radian) # find the absolute value of the hour Angle HA = coord.Longitude(np.fabs(np.arccos(cosHA))) # if rise, HA is -ve and vice versa if rise_set == 'rising': HA = -HA # LST = HA + RA LST = HA + target.ra return _astropy_time_from_LST(t, LST, location, prev_next)
[ "def", "_rise_set_trig", "(", "t", ",", "target", ",", "location", ",", "prev_next", ",", "rise_set", ")", ":", "dec", "=", "target", ".", "transform_to", "(", "coord", ".", "ICRS", ")", ".", "dec", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "'ignore'", ")", "# ignore astropy deprecation warnings", "lat", "=", "location", ".", "latitude", "cosHA", "=", "-", "np", ".", "tan", "(", "dec", ")", "*", "np", ".", "tan", "(", "lat", ".", "radian", ")", "# find the absolute value of the hour Angle", "HA", "=", "coord", ".", "Longitude", "(", "np", ".", "fabs", "(", "np", ".", "arccos", "(", "cosHA", ")", ")", ")", "# if rise, HA is -ve and vice versa", "if", "rise_set", "==", "'rising'", ":", "HA", "=", "-", "HA", "# LST = HA + RA", "LST", "=", "HA", "+", "target", ".", "ra", "return", "_astropy_time_from_LST", "(", "t", ",", "LST", ",", "location", ",", "prev_next", ")" ]
Crude time at next rise/set of ``target`` using spherical trig. This method is ~15 times faster than `_calcriseset`, and inherently does *not* take the atmosphere into account. The time returned should not be used in calculations; the purpose of this routine is to supply a guess to `_calcriseset`. Parameters ---------- t : `~astropy.time.Time` or other (see below) Time of observation. This will be passed in as the first argument to the `~astropy.time.Time` initializer, so it can be anything that `~astropy.time.Time` will accept (including a `~astropy.time.Time` object) target : `~astropy.coordinates.SkyCoord` Position of target or multiple positions of that target at multiple times (if target moves, like the Sun) location : `~astropy.coordinates.EarthLocation` Observatory location prev_next : str - either 'previous' or 'next' Test next rise/set or previous rise/set rise_set : str - either 'rising' or 'setting' Compute prev/next rise or prev/next set Returns ------- ret1 : `~astropy.time.Time` Time of rise/set
[ "Crude", "time", "at", "next", "rise", "/", "set", "of", "target", "using", "spherical", "trig", "." ]
train
https://github.com/HiPERCAM/hcam_widgets/blob/7219f0d96dd3a8ebe3139c7f542a72c02d02fce8/hcam_widgets/astro.py#L100-L150
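A sketch of how _rise_set_trig might be driven to get a crude sunset estimate; the observatory coordinates below are invented for illustration, and the call mirrors the way calc_riseset (next entry) seeds its refinement grid.

    import astropy.coordinates as coord
    import astropy.units as u
    from astropy.time import Time
    from hcam_widgets.astro import _rise_set_trig

    t = Time("2021-06-01T12:00:00")
    location = coord.EarthLocation.from_geodetic(-17.88*u.deg, 28.76*u.deg, 2332*u.m)
    sun = coord.get_sun(t)
    # crude time of the next sunset; only good enough to seed a refinement
    t_guess = _rise_set_trig(t, sun, location, 'next', 'setting')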
HiPERCAM/hcam_widgets
hcam_widgets/astro.py
calc_riseset
def calc_riseset(t, target_name, location, prev_next, rise_set, horizon): """ Time at next rise/set of ``target``. Parameters ---------- t : `~astropy.time.Time` or other (see below) Time of observation. This will be passed in as the first argument to the `~astropy.time.Time` initializer, so it can be anything that `~astropy.time.Time` will accept (including a `~astropy.time.Time` object) target_name : str 'moon' or 'sun' location : `~astropy.coordinates.EarthLocation` Observatory location prev_next : str - either 'previous' or 'next' Test next rise/set or previous rise/set rise_set : str - either 'rising' or 'setting' Compute prev/next rise or prev/next set location : `~astropy.coordinates.EarthLocation` Location of observer horizon : `~astropy.units.Quantity` Degrees above/below actual horizon to use for calculating rise/set times (i.e., -6 deg horizon = civil twilight, etc.) Returns ------- ret1 : `~astropy.time.Time` Time of rise/set """ target = coord.get_body(target_name, t) t0 = _rise_set_trig(t, target, location, prev_next, rise_set) grid = t0 + np.linspace(-4*u.hour, 4*u.hour, 10) altaz_frame = coord.AltAz(obstime=grid, location=location) target = coord.get_body(target_name, grid) altaz = target.transform_to(altaz_frame) time_limits, altitude_limits = _horiz_cross(altaz.obstime, altaz.alt, rise_set, horizon) return _two_point_interp(time_limits, altitude_limits, horizon)
python
def calc_riseset(t, target_name, location, prev_next, rise_set, horizon): """ Time at next rise/set of ``target``. Parameters ---------- t : `~astropy.time.Time` or other (see below) Time of observation. This will be passed in as the first argument to the `~astropy.time.Time` initializer, so it can be anything that `~astropy.time.Time` will accept (including a `~astropy.time.Time` object) target_name : str 'moon' or 'sun' location : `~astropy.coordinates.EarthLocation` Observatory location prev_next : str - either 'previous' or 'next' Test next rise/set or previous rise/set rise_set : str - either 'rising' or 'setting' Compute prev/next rise or prev/next set location : `~astropy.coordinates.EarthLocation` Location of observer horizon : `~astropy.units.Quantity` Degrees above/below actual horizon to use for calculating rise/set times (i.e., -6 deg horizon = civil twilight, etc.) Returns ------- ret1 : `~astropy.time.Time` Time of rise/set """ target = coord.get_body(target_name, t) t0 = _rise_set_trig(t, target, location, prev_next, rise_set) grid = t0 + np.linspace(-4*u.hour, 4*u.hour, 10) altaz_frame = coord.AltAz(obstime=grid, location=location) target = coord.get_body(target_name, grid) altaz = target.transform_to(altaz_frame) time_limits, altitude_limits = _horiz_cross(altaz.obstime, altaz.alt, rise_set, horizon) return _two_point_interp(time_limits, altitude_limits, horizon)
[ "def", "calc_riseset", "(", "t", ",", "target_name", ",", "location", ",", "prev_next", ",", "rise_set", ",", "horizon", ")", ":", "target", "=", "coord", ".", "get_body", "(", "target_name", ",", "t", ")", "t0", "=", "_rise_set_trig", "(", "t", ",", "target", ",", "location", ",", "prev_next", ",", "rise_set", ")", "grid", "=", "t0", "+", "np", ".", "linspace", "(", "-", "4", "*", "u", ".", "hour", ",", "4", "*", "u", ".", "hour", ",", "10", ")", "altaz_frame", "=", "coord", ".", "AltAz", "(", "obstime", "=", "grid", ",", "location", "=", "location", ")", "target", "=", "coord", ".", "get_body", "(", "target_name", ",", "grid", ")", "altaz", "=", "target", ".", "transform_to", "(", "altaz_frame", ")", "time_limits", ",", "altitude_limits", "=", "_horiz_cross", "(", "altaz", ".", "obstime", ",", "altaz", ".", "alt", ",", "rise_set", ",", "horizon", ")", "return", "_two_point_interp", "(", "time_limits", ",", "altitude_limits", ",", "horizon", ")" ]
Time at next rise/set of ``target``. Parameters ---------- t : `~astropy.time.Time` or other (see below) Time of observation. This will be passed in as the first argument to the `~astropy.time.Time` initializer, so it can be anything that `~astropy.time.Time` will accept (including a `~astropy.time.Time` object) target_name : str 'moon' or 'sun' location : `~astropy.coordinates.EarthLocation` Observatory location prev_next : str - either 'previous' or 'next' Test next rise/set or previous rise/set rise_set : str - either 'rising' or 'setting' Compute prev/next rise or prev/next set location : `~astropy.coordinates.EarthLocation` Location of observer horizon : `~astropy.units.Quantity` Degrees above/below actual horizon to use for calculating rise/set times (i.e., -6 deg horizon = civil twilight, etc.) Returns ------- ret1 : `~astropy.time.Time` Time of rise/set
[ "Time", "at", "next", "rise", "/", "set", "of", "target", "." ]
train
https://github.com/HiPERCAM/hcam_widgets/blob/7219f0d96dd3a8ebe3139c7f542a72c02d02fce8/hcam_widgets/astro.py#L153-L198
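The public entry point calc_riseset above ties the helpers together; a hedged usage sketch follows, with an invented location and a -6 degree horizon, which the docstring equates with civil twilight.

    import astropy.coordinates as coord
    import astropy.units as u
    from astropy.time import Time
    from hcam_widgets.astro import calc_riseset

    location = coord.EarthLocation.from_geodetic(-17.88*u.deg, 28.76*u.deg, 2332*u.m)
    # next end of evening civil twilight: Sun setting through a -6 degree horizon
    twilight = calc_riseset(Time.now(), 'sun', location, 'next', 'setting', -6*u.deg)
    print(twilight.iso)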
HiPERCAM/hcam_widgets
hcam_widgets/astro.py
_horiz_cross
def _horiz_cross(t, alt, rise_set, horizon=0*u.degree): """ Find time ``t`` when values in array ``a`` go from negative to positive or positive to negative (exclude endpoints) ``return_limits`` will return nearest times to zero-crossing. Parameters ---------- t : `~astropy.time.Time` Grid of times alt : `~astropy.units.Quantity` Grid of altitudes rise_set : {"rising", "setting"} Calculate either rising or setting across the horizon horizon : float Number of degrees above/below actual horizon to use for calculating rise/set times (i.e., -6 deg horizon = civil twilight, etc.) Returns ------- Returns the lower and upper limits on the time and altitudes of the horizon crossing. """ if rise_set == 'rising': # Find index where altitude goes from below to above horizon condition = (alt[:-1] < horizon) * (alt[1:] > horizon) elif rise_set == 'setting': # Find index where altitude goes from above to below horizon condition = (alt[:-1] > horizon) * (alt[1:] < horizon) if np.count_nonzero(condition) == 0: warnmsg = ('Target does not cross horizon={} within ' '8 hours of trigonometric estimate'.format(horizon)) warnings.warn(warnmsg) # Fill in missing time with MAGIC_TIME time_inds = np.nan times = [np.nan, np.nan] altitudes = [np.nan, np.nan] else: time_inds = np.nonzero(condition)[0][0] times = t[time_inds:time_inds+2] altitudes = alt[time_inds:time_inds+2] return times, altitudes
python
def _horiz_cross(t, alt, rise_set, horizon=0*u.degree): """ Find time ``t`` when values in array ``a`` go from negative to positive or positive to negative (exclude endpoints) ``return_limits`` will return nearest times to zero-crossing. Parameters ---------- t : `~astropy.time.Time` Grid of times alt : `~astropy.units.Quantity` Grid of altitudes rise_set : {"rising", "setting"} Calculate either rising or setting across the horizon horizon : float Number of degrees above/below actual horizon to use for calculating rise/set times (i.e., -6 deg horizon = civil twilight, etc.) Returns ------- Returns the lower and upper limits on the time and altitudes of the horizon crossing. """ if rise_set == 'rising': # Find index where altitude goes from below to above horizon condition = (alt[:-1] < horizon) * (alt[1:] > horizon) elif rise_set == 'setting': # Find index where altitude goes from above to below horizon condition = (alt[:-1] > horizon) * (alt[1:] < horizon) if np.count_nonzero(condition) == 0: warnmsg = ('Target does not cross horizon={} within ' '8 hours of trigonometric estimate'.format(horizon)) warnings.warn(warnmsg) # Fill in missing time with MAGIC_TIME time_inds = np.nan times = [np.nan, np.nan] altitudes = [np.nan, np.nan] else: time_inds = np.nonzero(condition)[0][0] times = t[time_inds:time_inds+2] altitudes = alt[time_inds:time_inds+2] return times, altitudes
[ "def", "_horiz_cross", "(", "t", ",", "alt", ",", "rise_set", ",", "horizon", "=", "0", "*", "u", ".", "degree", ")", ":", "if", "rise_set", "==", "'rising'", ":", "# Find index where altitude goes from below to above horizon", "condition", "=", "(", "alt", "[", ":", "-", "1", "]", "<", "horizon", ")", "*", "(", "alt", "[", "1", ":", "]", ">", "horizon", ")", "elif", "rise_set", "==", "'setting'", ":", "# Find index where altitude goes from above to below horizon", "condition", "=", "(", "alt", "[", ":", "-", "1", "]", ">", "horizon", ")", "*", "(", "alt", "[", "1", ":", "]", "<", "horizon", ")", "if", "np", ".", "count_nonzero", "(", "condition", ")", "==", "0", ":", "warnmsg", "=", "(", "'Target does not cross horizon={} within '", "'8 hours of trigonometric estimate'", ".", "format", "(", "horizon", ")", ")", "warnings", ".", "warn", "(", "warnmsg", ")", "# Fill in missing time with MAGIC_TIME", "time_inds", "=", "np", ".", "nan", "times", "=", "[", "np", ".", "nan", ",", "np", ".", "nan", "]", "altitudes", "=", "[", "np", ".", "nan", ",", "np", ".", "nan", "]", "else", ":", "time_inds", "=", "np", ".", "nonzero", "(", "condition", ")", "[", "0", "]", "[", "0", "]", "times", "=", "t", "[", "time_inds", ":", "time_inds", "+", "2", "]", "altitudes", "=", "alt", "[", "time_inds", ":", "time_inds", "+", "2", "]", "return", "times", ",", "altitudes" ]
Find time ``t`` when values in array ``a`` go from negative to positive or positive to negative (exclude endpoints) ``return_limits`` will return nearest times to zero-crossing. Parameters ---------- t : `~astropy.time.Time` Grid of times alt : `~astropy.units.Quantity` Grid of altitudes rise_set : {"rising", "setting"} Calculate either rising or setting across the horizon horizon : float Number of degrees above/below actual horizon to use for calculating rise/set times (i.e., -6 deg horizon = civil twilight, etc.) Returns ------- Returns the lower and upper limits on the time and altitudes of the horizon crossing.
[ "Find", "time", "t", "when", "values", "in", "array", "a", "go", "from", "negative", "to", "positive", "or", "positive", "to", "negative", "(", "exclude", "endpoints", ")" ]
train
https://github.com/HiPERCAM/hcam_widgets/blob/7219f0d96dd3a8ebe3139c7f542a72c02d02fce8/hcam_widgets/astro.py#L202-L248
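The bracketing logic of _horiz_cross can be exercised on a synthetic altitude track; the numbers below are invented so that exactly one setting crossing falls inside the grid.

    import numpy as np
    import astropy.units as u
    from astropy.time import Time
    from hcam_widgets.astro import _horiz_cross

    grid = Time("2021-01-01T18:00:00") + np.linspace(-4, 4, 10)*u.hour
    alt = np.linspace(12, -33, 10)*u.deg        # falls through 0 deg once
    times, alts = _horiz_cross(grid, alt, 'setting', horizon=0*u.deg)
    # times/alts are the two samples bracketing the crossing,
    # ready to hand to _two_point_interp (next entry)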
HiPERCAM/hcam_widgets
hcam_widgets/astro.py
_two_point_interp
def _two_point_interp(times, altitudes, horizon=0*u.deg): """ Do linear interpolation between two ``altitudes`` at two ``times`` to determine the time where the altitude goes through zero. Parameters ---------- times : `~astropy.time.Time` Two times for linear interpolation between altitudes : array of `~astropy.units.Quantity` Two altitudes for linear interpolation between horizon : `~astropy.units.Quantity` Solve for the time when the altitude is equal to reference_alt. Returns ------- t : `~astropy.time.Time` Time when target crosses the horizon """ if not isinstance(times, Time): return MAGIC_TIME else: slope = (altitudes[1] - altitudes[0])/(times[1].jd - times[0].jd) return Time(times[1].jd - ((altitudes[1] - horizon)/slope).value, format='jd')
python
def _two_point_interp(times, altitudes, horizon=0*u.deg): """ Do linear interpolation between two ``altitudes`` at two ``times`` to determine the time where the altitude goes through zero. Parameters ---------- times : `~astropy.time.Time` Two times for linear interpolation between altitudes : array of `~astropy.units.Quantity` Two altitudes for linear interpolation between horizon : `~astropy.units.Quantity` Solve for the time when the altitude is equal to reference_alt. Returns ------- t : `~astropy.time.Time` Time when target crosses the horizon """ if not isinstance(times, Time): return MAGIC_TIME else: slope = (altitudes[1] - altitudes[0])/(times[1].jd - times[0].jd) return Time(times[1].jd - ((altitudes[1] - horizon)/slope).value, format='jd')
[ "def", "_two_point_interp", "(", "times", ",", "altitudes", ",", "horizon", "=", "0", "*", "u", ".", "deg", ")", ":", "if", "not", "isinstance", "(", "times", ",", "Time", ")", ":", "return", "MAGIC_TIME", "else", ":", "slope", "=", "(", "altitudes", "[", "1", "]", "-", "altitudes", "[", "0", "]", ")", "/", "(", "times", "[", "1", "]", ".", "jd", "-", "times", "[", "0", "]", ".", "jd", ")", "return", "Time", "(", "times", "[", "1", "]", ".", "jd", "-", "(", "(", "altitudes", "[", "1", "]", "-", "horizon", ")", "/", "slope", ")", ".", "value", ",", "format", "=", "'jd'", ")" ]
Do linear interpolation between two ``altitudes`` at two ``times`` to determine the time where the altitude goes through zero. Parameters ---------- times : `~astropy.time.Time` Two times for linear interpolation between altitudes : array of `~astropy.units.Quantity` Two altitudes for linear interpolation between horizon : `~astropy.units.Quantity` Solve for the time when the altitude is equal to reference_alt. Returns ------- t : `~astropy.time.Time` Time when target crosses the horizon
[ "Do", "linear", "interpolation", "between", "two", "altitudes", "at", "two", "times", "to", "determine", "the", "time", "where", "the", "altitude", "goes", "through", "zero", "." ]
train
https://github.com/HiPERCAM/hcam_widgets/blob/7219f0d96dd3a8ebe3139c7f542a72c02d02fce8/hcam_widgets/astro.py#L252-L281
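Continuing the example, _two_point_interp turns the bracketing samples into a crossing time by straight-line interpolation; the two samples here are invented.

    import astropy.units as u
    from astropy.time import Time
    from hcam_widgets.astro import _two_point_interp

    times = Time(["2021-01-01T17:06:40", "2021-01-01T18:00:00"])
    altitudes = [2, -3]*u.deg
    # altitude falls 5 deg over the interval, so the 0 deg crossing
    # lands 2/5 of the way through it
    t_cross = _two_point_interp(times, altitudes, horizon=0*u.deg)
    print(t_cross.iso)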
IceflowRE/unidown
unidown/dynamic_data.py
init_dirs
def init_dirs(main_dir: Path, logfilepath: Path): """ Initialize the main directories. :param main_dir: main directory :type main_dir: ~pathlib.Path :param logfilepath: log file :type logfilepath: ~pathlib.Path """ global MAIN_DIR, TEMP_DIR, DOWNLOAD_DIR, SAVESTAT_DIR, LOGFILE_PATH MAIN_DIR = main_dir TEMP_DIR = MAIN_DIR.joinpath(Path('temp/')) DOWNLOAD_DIR = MAIN_DIR.joinpath(Path('downloads/')) SAVESTAT_DIR = MAIN_DIR.joinpath(Path('savestates/')) LOGFILE_PATH = MAIN_DIR.joinpath(logfilepath)
python
def init_dirs(main_dir: Path, logfilepath: Path): """ Initialize the main directories. :param main_dir: main directory :type main_dir: ~pathlib.Path :param logfilepath: log file :type logfilepath: ~pathlib.Path """ global MAIN_DIR, TEMP_DIR, DOWNLOAD_DIR, SAVESTAT_DIR, LOGFILE_PATH MAIN_DIR = main_dir TEMP_DIR = MAIN_DIR.joinpath(Path('temp/')) DOWNLOAD_DIR = MAIN_DIR.joinpath(Path('downloads/')) SAVESTAT_DIR = MAIN_DIR.joinpath(Path('savestates/')) LOGFILE_PATH = MAIN_DIR.joinpath(logfilepath)
[ "def", "init_dirs", "(", "main_dir", ":", "Path", ",", "logfilepath", ":", "Path", ")", ":", "global", "MAIN_DIR", ",", "TEMP_DIR", ",", "DOWNLOAD_DIR", ",", "SAVESTAT_DIR", ",", "LOGFILE_PATH", "MAIN_DIR", "=", "main_dir", "TEMP_DIR", "=", "MAIN_DIR", ".", "joinpath", "(", "Path", "(", "'temp/'", ")", ")", "DOWNLOAD_DIR", "=", "MAIN_DIR", ".", "joinpath", "(", "Path", "(", "'downloads/'", ")", ")", "SAVESTAT_DIR", "=", "MAIN_DIR", ".", "joinpath", "(", "Path", "(", "'savestates/'", ")", ")", "LOGFILE_PATH", "=", "MAIN_DIR", ".", "joinpath", "(", "logfilepath", ")" ]
Initialize the main directories. :param main_dir: main directory :type main_dir: ~pathlib.Path :param logfilepath: log file :type logfilepath: ~pathlib.Path
[ "Initialize", "the", "main", "directories", "." ]
train
https://github.com/IceflowRE/unidown/blob/2a6f82ab780bb825668bfc55b67c11c4f72ec05c/unidown/dynamic_data.py#L37-L51
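A short sketch of init_dirs in use, assuming unidown is installed; the chosen directory name is arbitrary.

    from pathlib import Path
    from unidown import dynamic_data

    # put temp/, downloads/ and savestates/ under ./unidown_work and log there too
    dynamic_data.init_dirs(Path('./unidown_work'), Path('UniDown.log'))
    print(dynamic_data.DOWNLOAD_DIR)    # unidown_work/downloads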
IceflowRE/unidown
unidown/dynamic_data.py
reset
def reset(): """ Reset all dynamic variables to the default values. """ global MAIN_DIR, TEMP_DIR, DOWNLOAD_DIR, SAVESTAT_DIR, LOGFILE_PATH, USING_CORES, LOG_LEVEL, DISABLE_TQDM, \ SAVE_STATE_VERSION MAIN_DIR = Path('./') TEMP_DIR = MAIN_DIR.joinpath(Path('temp/')) DOWNLOAD_DIR = MAIN_DIR.joinpath(Path('downloads/')) SAVESTAT_DIR = MAIN_DIR.joinpath(Path('savestates/')) LOGFILE_PATH = MAIN_DIR.joinpath(Path('UniDown.log')) USING_CORES = 1 LOG_LEVEL = 'INFO' DISABLE_TQDM = False SAVE_STATE_VERSION = Version('1')
python
def reset(): """ Reset all dynamic variables to the default values. """ global MAIN_DIR, TEMP_DIR, DOWNLOAD_DIR, SAVESTAT_DIR, LOGFILE_PATH, USING_CORES, LOG_LEVEL, DISABLE_TQDM, \ SAVE_STATE_VERSION MAIN_DIR = Path('./') TEMP_DIR = MAIN_DIR.joinpath(Path('temp/')) DOWNLOAD_DIR = MAIN_DIR.joinpath(Path('downloads/')) SAVESTAT_DIR = MAIN_DIR.joinpath(Path('savestates/')) LOGFILE_PATH = MAIN_DIR.joinpath(Path('UniDown.log')) USING_CORES = 1 LOG_LEVEL = 'INFO' DISABLE_TQDM = False SAVE_STATE_VERSION = Version('1')
[ "def", "reset", "(", ")", ":", "global", "MAIN_DIR", ",", "TEMP_DIR", ",", "DOWNLOAD_DIR", ",", "SAVESTAT_DIR", ",", "LOGFILE_PATH", ",", "USING_CORES", ",", "LOG_LEVEL", ",", "DISABLE_TQDM", ",", "SAVE_STATE_VERSION", "MAIN_DIR", "=", "Path", "(", "'./'", ")", "TEMP_DIR", "=", "MAIN_DIR", ".", "joinpath", "(", "Path", "(", "'temp/'", ")", ")", "DOWNLOAD_DIR", "=", "MAIN_DIR", ".", "joinpath", "(", "Path", "(", "'downloads/'", ")", ")", "SAVESTAT_DIR", "=", "MAIN_DIR", ".", "joinpath", "(", "Path", "(", "'savestates/'", ")", ")", "LOGFILE_PATH", "=", "MAIN_DIR", ".", "joinpath", "(", "Path", "(", "'UniDown.log'", ")", ")", "USING_CORES", "=", "1", "LOG_LEVEL", "=", "'INFO'", "DISABLE_TQDM", "=", "False", "SAVE_STATE_VERSION", "=", "Version", "(", "'1'", ")" ]
Reset all dynamic variables to the default values.
[ "Reset", "all", "dynamic", "variables", "to", "the", "default", "values", "." ]
train
https://github.com/IceflowRE/unidown/blob/2a6f82ab780bb825668bfc55b67c11c4f72ec05c/unidown/dynamic_data.py#L54-L70
IceflowRE/unidown
unidown/dynamic_data.py
check_dirs
def check_dirs(): """ Check the directories if they exist. :raises FileExistsError: if a file exists but is not a directory """ dirs = [MAIN_DIR, TEMP_DIR, DOWNLOAD_DIR, SAVESTAT_DIR] for directory in dirs: if directory.exists() and not directory.is_dir(): raise FileExistsError(str(directory.resolve()) + " cannot be used as a directory.")
python
def check_dirs(): """ Check the directories if they exist. :raises FileExistsError: if a file exists but is not a directory """ dirs = [MAIN_DIR, TEMP_DIR, DOWNLOAD_DIR, SAVESTAT_DIR] for directory in dirs: if directory.exists() and not directory.is_dir(): raise FileExistsError(str(directory.resolve()) + " cannot be used as a directory.")
[ "def", "check_dirs", "(", ")", ":", "dirs", "=", "[", "MAIN_DIR", ",", "TEMP_DIR", ",", "DOWNLOAD_DIR", ",", "SAVESTAT_DIR", "]", "for", "directory", "in", "dirs", ":", "if", "directory", ".", "exists", "(", ")", "and", "not", "directory", ".", "is_dir", "(", ")", ":", "raise", "FileExistsError", "(", "str", "(", "directory", ".", "resolve", "(", ")", ")", "+", "\" cannot be used as a directory.\"", ")" ]
Check the directories if they exist. :raises FileExistsError: if a file exists but is not a directory
[ "Check", "the", "directories", "if", "they", "exist", "." ]
train
https://github.com/IceflowRE/unidown/blob/2a6f82ab780bb825668bfc55b67c11c4f72ec05c/unidown/dynamic_data.py#L73-L82
HiPERCAM/hcam_widgets
hcam_widgets/gtc/headers.py
parse_hstring
def parse_hstring(hs): """ Parse a single item from the telescope server into name, value, comment. """ # split the string on = and /, also stripping whitespace and annoying quotes name, value, comment = yield_three( [val.strip().strip("'") for val in filter(None, re.split("[=/]+", hs))] ) # if comment has a slash in it, put it back together try: len(comment) except: pass else: comment = '/'.join(comment) return name, value, comment
python
def parse_hstring(hs): """ Parse a single item from the telescope server into name, value, comment. """ # split the string on = and /, also stripping whitespace and annoying quotes name, value, comment = yield_three( [val.strip().strip("'") for val in filter(None, re.split("[=/]+", hs))] ) # if comment has a slash in it, put it back together try: len(comment) except: pass else: comment = '/'.join(comment) return name, value, comment
[ "def", "parse_hstring", "(", "hs", ")", ":", "# split the string on = and /, also stripping whitespace and annoying quotes", "name", ",", "value", ",", "comment", "=", "yield_three", "(", "[", "val", ".", "strip", "(", ")", ".", "strip", "(", "\"'\"", ")", "for", "val", "in", "filter", "(", "None", ",", "re", ".", "split", "(", "\"[=/]+\"", ",", "hs", ")", ")", "]", ")", "# if comment has a slash in it, put it back together", "try", ":", "len", "(", "comment", ")", "except", ":", "pass", "else", ":", "comment", "=", "'/'", ".", "join", "(", "comment", ")", "return", "name", ",", "value", ",", "comment" ]
Parse a single item from the telescope server into name, value, comment.
[ "Parse", "a", "single", "item", "from", "the", "telescope", "server", "into", "name", "value", "comment", "." ]
train
https://github.com/HiPERCAM/hcam_widgets/blob/7219f0d96dd3a8ebe3139c7f542a72c02d02fce8/hcam_widgets/gtc/headers.py#L27-L43
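A sketch of parse_hstring on one invented server item; yield_three is a helper from the same module (not shown here) that is assumed to split the parts into three values.

    from hcam_widgets.gtc.headers import parse_hstring

    hs = "LST = '03:25:12.6' / local sidereal time"
    name, value, comment = parse_hstring(hs)
    print(name, value)    # LST 03:25:12.6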
HiPERCAM/hcam_widgets
hcam_widgets/gtc/headers.py
create_header_from_telpars
def create_header_from_telpars(telpars): """ Create a list of fits header items from GTC telescope pars. The GTC telescope server gives a list of string describing FITS header items such as RA, DEC, etc. Arguments --------- telpars : list list returned by server call to getTelescopeParams """ # pars is a list of strings describing tel info in FITS # style, each entry in the list is a different class of # thing (weather, telescope, instrument etc). # first, we munge it into a single list of strings, each one # describing a single item whilst also stripping whitespace pars = [val.strip() for val in (';').join(telpars).split(';') if val.strip() != ''] # apply parse_hstring to everything in pars with warnings.catch_warnings(): warnings.simplefilter('ignore', fits.verify.VerifyWarning) hdr = fits.Header(map(parse_hstring, pars)) return hdr
python
def create_header_from_telpars(telpars): """ Create a list of fits header items from GTC telescope pars. The GTC telescope server gives a list of string describing FITS header items such as RA, DEC, etc. Arguments --------- telpars : list list returned by server call to getTelescopeParams """ # pars is a list of strings describing tel info in FITS # style, each entry in the list is a different class of # thing (weather, telescope, instrument etc). # first, we munge it into a single list of strings, each one # describing a single item whilst also stripping whitespace pars = [val.strip() for val in (';').join(telpars).split(';') if val.strip() != ''] # apply parse_hstring to everything in pars with warnings.catch_warnings(): warnings.simplefilter('ignore', fits.verify.VerifyWarning) hdr = fits.Header(map(parse_hstring, pars)) return hdr
[ "def", "create_header_from_telpars", "(", "telpars", ")", ":", "# pars is a list of strings describing tel info in FITS", "# style, each entry in the list is a different class of", "# thing (weather, telescope, instrument etc).", "# first, we munge it into a single list of strings, each one", "# describing a single item whilst also stripping whitespace", "pars", "=", "[", "val", ".", "strip", "(", ")", "for", "val", "in", "(", "';'", ")", ".", "join", "(", "telpars", ")", ".", "split", "(", "';'", ")", "if", "val", ".", "strip", "(", ")", "!=", "''", "]", "# apply parse_hstring to everything in pars", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "'ignore'", ",", "fits", ".", "verify", ".", "VerifyWarning", ")", "hdr", "=", "fits", ".", "Header", "(", "map", "(", "parse_hstring", ",", "pars", ")", ")", "return", "hdr" ]
Create a list of fits header items from GTC telescope pars. The GTC telescope server gives a list of string describing FITS header items such as RA, DEC, etc. Arguments --------- telpars : list list returned by server call to getTelescopeParams
[ "Create", "a", "list", "of", "fits", "header", "items", "from", "GTC", "telescope", "pars", "." ]
train
https://github.com/HiPERCAM/hcam_widgets/blob/7219f0d96dd3a8ebe3139c7f542a72c02d02fce8/hcam_widgets/gtc/headers.py#L46-L72
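create_header_from_telpars can then be fed the ';'-separated strings the GTC server returns; the entries below are invented to mimic that format.

    from hcam_widgets.gtc.headers import create_header_from_telpars

    telpars = [
        "TELESCOP = 'GTC' / telescope name ; AIRMASS = 1.2 / airmass",
        "HUMIDITY = 35 / relative humidity",
    ]
    hdr = create_header_from_telpars(telpars)
    print(hdr['TELESCOP'], hdr['AIRMASS'])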
HiPERCAM/hcam_widgets
hcam_widgets/gtc/headers.py
add_gtc_header_table_row
def add_gtc_header_table_row(t, telpars): """ Add a row with current values to GTC table Arguments --------- t : `~astropy.table.Table` The table to append row to telpars : list list returned by server call to getTelescopeParams """ now = Time.now().mjd hdr = create_header_from_telpars(telpars) # make dictionary of vals to put in table vals = {k: v for k, v in hdr.items() if k in VARIABLE_GTC_KEYS} vals['MJD'] = now # store LST as hourangle vals['LST'] = Longitude(vals['LST'], unit=u.hour).hourangle t.add_row(vals)
python
def add_gtc_header_table_row(t, telpars): """ Add a row with current values to GTC table Arguments --------- t : `~astropy.table.Table` The table to append row to telpars : list list returned by server call to getTelescopeParams """ now = Time.now().mjd hdr = create_header_from_telpars(telpars) # make dictionary of vals to put in table vals = {k: v for k, v in hdr.items() if k in VARIABLE_GTC_KEYS} vals['MJD'] = now # store LST as hourangle vals['LST'] = Longitude(vals['LST'], unit=u.hour).hourangle t.add_row(vals)
[ "def", "add_gtc_header_table_row", "(", "t", ",", "telpars", ")", ":", "now", "=", "Time", ".", "now", "(", ")", ".", "mjd", "hdr", "=", "create_header_from_telpars", "(", "telpars", ")", "# make dictionary of vals to put in table", "vals", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "hdr", ".", "items", "(", ")", "if", "k", "in", "VARIABLE_GTC_KEYS", "}", "vals", "[", "'MJD'", "]", "=", "now", "# store LST as hourangle", "vals", "[", "'LST'", "]", "=", "Longitude", "(", "vals", "[", "'LST'", "]", ",", "unit", "=", "u", ".", "hour", ")", ".", "hourangle", "t", ".", "add_row", "(", "vals", ")" ]
Add a row with current values to GTC table Arguments --------- t : `~astropy.table.Table` The table to append row to telpars : list list returned by server call to getTelescopeParams
[ "Add", "a", "row", "with", "current", "values", "to", "GTC", "table" ]
train
https://github.com/HiPERCAM/hcam_widgets/blob/7219f0d96dd3a8ebe3139c7f542a72c02d02fce8/hcam_widgets/gtc/headers.py#L80-L99
IceflowRE/unidown
unidown/plugin/plugin_info.py
PluginInfo.from_protobuf
def from_protobuf(cls, proto: PluginInfoProto) -> PluginInfo: """ Constructor from protobuf. :param proto: protobuf structure :type proto: ~unidown.plugin.protobuf.plugin_info_pb2.PluginInfoProto :return: the PluginInfo :rtype: ~unidown.plugin.plugin_info.PluginInfo :raises ValueError: name of PluginInfo does not exist or is empty inside the protobuf :raises ValueError: version of PluginInfo does not exist or is empty inside the protobuf :raises ValueError: host of PluginInfo does not exist or is empty inside the protobuf """ if proto.name == "": raise ValueError("name of PluginInfo does not exist or is empty inside the protobuf.") elif proto.version == "": raise ValueError("version of PluginInfo does not exist or is empty inside the protobuf.") elif proto.host == "": raise ValueError("host of PluginInfo does not exist or is empty inside the protobuf.") return cls(proto.name, proto.version, proto.host)
python
def from_protobuf(cls, proto: PluginInfoProto) -> PluginInfo: """ Constructor from protobuf. :param proto: protobuf structure :type proto: ~unidown.plugin.protobuf.plugin_info_pb2.PluginInfoProto :return: the PluginInfo :rtype: ~unidown.plugin.plugin_info.PluginInfo :raises ValueError: name of PluginInfo does not exist or is empty inside the protobuf :raises ValueError: version of PluginInfo does not exist or is empty inside the protobuf :raises ValueError: host of PluginInfo does not exist or is empty inside the protobuf """ if proto.name == "": raise ValueError("name of PluginInfo does not exist or is empty inside the protobuf.") elif proto.version == "": raise ValueError("version of PluginInfo does not exist or is empty inside the protobuf.") elif proto.host == "": raise ValueError("host of PluginInfo does not exist or is empty inside the protobuf.") return cls(proto.name, proto.version, proto.host)
[ "def", "from_protobuf", "(", "cls", ",", "proto", ":", "PluginInfoProto", ")", "->", "PluginInfo", ":", "if", "proto", ".", "name", "==", "\"\"", ":", "raise", "ValueError", "(", "\"name of PluginInfo does not exist or is empty inside the protobuf.\"", ")", "elif", "proto", ".", "version", "==", "\"\"", ":", "raise", "ValueError", "(", "\"version of PluginInfo does not exist or is empty inside the protobuf.\"", ")", "elif", "proto", ".", "host", "==", "\"\"", ":", "raise", "ValueError", "(", "\"host of PluginInfo does not exist or is empty inside the protobuf.\"", ")", "return", "cls", "(", "proto", ".", "name", ",", "proto", ".", "version", ",", "proto", ".", "host", ")" ]
Constructor from protobuf. :param proto: protobuf structure :type proto: ~unidown.plugin.protobuf.plugin_info_pb2.PluginInfoProto :return: the PluginInfo :rtype: ~unidown.plugin.plugin_info.PluginInfo :raises ValueError: name of PluginInfo does not exist or is empty inside the protobuf :raises ValueError: version of PluginInfo does not exist or is empty inside the protobuf :raises ValueError: host of PluginInfo does not exist or is empty inside the protobuf
[ "Constructor", "from", "protobuf", "." ]
train
https://github.com/IceflowRE/unidown/blob/2a6f82ab780bb825668bfc55b67c11c4f72ec05c/unidown/plugin/plugin_info.py#L48-L66
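A hedged sketch of PluginInfo.from_protobuf, building the protobuf message by hand with made-up plugin metadata; the import paths follow the :rtype: references in the docstring above.

    from unidown.plugin.plugin_info import PluginInfo
    from unidown.plugin.protobuf.plugin_info_pb2 import PluginInfoProto

    proto = PluginInfoProto()
    proto.name = 'example'          # hypothetical plugin name
    proto.version = '1.0.0'
    proto.host = 'example.com'
    info = PluginInfo.from_protobuf(proto)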
IceflowRE/unidown
unidown/plugin/plugin_info.py
PluginInfo.to_protobuf
def to_protobuf(self) -> PluginInfoProto: """ Create protobuf item. :return: protobuf structure :rtype: ~unidown.plugin.protobuf.link_item_pb2.PluginInfoProto """ proto = PluginInfoProto() proto.name = self.name proto.version = str(self.version) proto.host = self.host return proto
python
def to_protobuf(self) -> PluginInfoProto: """ Create protobuf item. :return: protobuf structure :rtype: ~unidown.plugin.protobuf.link_item_pb2.PluginInfoProto """ proto = PluginInfoProto() proto.name = self.name proto.version = str(self.version) proto.host = self.host return proto
[ "def", "to_protobuf", "(", "self", ")", "->", "PluginInfoProto", ":", "proto", "=", "PluginInfoProto", "(", ")", "proto", ".", "name", "=", "self", ".", "name", "proto", ".", "version", "=", "str", "(", "self", ".", "version", ")", "proto", ".", "host", "=", "self", ".", "host", "return", "proto" ]
Create protobuf item. :return: protobuf structure :rtype: ~unidown.plugin.protobuf.link_item_pb2.PluginInfoProto
[ "Create", "protobuf", "item", "." ]
train
https://github.com/IceflowRE/unidown/blob/2a6f82ab780bb825668bfc55b67c11c4f72ec05c/unidown/plugin/plugin_info.py#L79-L90
IceflowRE/unidown
unidown/plugin/save_state.py
SaveState.from_protobuf
def from_protobuf(cls, proto: SaveStateProto) -> SaveState: """ Constructor from protobuf. Can raise ValueErrors from called from_protobuf() parsers. :param proto: protobuf structure :type proto: ~unidown.plugin.protobuf.save_state_pb2.SaveStateProto :return: the SaveState :rtype: ~unidown.plugin.save_state.SaveState :raises ValueError: version of SaveState does not exist or is empty inside the protobuf :raises ~packaging.version.InvalidVersion: version is not PEP440 conform """ data_dict = {} for key, link_item in proto.data.items(): data_dict[key] = LinkItem.from_protobuf(link_item) if proto.version == "": raise ValueError("version of SaveState does not exist or is empty inside the protobuf.") try: version = Version(proto.version) except InvalidVersion: raise InvalidVersion(f"Plugin version is not PEP440 conform: {proto.version}") return cls(version, PluginInfo.from_protobuf(proto.plugin_info), Timestamp.ToDatetime(proto.last_update), data_dict)
python
def from_protobuf(cls, proto: SaveStateProto) -> SaveState: """ Constructor from protobuf. Can raise ValueErrors from called from_protobuf() parsers. :param proto: protobuf structure :type proto: ~unidown.plugin.protobuf.save_state_pb2.SaveStateProto :return: the SaveState :rtype: ~unidown.plugin.save_state.SaveState :raises ValueError: version of SaveState does not exist or is empty inside the protobuf :raises ~packaging.version.InvalidVersion: version is not PEP440 conform """ data_dict = {} for key, link_item in proto.data.items(): data_dict[key] = LinkItem.from_protobuf(link_item) if proto.version == "": raise ValueError("version of SaveState does not exist or is empty inside the protobuf.") try: version = Version(proto.version) except InvalidVersion: raise InvalidVersion(f"Plugin version is not PEP440 conform: {proto.version}") return cls(version, PluginInfo.from_protobuf(proto.plugin_info), Timestamp.ToDatetime(proto.last_update), data_dict)
[ "def", "from_protobuf", "(", "cls", ",", "proto", ":", "SaveStateProto", ")", "->", "SaveState", ":", "data_dict", "=", "{", "}", "for", "key", ",", "link_item", "in", "proto", ".", "data", ".", "items", "(", ")", ":", "data_dict", "[", "key", "]", "=", "LinkItem", ".", "from_protobuf", "(", "link_item", ")", "if", "proto", ".", "version", "==", "\"\"", ":", "raise", "ValueError", "(", "\"version of SaveState does not exist or is empty inside the protobuf.\"", ")", "try", ":", "version", "=", "Version", "(", "proto", ".", "version", ")", "except", "InvalidVersion", ":", "raise", "InvalidVersion", "(", "f\"Plugin version is not PEP440 conform: {proto.version}\"", ")", "return", "cls", "(", "version", ",", "PluginInfo", ".", "from_protobuf", "(", "proto", ".", "plugin_info", ")", ",", "Timestamp", ".", "ToDatetime", "(", "proto", ".", "last_update", ")", ",", "data_dict", ")" ]
Constructor from protobuf. Can raise ValueErrors from called from_protobuf() parsers. :param proto: protobuf structure :type proto: ~unidown.plugin.protobuf.save_state_pb2.SaveStateProto :return: the SaveState :rtype: ~unidown.plugin.save_state.SaveState :raises ValueError: version of SaveState does not exist or is empty inside the protobuf :raises ~packaging.version.InvalidVersion: version is not PEP440 conform
[ "Constructor", "from", "protobuf", ".", "Can", "raise", "ValueErrors", "from", "called", "from_protobuf", "()", "parsers", "." ]
train
https://github.com/IceflowRE/unidown/blob/2a6f82ab780bb825668bfc55b67c11c4f72ec05c/unidown/plugin/save_state.py#L55-L76
IceflowRE/unidown
unidown/plugin/save_state.py
SaveState.to_protobuf
def to_protobuf(self) -> SaveStateProto: """ Create protobuf item. :return: protobuf structure :rtype: ~unidown.plugin.protobuf.save_state_pb2.SaveStateProto """ result = SaveStateProto() result.version = str(self.version) result.last_update.CopyFrom(datetime_to_timestamp(self.last_update)) result.plugin_info.CopyFrom(self.plugin_info.to_protobuf()) for key, link_item in self.link_item_dict.items(): result.data[key].CopyFrom(link_item.to_protobuf()) return result
python
def to_protobuf(self) -> SaveStateProto: """ Create protobuf item. :return: protobuf structure :rtype: ~unidown.plugin.protobuf.save_state_pb2.SaveStateProto """ result = SaveStateProto() result.version = str(self.version) result.last_update.CopyFrom(datetime_to_timestamp(self.last_update)) result.plugin_info.CopyFrom(self.plugin_info.to_protobuf()) for key, link_item in self.link_item_dict.items(): result.data[key].CopyFrom(link_item.to_protobuf()) return result
[ "def", "to_protobuf", "(", "self", ")", "->", "SaveStateProto", ":", "result", "=", "SaveStateProto", "(", ")", "result", ".", "version", "=", "str", "(", "self", ".", "version", ")", "result", ".", "last_update", ".", "CopyFrom", "(", "datetime_to_timestamp", "(", "self", ".", "last_update", ")", ")", "result", ".", "plugin_info", ".", "CopyFrom", "(", "self", ".", "plugin_info", ".", "to_protobuf", "(", ")", ")", "for", "key", ",", "link_item", "in", "self", ".", "link_item_dict", ".", "items", "(", ")", ":", "result", ".", "data", "[", "key", "]", ".", "CopyFrom", "(", "link_item", ".", "to_protobuf", "(", ")", ")", "return", "result" ]
Create protobuf item. :return: protobuf structure :rtype: ~unidown.plugin.protobuf.save_state_pb2.SaveStateProto
[ "Create", "protobuf", "item", "." ]
train
https://github.com/IceflowRE/unidown/blob/2a6f82ab780bb825668bfc55b67c11c4f72ec05c/unidown/plugin/save_state.py#L78-L91
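The two SaveState converters above round-trip each other; here is a sketch with made-up values, the constructor argument order being inferred from the from_protobuf classmethod shown earlier.

    from datetime import datetime
    from packaging.version import Version
    from unidown.plugin.plugin_info import PluginInfo
    from unidown.plugin.save_state import SaveState

    info = PluginInfo('example', '1.0.0', 'example.com')    # hypothetical plugin
    state = SaveState(Version('1'), info, datetime(2019, 1, 1), {})
    restored = SaveState.from_protobuf(state.to_protobuf())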
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/inflect.py
definite_article
def definite_article(word, gender=MALE, role=SUBJECT): """ Returns the definite article (der/die/das/die) for a given word. """ return article_definite.get((gender[:1].lower(), role[:3].lower()))
python
def definite_article(word, gender=MALE, role=SUBJECT): """ Returns the definite article (der/die/das/die) for a given word. """ return article_definite.get((gender[:1].lower(), role[:3].lower()))
[ "def", "definite_article", "(", "word", ",", "gender", "=", "MALE", ",", "role", "=", "SUBJECT", ")", ":", "return", "article_definite", ".", "get", "(", "(", "gender", "[", ":", "1", "]", ".", "lower", "(", ")", ",", "role", "[", ":", "3", "]", ".", "lower", "(", ")", ")", ")" ]
Returns the definite article (der/die/das/die) for a given word.
[ "Returns", "the", "definite", "article", "(", "der", "/", "die", "/", "das", "/", "die", ")", "for", "a", "given", "word", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/inflect.py#L87-L90
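A small sketch of definite_article; MALE, FEMALE and SUBJECT are module-level constants of the same inflect module (import path mirrors the file path in the repository), and the expected strings follow from the der/die/das/die mapping the docstring names.

    from textblob_de.ext._pattern.text.de.inflect import definite_article, MALE, FEMALE, SUBJECT

    print(definite_article('Hund', gender=MALE, role=SUBJECT))   # expected: der
    print(definite_article('Frau', gender=FEMALE, role=SUBJECT)) # expected: die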
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/inflect.py
indefinite_article
def indefinite_article(word, gender=MALE, role=SUBJECT): """ Returns the indefinite article (ein) for a given word. """ return article_indefinite.get((gender[:1].lower(), role[:3].lower()))
python
def indefinite_article(word, gender=MALE, role=SUBJECT): """ Returns the indefinite article (ein) for a given word. """ return article_indefinite.get((gender[:1].lower(), role[:3].lower()))
[ "def", "indefinite_article", "(", "word", ",", "gender", "=", "MALE", ",", "role", "=", "SUBJECT", ")", ":", "return", "article_indefinite", ".", "get", "(", "(", "gender", "[", ":", "1", "]", ".", "lower", "(", ")", ",", "role", "[", ":", "3", "]", ".", "lower", "(", ")", ")", ")" ]
Returns the indefinite article (ein) for a given word.
[ "Returns", "the", "indefinite", "article", "(", "ein", ")", "for", "a", "given", "word", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/inflect.py#L92-L95
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/inflect.py
article
def article(word, function=INDEFINITE, gender=MALE, role=SUBJECT): """ Returns the indefinite (ein) or definite (der/die/das/die) article for the given word. """ return function == DEFINITE \ and definite_article(word, gender, role) \ or indefinite_article(word, gender, role)
python
def article(word, function=INDEFINITE, gender=MALE, role=SUBJECT): """ Returns the indefinite (ein) or definite (der/die/das/die) article for the given word. """ return function == DEFINITE \ and definite_article(word, gender, role) \ or indefinite_article(word, gender, role)
[ "def", "article", "(", "word", ",", "function", "=", "INDEFINITE", ",", "gender", "=", "MALE", ",", "role", "=", "SUBJECT", ")", ":", "return", "function", "==", "DEFINITE", "and", "definite_article", "(", "word", ",", "gender", ",", "role", ")", "or", "indefinite_article", "(", "word", ",", "gender", ",", "role", ")" ]
Returns the indefinite (ein) or definite (der/die/das/die) article for the given word.
[ "Returns", "the", "indefinite", "(", "ein", ")", "or", "definite", "(", "der", "/", "die", "/", "das", "/", "die", ")", "article", "for", "the", "given", "word", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/inflect.py#L100-L105
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/inflect.py
referenced
def referenced(word, article=INDEFINITE, gender=MALE, role=SUBJECT): """ Returns a string with the article + the word. """ return "%s %s" % (_article(word, article, gender, role), word)
python
def referenced(word, article=INDEFINITE, gender=MALE, role=SUBJECT): """ Returns a string with the article + the word. """ return "%s %s" % (_article(word, article, gender, role), word)
[ "def", "referenced", "(", "word", ",", "article", "=", "INDEFINITE", ",", "gender", "=", "MALE", ",", "role", "=", "SUBJECT", ")", ":", "return", "\"%s %s\"", "%", "(", "_article", "(", "word", ",", "article", ",", "gender", ",", "role", ")", ",", "word", ")" ]
Returns a string with the article + the word.
[ "Returns", "a", "string", "with", "the", "article", "+", "the", "word", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/inflect.py#L108-L111
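referenced simply glues the chosen article onto the word, apparently through a module-internal _article alias for the article function above; a sketch with a made-up noun:

    from textblob_de.ext._pattern.text.de.inflect import referenced, DEFINITE, MALE

    print(referenced('Hund'))                                 # expected: ein Hund
    print(referenced('Hund', article=DEFINITE, gender=MALE))  # expected: der Hund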
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/inflect.py
gender
def gender(word, pos=NOUN): """ Returns the gender (MALE, FEMALE or NEUTRAL) for nouns (majority vote). Returns None for words that are not nouns. """ w = word.lower() if pos == NOUN: # Default rules (baseline = 32%). if w.endswith(gender_masculine): return MASCULINE if w.endswith(gender_feminine): return FEMININE if w.endswith(gender_neuter): return NEUTER # Majority vote. for g in gender_majority_vote: if w.endswith(gender_majority_vote[g]): return g
python
def gender(word, pos=NOUN): """ Returns the gender (MALE, FEMALE or NEUTRAL) for nouns (majority vote). Returns None for words that are not nouns. """ w = word.lower() if pos == NOUN: # Default rules (baseline = 32%). if w.endswith(gender_masculine): return MASCULINE if w.endswith(gender_feminine): return FEMININE if w.endswith(gender_neuter): return NEUTER # Majority vote. for g in gender_majority_vote: if w.endswith(gender_majority_vote[g]): return g
[ "def", "gender", "(", "word", ",", "pos", "=", "NOUN", ")", ":", "w", "=", "word", ".", "lower", "(", ")", "if", "pos", "==", "NOUN", ":", "# Default rules (baseline = 32%).", "if", "w", ".", "endswith", "(", "gender_masculine", ")", ":", "return", "MASCULINE", "if", "w", ".", "endswith", "(", "gender_feminine", ")", ":", "return", "FEMININE", "if", "w", ".", "endswith", "(", "gender_neuter", ")", ":", "return", "NEUTER", "# Majority vote.", "for", "g", "in", "gender_majority_vote", ":", "if", "w", ".", "endswith", "(", "gender_majority_vote", "[", "g", "]", ")", ":", "return", "g" ]
Returns the gender (MALE, FEMALE or NEUTRAL) for nouns (majority vote). Returns None for words that are not nouns.
[ "Returns", "the", "gender", "(", "MALE", "FEMALE", "or", "NEUTRAL", ")", "for", "nouns", "(", "majority", "vote", ")", ".", "Returns", "None", "for", "words", "that", "are", "not", "nouns", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/inflect.py#L147-L163
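The suffix rules can be probed directly; the contents of gender_masculine/feminine/neuter are not shown in this record, so the expected values below assume the standard German suffix rules ('-ung' feminine, '-chen' neuter) are among them:

>>> from textblob_de.ext._pattern.text.de.inflect import gender
>>> gender("Zeitung")          # expected: FEMININE
>>> gender("Mädchen")          # expected: NEUTER
>>> gender("läuft", pos="VB")  # not a noun, falls through; expected: None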
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/inflect.py
pluralize
def pluralize(word, pos=NOUN, gender=MALE, role=SUBJECT, custom={}): """ Returns the plural of a given word. The inflection is based on probability rather than gender and role. """ w = word.lower().capitalize() if word in custom: return custom[word] if pos == NOUN: for a, b in plural_inflections: if w.endswith(a): return w[:-len(a)] + b # Default rules (baseline = 69%). if w.startswith("ge"): return w if w.endswith("gie"): return w if w.endswith("e"): return w + "n" if w.endswith("ien"): return w[:-2] + "um" if w.endswith(("au", "ein", "eit", "er", "en", "el", "chen", "mus", u"tät", "tik", "tum", "u")): return w if w.endswith(("ant", "ei", "enz", "ion", "ist", "or", "schaft", "tur", "ung")): return w + "en" if w.endswith("in"): return w + "nen" if w.endswith("nis"): return w + "se" if w.endswith(("eld", "ild", "ind")): return w + "er" if w.endswith("o"): return w + "s" if w.endswith("a"): return w[:-1] + "en" # Inflect common umlaut vowels: Kopf => Köpfe. if w.endswith(("all", "and", "ang", "ank", "atz", "auf", "ock", "opf", "uch", "uss")): umlaut = w[-3] umlaut = umlaut.replace("a", u"ä") umlaut = umlaut.replace("o", u"ö") umlaut = umlaut.replace("u", u"ü") return w[:-3] + umlaut + w[-2:] + "e" for a, b in ( ("ag", u"äge"), ("ann", u"änner"), ("aum", u"äume"), ("aus", u"äuser"), ("zug", u"züge")): if w.endswith(a): return w[:-len(a)] + b return w + "e" return w
python
def pluralize(word, pos=NOUN, gender=MALE, role=SUBJECT, custom={}): """ Returns the plural of a given word. The inflection is based on probability rather than gender and role. """ w = word.lower().capitalize() if word in custom: return custom[word] if pos == NOUN: for a, b in plural_inflections: if w.endswith(a): return w[:-len(a)] + b # Default rules (baseline = 69%). if w.startswith("ge"): return w if w.endswith("gie"): return w if w.endswith("e"): return w + "n" if w.endswith("ien"): return w[:-2] + "um" if w.endswith(("au", "ein", "eit", "er", "en", "el", "chen", "mus", u"tät", "tik", "tum", "u")): return w if w.endswith(("ant", "ei", "enz", "ion", "ist", "or", "schaft", "tur", "ung")): return w + "en" if w.endswith("in"): return w + "nen" if w.endswith("nis"): return w + "se" if w.endswith(("eld", "ild", "ind")): return w + "er" if w.endswith("o"): return w + "s" if w.endswith("a"): return w[:-1] + "en" # Inflect common umlaut vowels: Kopf => Köpfe. if w.endswith(("all", "and", "ang", "ank", "atz", "auf", "ock", "opf", "uch", "uss")): umlaut = w[-3] umlaut = umlaut.replace("a", u"ä") umlaut = umlaut.replace("o", u"ö") umlaut = umlaut.replace("u", u"ü") return w[:-3] + umlaut + w[-2:] + "e" for a, b in ( ("ag", u"äge"), ("ann", u"änner"), ("aum", u"äume"), ("aus", u"äuser"), ("zug", u"züge")): if w.endswith(a): return w[:-len(a)] + b return w + "e" return w
[ "def", "pluralize", "(", "word", ",", "pos", "=", "NOUN", ",", "gender", "=", "MALE", ",", "role", "=", "SUBJECT", ",", "custom", "=", "{", "}", ")", ":", "w", "=", "word", ".", "lower", "(", ")", ".", "capitalize", "(", ")", "if", "word", "in", "custom", ":", "return", "custom", "[", "word", "]", "if", "pos", "==", "NOUN", ":", "for", "a", ",", "b", "in", "plural_inflections", ":", "if", "w", ".", "endswith", "(", "a", ")", ":", "return", "w", "[", ":", "-", "len", "(", "a", ")", "]", "+", "b", "# Default rules (baseline = 69%).", "if", "w", ".", "startswith", "(", "\"ge\"", ")", ":", "return", "w", "if", "w", ".", "endswith", "(", "\"gie\"", ")", ":", "return", "w", "if", "w", ".", "endswith", "(", "\"e\"", ")", ":", "return", "w", "+", "\"n\"", "if", "w", ".", "endswith", "(", "\"ien\"", ")", ":", "return", "w", "[", ":", "-", "2", "]", "+", "\"um\"", "if", "w", ".", "endswith", "(", "(", "\"au\"", ",", "\"ein\"", ",", "\"eit\"", ",", "\"er\"", ",", "\"en\"", ",", "\"el\"", ",", "\"chen\"", ",", "\"mus\"", ",", "u\"tät\",", " ", "tik\",", " ", "tum\",", " ", "u\")", ")", ":", "", "return", "w", "if", "w", ".", "endswith", "(", "(", "\"ant\"", ",", "\"ei\"", ",", "\"enz\"", ",", "\"ion\"", ",", "\"ist\"", ",", "\"or\"", ",", "\"schaft\"", ",", "\"tur\"", ",", "\"ung\"", ")", ")", ":", "return", "w", "+", "\"en\"", "if", "w", ".", "endswith", "(", "\"in\"", ")", ":", "return", "w", "+", "\"nen\"", "if", "w", ".", "endswith", "(", "\"nis\"", ")", ":", "return", "w", "+", "\"se\"", "if", "w", ".", "endswith", "(", "(", "\"eld\"", ",", "\"ild\"", ",", "\"ind\"", ")", ")", ":", "return", "w", "+", "\"er\"", "if", "w", ".", "endswith", "(", "\"o\"", ")", ":", "return", "w", "+", "\"s\"", "if", "w", ".", "endswith", "(", "\"a\"", ")", ":", "return", "w", "[", ":", "-", "1", "]", "+", "\"en\"", "# Inflect common umlaut vowels: Kopf => Köpfe.", "if", "w", ".", "endswith", "(", "(", "\"all\"", ",", "\"and\"", ",", "\"ang\"", ",", "\"ank\"", ",", "\"atz\"", ",", "\"auf\"", ",", "\"ock\"", ",", "\"opf\"", ",", "\"uch\"", ",", "\"uss\"", ")", ")", ":", "umlaut", "=", "w", "[", "-", "3", "]", "umlaut", "=", "umlaut", ".", "replace", "(", "\"a\"", ",", "u\"ä\")", "", "umlaut", "=", "umlaut", ".", "replace", "(", "\"o\"", ",", "u\"ö\")", "", "umlaut", "=", "umlaut", ".", "replace", "(", "\"u\"", ",", "u\"ü\")", "", "return", "w", "[", ":", "-", "3", "]", "+", "umlaut", "+", "w", "[", "-", "2", ":", "]", "+", "\"e\"", "for", "a", ",", "b", "in", "(", "(", "\"ag\"", ",", "u\"äge\")", ",", " ", "(", "\"ann\"", ",", "u\"änner\")", ",", " ", "(", "\"aum\"", ",", "u\"äume\")", ",", " ", "(", "\"aus\"", ",", "u\"äuser\")", ",", " ", "(", "\"zug\"", ",", "u\"züge\")", ")", ":", "", "if", "w", ".", "endswith", "(", "a", ")", ":", "return", "w", "[", ":", "-", "len", "(", "a", ")", "]", "+", "b", "return", "w", "+", "\"e\"", "return", "w" ]
Returns the plural of a given word. The inflection is based on probability rather than gender and role.
[ "Returns", "the", "plural", "of", "a", "given", "word", ".", "The", "inflection", "is", "based", "on", "probability", "rather", "than", "gender", "and", "role", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/inflect.py#L212-L262
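The umlaut branch documents its own example (Kopf => Köpfe); a few probes whose expected values follow directly from the rules above (not a verified run):

>>> from textblob_de.ext._pattern.text.de.inflect import pluralize
>>> pluralize("Kopf")       # '-opf' umlaut rule; expected: 'Köpfe'
>>> pluralize("Auge")       # ends in '-e', appends '-n'; expected: 'Augen'
>>> pluralize("Lehrerin")   # '-in' takes '-nen'; expected: 'Lehrerinnen'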
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/inflect.py
singularize
def singularize(word, pos=NOUN, gender=MALE, role=SUBJECT, custom={}): """ Returns the singular of a given word. The inflection is based on probability rather than gender and role. """ w = word.lower().capitalize() if word in custom: return custom[word] if word in singular: return singular[word] if pos == NOUN: for a, b in singular_inflections: if w.endswith(a): return w[:-len(a)] + b # Default rule: strip known plural suffixes (baseline = 51%). for suffix in ("nen", "en", "n", "e", "er", "s"): if w.endswith(suffix): w = w[:-len(suffix)] break # Corrections (these add about 1% accuracy): if w.endswith(("rr", "rv", "nz")): return w + "e" return w return w
python
def singularize(word, pos=NOUN, gender=MALE, role=SUBJECT, custom={}): """ Returns the singular of a given word. The inflection is based on probability rather than gender and role. """ w = word.lower().capitalize() if word in custom: return custom[word] if word in singular: return singular[word] if pos == NOUN: for a, b in singular_inflections: if w.endswith(a): return w[:-len(a)] + b # Default rule: strip known plural suffixes (baseline = 51%). for suffix in ("nen", "en", "n", "e", "er", "s"): if w.endswith(suffix): w = w[:-len(suffix)] break # Corrections (these add about 1% accuracy): if w.endswith(("rr", "rv", "nz")): return w + "e" return w return w
[ "def", "singularize", "(", "word", ",", "pos", "=", "NOUN", ",", "gender", "=", "MALE", ",", "role", "=", "SUBJECT", ",", "custom", "=", "{", "}", ")", ":", "w", "=", "word", ".", "lower", "(", ")", ".", "capitalize", "(", ")", "if", "word", "in", "custom", ":", "return", "custom", "[", "word", "]", "if", "word", "in", "singular", ":", "return", "singular", "[", "word", "]", "if", "pos", "==", "NOUN", ":", "for", "a", ",", "b", "in", "singular_inflections", ":", "if", "w", ".", "endswith", "(", "a", ")", ":", "return", "w", "[", ":", "-", "len", "(", "a", ")", "]", "+", "b", "# Default rule: strip known plural suffixes (baseline = 51%).", "for", "suffix", "in", "(", "\"nen\"", ",", "\"en\"", ",", "\"n\"", ",", "\"e\"", ",", "\"er\"", ",", "\"s\"", ")", ":", "if", "w", ".", "endswith", "(", "suffix", ")", ":", "w", "=", "w", "[", ":", "-", "len", "(", "suffix", ")", "]", "break", "# Corrections (these add about 1% accuracy):", "if", "w", ".", "endswith", "(", "(", "\"rr\"", ",", "\"rv\"", ",", "\"nz\"", ")", ")", ":", "return", "w", "+", "\"e\"", "return", "w", "return", "w" ]
Returns the singular of a given word. The inflection is based on probability rather than gender and role.
[ "Returns", "the", "singular", "of", "a", "given", "word", ".", "The", "inflection", "is", "based", "on", "probability", "rather", "than", "gender", "and", "role", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/inflect.py#L317-L339
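A couple of probes for the suffix-stripping default rule (expected values derived from the code above, not a verified run):

>>> from textblob_de.ext._pattern.text.de.inflect import singularize
>>> singularize("Zeitungen")     # strips '-en'; expected: 'Zeitung'
>>> singularize("Lehrerinnen")   # strips '-nen'; expected: 'Lehrerin'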
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/inflect.py
attributive
def attributive(adjective, gender=MALE, role=SUBJECT, article=None): """ For a predicative adjective, returns the attributive form (lowercase). In German, the attributive is formed with -e, -em, -en, -er or -es, depending on gender (masculine, feminine, neuter or plural) and role (nominative, accusative, dative, genitive). """ w, g, c, a = \ adjective.lower(), gender[:1].lower(), role[:3].lower(), article and article.lower() or None if w in adjective_attributive: return adjective_attributive[w] if a is None \ or a in ("mir", "dir", "ihm") \ or a in ("ein", "etwas", "mehr") \ or a.startswith(("all", "mehrer", "wenig", "viel")): return w + adjectives_strong.get((g, c), "") if a.startswith(("ein", "kein")) \ or a.startswith(("mein", "dein", "sein", "ihr", "Ihr", "unser", "euer")): return w + adjectives_mixed.get((g, c), "") if a in ("arm", "alt", "all", "der", "die", "das", "den", "dem", "des") \ or a.startswith(( "derselb", "derjenig", "jed", "jeglich", "jen", "manch", "dies", "solch", "welch")): return w + adjectives_weak.get((g, c), "") # Default to strong inflection. return w + adjectives_strong.get((g, c), "")
python
def attributive(adjective, gender=MALE, role=SUBJECT, article=None): """ For a predicative adjective, returns the attributive form (lowercase). In German, the attributive is formed with -e, -em, -en, -er or -es, depending on gender (masculine, feminine, neuter or plural) and role (nominative, accusative, dative, genitive). """ w, g, c, a = \ adjective.lower(), gender[:1].lower(), role[:3].lower(), article and article.lower() or None if w in adjective_attributive: return adjective_attributive[w] if a is None \ or a in ("mir", "dir", "ihm") \ or a in ("ein", "etwas", "mehr") \ or a.startswith(("all", "mehrer", "wenig", "viel")): return w + adjectives_strong.get((g, c), "") if a.startswith(("ein", "kein")) \ or a.startswith(("mein", "dein", "sein", "ihr", "Ihr", "unser", "euer")): return w + adjectives_mixed.get((g, c), "") if a in ("arm", "alt", "all", "der", "die", "das", "den", "dem", "des") \ or a.startswith(( "derselb", "derjenig", "jed", "jeglich", "jen", "manch", "dies", "solch", "welch")): return w + adjectives_weak.get((g, c), "") # Default to strong inflection. return w + adjectives_strong.get((g, c), "")
[ "def", "attributive", "(", "adjective", ",", "gender", "=", "MALE", ",", "role", "=", "SUBJECT", ",", "article", "=", "None", ")", ":", "w", ",", "g", ",", "c", ",", "a", "=", "adjective", ".", "lower", "(", ")", ",", "gender", "[", ":", "1", "]", ".", "lower", "(", ")", ",", "role", "[", ":", "3", "]", ".", "lower", "(", ")", ",", "article", "and", "article", ".", "lower", "(", ")", "or", "None", "if", "w", "in", "adjective_attributive", ":", "return", "adjective_attributive", "[", "w", "]", "if", "a", "is", "None", "or", "a", "in", "(", "\"mir\"", ",", "\"dir\"", ",", "\"ihm\"", ")", "or", "a", "in", "(", "\"ein\"", ",", "\"etwas\"", ",", "\"mehr\"", ")", "or", "a", ".", "startswith", "(", "(", "\"all\"", ",", "\"mehrer\"", ",", "\"wenig\"", ",", "\"viel\"", ")", ")", ":", "return", "w", "+", "adjectives_strong", ".", "get", "(", "(", "g", ",", "c", ")", ",", "\"\"", ")", "if", "a", ".", "startswith", "(", "(", "\"ein\"", ",", "\"kein\"", ")", ")", "or", "a", ".", "startswith", "(", "(", "\"mein\"", ",", "\"dein\"", ",", "\"sein\"", ",", "\"ihr\"", ",", "\"Ihr\"", ",", "\"unser\"", ",", "\"euer\"", ")", ")", ":", "return", "w", "+", "adjectives_mixed", ".", "get", "(", "(", "g", ",", "c", ")", ",", "\"\"", ")", "if", "a", "in", "(", "\"arm\"", ",", "\"alt\"", ",", "\"all\"", ",", "\"der\"", ",", "\"die\"", ",", "\"das\"", ",", "\"den\"", ",", "\"dem\"", ",", "\"des\"", ")", "or", "a", ".", "startswith", "(", "(", "\"derselb\"", ",", "\"derjenig\"", ",", "\"jed\"", ",", "\"jeglich\"", ",", "\"jen\"", ",", "\"manch\"", ",", "\"dies\"", ",", "\"solch\"", ",", "\"welch\"", ")", ")", ":", "return", "w", "+", "adjectives_weak", ".", "get", "(", "(", "g", ",", "c", ")", ",", "\"\"", ")", "# Default to strong inflection.", "return", "w", "+", "adjectives_strong", ".", "get", "(", "(", "g", ",", "c", ")", ",", "\"\"", ")" ]
For a predicative adjective, returns the attributive form (lowercase). In German, the attributive is formed with -e, -em, -en, -er or -es, depending on gender (masculine, feminine, neuter or plural) and role (nominative, accusative, dative, genitive).
[ "For", "a", "predicative", "adjective", "returns", "the", "attributive", "form", "(", "lowercase", ")", ".", "In", "German", "the", "attributive", "is", "formed", "with", "-", "e", "-", "em", "-", "en", "-", "er", "or", "-", "es", "depending", "on", "gender", "(", "masculine", "feminine", "neuter", "or", "plural", ")", "and", "role", "(", "nominative", "accusative", "dative", "genitive", ")", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/inflect.py#L517-L541
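A sketch of the three declension branches, reusing the phrases quoted in the predicative() docstring below; the exact endings assume the usual weak/mixed/strong tables behind adjectives_weak/mixed/strong:

>>> from textblob_de.ext._pattern.text.de.inflect import attributive, FEMALE
>>> attributive("klein", article="der")                   # weak, as in 'der kleine Prinz'; expected: 'kleine'
>>> attributive("schön", gender=FEMALE, article="eine")   # mixed, as in 'eine schöne Frau'; expected: 'schöne'
>>> attributive("klein")                                  # no article -> strong; expected: 'kleiner'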
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/inflect.py
predicative
def predicative(adjective): """ Returns the predicative adjective (lowercase). In German, the attributive form preceding a noun is always used: "ein kleiner Junge" => strong, masculine, nominative, "eine schöne Frau" => mixed, feminine, nominative, "der kleine Prinz" => weak, masculine, nominative, etc. The predicative is useful for lemmatization. """ w = adjective.lower() if len(w) > 3: for suffix in ("em", "en", "er", "es", "e"): if w.endswith(suffix): b = w[:max(-len(suffix), -(len(w)-3))] if b.endswith("bl"): # plausibles => plausibel b = b[:-1] + "el" if b.endswith("pr"): # propres => proper b = b[:-1] + "er" return b return w
python
def predicative(adjective): """ Returns the predicative adjective (lowercase). In German, the attributive form preceding a noun is always used: "ein kleiner Junge" => strong, masculine, nominative, "eine schöne Frau" => mixed, feminine, nominative, "der kleine Prinz" => weak, masculine, nominative, etc. The predicative is useful for lemmatization. """ w = adjective.lower() if len(w) > 3: for suffix in ("em", "en", "er", "es", "e"): if w.endswith(suffix): b = w[:max(-len(suffix), -(len(w)-3))] if b.endswith("bl"): # plausibles => plausibel b = b[:-1] + "el" if b.endswith("pr"): # propres => proper b = b[:-1] + "er" return b return w
[ "def", "predicative", "(", "adjective", ")", ":", "w", "=", "adjective", ".", "lower", "(", ")", "if", "len", "(", "w", ")", ">", "3", ":", "for", "suffix", "in", "(", "\"em\"", ",", "\"en\"", ",", "\"er\"", ",", "\"es\"", ",", "\"e\"", ")", ":", "if", "w", ".", "endswith", "(", "suffix", ")", ":", "b", "=", "w", "[", ":", "max", "(", "-", "len", "(", "suffix", ")", ",", "-", "(", "len", "(", "w", ")", "-", "3", ")", ")", "]", "if", "b", ".", "endswith", "(", "\"bl\"", ")", ":", "# plausibles => plausibel", "b", "=", "b", "[", ":", "-", "1", "]", "+", "\"el\"", "if", "b", ".", "endswith", "(", "\"pr\"", ")", ":", "# propres => proper", "b", "=", "b", "[", ":", "-", "1", "]", "+", "\"er\"", "return", "b", "return", "w" ]
Returns the predicative adjective (lowercase). In German, the attributive form preceding a noun is always used: "ein kleiner Junge" => strong, masculine, nominative, "eine schöne Frau" => mixed, feminine, nominative, "der kleine Prinz" => weak, masculine, nominative, etc. The predicative is useful for lemmatization.
[ "Returns", "the", "predicative", "adjective", "(", "lowercase", ")", ".", "In", "German", "the", "attributive", "form", "preceding", "a", "noun", "is", "always", "used", ":", "ein", "kleiner", "Junge", "=", ">", "strong", "masculine", "nominative", "eine", "schöne", "Frau", "=", ">", "mixed", "feminine", "nominative", "der", "kleine", "Prinz", "=", ">", "weak", "masculine", "nominative", "etc", ".", "The", "predicative", "is", "useful", "for", "lemmatization", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/inflect.py#L543-L561
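The docstring's own phrases can be fed back in; expected values follow the suffix-stripping loop above:

>>> from textblob_de.ext._pattern.text.de.inflect import predicative
>>> predicative("kleiner")   # expected: 'klein'
>>> predicative("schöne")    # expected: 'schön'
>>> predicative("propres")   # '-pr' correction from the code comment; expected: 'proper'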
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/inflect.py
grade
def grade(adjective, suffix=COMPARATIVE): """ Returns the comparative or superlative form of the given (inflected) adjective. """ b = predicative(adjective) # groß => großt, schön => schönst if suffix == SUPERLATIVE and b.endswith(("s", u"ß")): suffix = suffix[1:] # große => großere, schönes => schöneres return adjective[:len(b)] + suffix + adjective[len(b):]
python
def grade(adjective, suffix=COMPARATIVE): """ Returns the comparative or superlative form of the given (inflected) adjective. """ b = predicative(adjective) # groß => großt, schön => schönst if suffix == SUPERLATIVE and b.endswith(("s", u"ß")): suffix = suffix[1:] # große => großere, schönes => schöneres return adjective[:len(b)] + suffix + adjective[len(b):]
[ "def", "grade", "(", "adjective", ",", "suffix", "=", "COMPARATIVE", ")", ":", "b", "=", "predicative", "(", "adjective", ")", "# groß => großt, schön => schönst", "if", "suffix", "==", "SUPERLATIVE", "and", "b", ".", "endswith", "(", "(", "\"s\"", ",", "u\"ß\")", ")", ":", "", "suffix", "=", "suffix", "[", "1", ":", "]", "# große => großere, schönes => schöneres", "return", "adjective", "[", ":", "len", "(", "b", ")", "]", "+", "suffix", "+", "adjective", "[", "len", "(", "b", ")", ":", "]" ]
Returns the comparative or superlative form of the given (inflected) adjective.
[ "Returns", "the", "comparative", "or", "superlative", "form", "of", "the", "given", "(", "inflected", ")", "adjective", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/inflect.py#L568-L576
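A sketch for grade(), assuming COMPARATIVE and SUPERLATIVE are the usual 'er'/'st' suffix constants exported by the module; expected values follow the code and its comments:

>>> from textblob_de.ext._pattern.text.de.inflect import grade, SUPERLATIVE
>>> grade("schön")                 # comparative by default; expected: 'schöner'
>>> grade("schön", SUPERLATIVE)    # expected: 'schönst'
>>> grade("große")                 # suffix spliced before the inflection; expected: 'großere'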
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/inflect.py
Verbs.find_lemma
def find_lemma(self, verb): """ Returns the base form of the given inflected verb, using a rule-based approach. """ v = verb.lower() # Common prefixes: be-finden and emp-finden probably inflect like finden. if not (v.startswith("ge") and v.endswith("t")): # Probably gerund. for prefix in prefixes: if v.startswith(prefix) and v[len(prefix):] in self.inflections: return prefix + self.inflections[v[len(prefix):]] # Common sufixes: setze nieder => niedersetzen. b, suffix = " " in v and v.split()[:2] or (v, "") # Infinitive -ln: trommeln. if b.endswith(("ln", "rn")): return b # Lemmatize regular inflections. for x in ("test", "est", "end", "ten", "tet", "en", "et", "te", "st", "e", "t"): if b.endswith(x): b = b[:-len(x)]; break # Subjunctive: hielte => halten, schnitte => schneiden. for x, y in ( ("ieb", "eib"), ( "ied", "eid"), ( "ief", "auf" ), ( "ieg", "eig" ), ("iel", "alt"), ("ien", "ein"), ("iess", "ass"), (u"ieß", u"aß" ), ( "iff", "eif" ), ("iss", "eiss"), (u"iß", u"eiß"), ( "it", "eid"), ( "oss", "iess"), (u"öss", "iess")): if b.endswith(x): b = b[:-len(x)] + y; break b = b.replace("eeiss", "eiss") b = b.replace("eeid", "eit") # Subjunctive: wechselte => wechseln if not b.endswith(("e", "l")) and not (b.endswith("er") and len(b) >= 3 and not b[-3] in VOWELS): b = b + "e" # abknallst != abknalln => abknallen if b.endswith(("hl", "ll", "ul", "eil")): b = b + "e" # Strip ge- from (likely) gerund: if b.startswith("ge") and v.endswith("t"): b = b[2:] # Corrections (these add about 1.5% accuracy): if b.endswith(("lnde", "rnde")): b = b[:-3] if b.endswith(("ae", "al", u"öe", u"üe")): b = b.rstrip("e") + "te" if b.endswith(u"äl"): b = b + "e" return suffix + b + "n"
python
def find_lemma(self, verb): """ Returns the base form of the given inflected verb, using a rule-based approach. """ v = verb.lower() # Common prefixes: be-finden and emp-finden probably inflect like finden. if not (v.startswith("ge") and v.endswith("t")): # Probably gerund. for prefix in prefixes: if v.startswith(prefix) and v[len(prefix):] in self.inflections: return prefix + self.inflections[v[len(prefix):]] # Common sufixes: setze nieder => niedersetzen. b, suffix = " " in v and v.split()[:2] or (v, "") # Infinitive -ln: trommeln. if b.endswith(("ln", "rn")): return b # Lemmatize regular inflections. for x in ("test", "est", "end", "ten", "tet", "en", "et", "te", "st", "e", "t"): if b.endswith(x): b = b[:-len(x)]; break # Subjunctive: hielte => halten, schnitte => schneiden. for x, y in ( ("ieb", "eib"), ( "ied", "eid"), ( "ief", "auf" ), ( "ieg", "eig" ), ("iel", "alt"), ("ien", "ein"), ("iess", "ass"), (u"ieß", u"aß" ), ( "iff", "eif" ), ("iss", "eiss"), (u"iß", u"eiß"), ( "it", "eid"), ( "oss", "iess"), (u"öss", "iess")): if b.endswith(x): b = b[:-len(x)] + y; break b = b.replace("eeiss", "eiss") b = b.replace("eeid", "eit") # Subjunctive: wechselte => wechseln if not b.endswith(("e", "l")) and not (b.endswith("er") and len(b) >= 3 and not b[-3] in VOWELS): b = b + "e" # abknallst != abknalln => abknallen if b.endswith(("hl", "ll", "ul", "eil")): b = b + "e" # Strip ge- from (likely) gerund: if b.startswith("ge") and v.endswith("t"): b = b[2:] # Corrections (these add about 1.5% accuracy): if b.endswith(("lnde", "rnde")): b = b[:-3] if b.endswith(("ae", "al", u"öe", u"üe")): b = b.rstrip("e") + "te" if b.endswith(u"äl"): b = b + "e" return suffix + b + "n"
[ "def", "find_lemma", "(", "self", ",", "verb", ")", ":", "v", "=", "verb", ".", "lower", "(", ")", "# Common prefixes: be-finden and emp-finden probably inflect like finden.", "if", "not", "(", "v", ".", "startswith", "(", "\"ge\"", ")", "and", "v", ".", "endswith", "(", "\"t\"", ")", ")", ":", "# Probably gerund.", "for", "prefix", "in", "prefixes", ":", "if", "v", ".", "startswith", "(", "prefix", ")", "and", "v", "[", "len", "(", "prefix", ")", ":", "]", "in", "self", ".", "inflections", ":", "return", "prefix", "+", "self", ".", "inflections", "[", "v", "[", "len", "(", "prefix", ")", ":", "]", "]", "# Common sufixes: setze nieder => niedersetzen.", "b", ",", "suffix", "=", "\" \"", "in", "v", "and", "v", ".", "split", "(", ")", "[", ":", "2", "]", "or", "(", "v", ",", "\"\"", ")", "# Infinitive -ln: trommeln.", "if", "b", ".", "endswith", "(", "(", "\"ln\"", ",", "\"rn\"", ")", ")", ":", "return", "b", "# Lemmatize regular inflections.", "for", "x", "in", "(", "\"test\"", ",", "\"est\"", ",", "\"end\"", ",", "\"ten\"", ",", "\"tet\"", ",", "\"en\"", ",", "\"et\"", ",", "\"te\"", ",", "\"st\"", ",", "\"e\"", ",", "\"t\"", ")", ":", "if", "b", ".", "endswith", "(", "x", ")", ":", "b", "=", "b", "[", ":", "-", "len", "(", "x", ")", "]", "break", "# Subjunctive: hielte => halten, schnitte => schneiden.", "for", "x", ",", "y", "in", "(", "(", "\"ieb\"", ",", "\"eib\"", ")", ",", "(", "\"ied\"", ",", "\"eid\"", ")", ",", "(", "\"ief\"", ",", "\"auf\"", ")", ",", "(", "\"ieg\"", ",", "\"eig\"", ")", ",", "(", "\"iel\"", ",", "\"alt\"", ")", ",", "(", "\"ien\"", ",", "\"ein\"", ")", ",", "(", "\"iess\"", ",", "\"ass\"", ")", ",", "(", "u\"ieß\",", " ", "\"aß\" ", " ", "(", "\"", "ff\", ", "\"", "if\" )", " ", "(", "i", "ss\", ", "\"", "iss\"),", " ", "", "(", "u\"iß\",", " ", "\"eiß\"),", " ", "(", " ", "t\", ", "\"", "id\"),", " ", "(", "\"", "ss\", ", " ", "ess\"),", " ", "(", "\"", "öss\", \"", "i", "ss\")):", "", "", "", "if", "b", ".", "endswith", "(", "x", ")", ":", "b", "=", "b", "[", ":", "-", "len", "(", "x", ")", "]", "+", "y", "break", "b", "=", "b", ".", "replace", "(", "\"eeiss\"", ",", "\"eiss\"", ")", "b", "=", "b", ".", "replace", "(", "\"eeid\"", ",", "\"eit\"", ")", "# Subjunctive: wechselte => wechseln", "if", "not", "b", ".", "endswith", "(", "(", "\"e\"", ",", "\"l\"", ")", ")", "and", "not", "(", "b", ".", "endswith", "(", "\"er\"", ")", "and", "len", "(", "b", ")", ">=", "3", "and", "not", "b", "[", "-", "3", "]", "in", "VOWELS", ")", ":", "b", "=", "b", "+", "\"e\"", "# abknallst != abknalln => abknallen", "if", "b", ".", "endswith", "(", "(", "\"hl\"", ",", "\"ll\"", ",", "\"ul\"", ",", "\"eil\"", ")", ")", ":", "b", "=", "b", "+", "\"e\"", "# Strip ge- from (likely) gerund:", "if", "b", ".", "startswith", "(", "\"ge\"", ")", "and", "v", ".", "endswith", "(", "\"t\"", ")", ":", "b", "=", "b", "[", "2", ":", "]", "# Corrections (these add about 1.5% accuracy):", "if", "b", ".", "endswith", "(", "(", "\"lnde\"", ",", "\"rnde\"", ")", ")", ":", "b", "=", "b", "[", ":", "-", "3", "]", "if", "b", ".", "endswith", "(", "(", "\"ae\"", ",", "\"al\"", ",", "u\"öe\",", " ", "\"üe\"))", ":", "", "", "b", "=", "b", ".", "rstrip", "(", "\"e\"", ")", "+", "\"te\"", "if", "b", ".", "endswith", "(", "u\"äl\")", ":", "", "b", "=", "b", "+", "\"e\"", "return", "suffix", "+", "b", "+", "\"n\"" ]
Returns the base form of the given inflected verb, using a rule-based approach.
[ "Returns", "the", "base", "form", "of", "the", "given", "inflected", "verb", "using", "a", "rule", "-", "based", "approach", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/inflect.py#L372-L413
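find_lemma() is a method of the Verbs class; the sketch below assumes the module instantiates a verbs = Verbs() singleton (a pattern-style convention, not visible in this record). Expected values follow the rules and the 'hielte => halten' comment:

>>> from textblob_de.ext._pattern.text.de.inflect import verbs   # assumed singleton
>>> verbs.find_lemma("sagte")    # regular '-te' past; expected: 'sagen'
>>> verbs.find_lemma("hielte")   # subjunctive 'iel' -> 'alt'; expected: 'halten'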
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/inflect.py
Verbs.find_lexeme
def find_lexeme(self, verb): """ For a regular verb (base form), returns the forms using a rule-based approach. """ v = verb.lower() # Stem = infinitive minus -en, -ln, -rn. b = b0 = re.sub("en$", "", re.sub("ln$", "l", re.sub("rn$", "r", v))) # Split common prefixes. x, x1, x2 = "", "", "" for prefix in prefix_separable: if v.startswith(prefix): b, x = b[len(prefix):], prefix x1 = (" " + x).rstrip() x2 = x + "ge" break # Present tense 1sg and subjunctive -el: handeln => ich handle, du handlest. pl = b.endswith("el") and b[:-2]+"l" or b # Present tense 1pl -el: handeln => wir handeln pw = v.endswith(("ln", "rn")) and v or b+"en" # Present tense ending in -d or -t gets -e: pr = b.endswith(("d", "t")) and b+"e" or b # Present tense 2sg gets -st, unless stem ends with -s or -z. p2 = pr.endswith(("s","z")) and pr+"t" or pr+"st" # Present participle: spiel + -end, arbeiten + -d: pp = v.endswith(("en", "ln", "rn")) and v+"d" or v+"end" # Past tense regular: pt = encode_sz(pr) + "t" # Past participle: haushalten => hausgehalten ge = (v.startswith(prefix_inseparable) or b.endswith(("r","t"))) and pt or "ge"+pt ge = x and x+"ge"+pt or ge # Present subjunctive: stem + -e, -est, -en, -et: s1 = encode_sz(pl) # Past subjunctive: past (usually with Umlaut) + -e, -est, -en, -et: s2 = encode_sz(pt) # Construct the lexeme: lexeme = a = [ v, pl+"e"+x1, p2+x1, pr+"t"+x1, pw+x1, pr+"t"+x1, pp, # present pt+"e"+x1, pt+"est"+x1, pt+"e"+x1, pt+"en"+x1, pt+"et"+x1, ge, # past b+"e"+x1, pr+"t"+x1, x+pw, # imperative s1+"e"+x1, s1+"est"+x1, s1+"en"+x1, s1+"et"+x1, # subjunctive I s2+"e"+x1, s2+"est"+x1, s2+"en"+x1, s2+"et"+x1 # subjunctive II ] # Encode Eszett (ß) and attempt to retrieve from the lexicon. # Decode Eszett for present and imperative. if encode_sz(v) in self: a = self[encode_sz(v)] a = [decode_sz(v) for v in a[:7]] + a[7:13] + [decode_sz(v) for v in a[13:20]] + a[20:] # Since the lexicon does not contain imperative for all verbs, don't simply return it. # Instead, update the rule-based lexeme with inflections from the lexicon. return [a[i] or lexeme[i] for i in range(len(a))]
python
def find_lexeme(self, verb): """ For a regular verb (base form), returns the forms using a rule-based approach. """ v = verb.lower() # Stem = infinitive minus -en, -ln, -rn. b = b0 = re.sub("en$", "", re.sub("ln$", "l", re.sub("rn$", "r", v))) # Split common prefixes. x, x1, x2 = "", "", "" for prefix in prefix_separable: if v.startswith(prefix): b, x = b[len(prefix):], prefix x1 = (" " + x).rstrip() x2 = x + "ge" break # Present tense 1sg and subjunctive -el: handeln => ich handle, du handlest. pl = b.endswith("el") and b[:-2]+"l" or b # Present tense 1pl -el: handeln => wir handeln pw = v.endswith(("ln", "rn")) and v or b+"en" # Present tense ending in -d or -t gets -e: pr = b.endswith(("d", "t")) and b+"e" or b # Present tense 2sg gets -st, unless stem ends with -s or -z. p2 = pr.endswith(("s","z")) and pr+"t" or pr+"st" # Present participle: spiel + -end, arbeiten + -d: pp = v.endswith(("en", "ln", "rn")) and v+"d" or v+"end" # Past tense regular: pt = encode_sz(pr) + "t" # Past participle: haushalten => hausgehalten ge = (v.startswith(prefix_inseparable) or b.endswith(("r","t"))) and pt or "ge"+pt ge = x and x+"ge"+pt or ge # Present subjunctive: stem + -e, -est, -en, -et: s1 = encode_sz(pl) # Past subjunctive: past (usually with Umlaut) + -e, -est, -en, -et: s2 = encode_sz(pt) # Construct the lexeme: lexeme = a = [ v, pl+"e"+x1, p2+x1, pr+"t"+x1, pw+x1, pr+"t"+x1, pp, # present pt+"e"+x1, pt+"est"+x1, pt+"e"+x1, pt+"en"+x1, pt+"et"+x1, ge, # past b+"e"+x1, pr+"t"+x1, x+pw, # imperative s1+"e"+x1, s1+"est"+x1, s1+"en"+x1, s1+"et"+x1, # subjunctive I s2+"e"+x1, s2+"est"+x1, s2+"en"+x1, s2+"et"+x1 # subjunctive II ] # Encode Eszett (ß) and attempt to retrieve from the lexicon. # Decode Eszett for present and imperative. if encode_sz(v) in self: a = self[encode_sz(v)] a = [decode_sz(v) for v in a[:7]] + a[7:13] + [decode_sz(v) for v in a[13:20]] + a[20:] # Since the lexicon does not contain imperative for all verbs, don't simply return it. # Instead, update the rule-based lexeme with inflections from the lexicon. return [a[i] or lexeme[i] for i in range(len(a))]
[ "def", "find_lexeme", "(", "self", ",", "verb", ")", ":", "v", "=", "verb", ".", "lower", "(", ")", "# Stem = infinitive minus -en, -ln, -rn.", "b", "=", "b0", "=", "re", ".", "sub", "(", "\"en$\"", ",", "\"\"", ",", "re", ".", "sub", "(", "\"ln$\"", ",", "\"l\"", ",", "re", ".", "sub", "(", "\"rn$\"", ",", "\"r\"", ",", "v", ")", ")", ")", "# Split common prefixes.", "x", ",", "x1", ",", "x2", "=", "\"\"", ",", "\"\"", ",", "\"\"", "for", "prefix", "in", "prefix_separable", ":", "if", "v", ".", "startswith", "(", "prefix", ")", ":", "b", ",", "x", "=", "b", "[", "len", "(", "prefix", ")", ":", "]", ",", "prefix", "x1", "=", "(", "\" \"", "+", "x", ")", ".", "rstrip", "(", ")", "x2", "=", "x", "+", "\"ge\"", "break", "# Present tense 1sg and subjunctive -el: handeln => ich handle, du handlest.", "pl", "=", "b", ".", "endswith", "(", "\"el\"", ")", "and", "b", "[", ":", "-", "2", "]", "+", "\"l\"", "or", "b", "# Present tense 1pl -el: handeln => wir handeln", "pw", "=", "v", ".", "endswith", "(", "(", "\"ln\"", ",", "\"rn\"", ")", ")", "and", "v", "or", "b", "+", "\"en\"", "# Present tense ending in -d or -t gets -e:", "pr", "=", "b", ".", "endswith", "(", "(", "\"d\"", ",", "\"t\"", ")", ")", "and", "b", "+", "\"e\"", "or", "b", "# Present tense 2sg gets -st, unless stem ends with -s or -z.", "p2", "=", "pr", ".", "endswith", "(", "(", "\"s\"", ",", "\"z\"", ")", ")", "and", "pr", "+", "\"t\"", "or", "pr", "+", "\"st\"", "# Present participle: spiel + -end, arbeiten + -d:", "pp", "=", "v", ".", "endswith", "(", "(", "\"en\"", ",", "\"ln\"", ",", "\"rn\"", ")", ")", "and", "v", "+", "\"d\"", "or", "v", "+", "\"end\"", "# Past tense regular:", "pt", "=", "encode_sz", "(", "pr", ")", "+", "\"t\"", "# Past participle: haushalten => hausgehalten", "ge", "=", "(", "v", ".", "startswith", "(", "prefix_inseparable", ")", "or", "b", ".", "endswith", "(", "(", "\"r\"", ",", "\"t\"", ")", ")", ")", "and", "pt", "or", "\"ge\"", "+", "pt", "ge", "=", "x", "and", "x", "+", "\"ge\"", "+", "pt", "or", "ge", "# Present subjunctive: stem + -e, -est, -en, -et:", "s1", "=", "encode_sz", "(", "pl", ")", "# Past subjunctive: past (usually with Umlaut) + -e, -est, -en, -et:", "s2", "=", "encode_sz", "(", "pt", ")", "# Construct the lexeme:", "lexeme", "=", "a", "=", "[", "v", ",", "pl", "+", "\"e\"", "+", "x1", ",", "p2", "+", "x1", ",", "pr", "+", "\"t\"", "+", "x1", ",", "pw", "+", "x1", ",", "pr", "+", "\"t\"", "+", "x1", ",", "pp", ",", "# present", "pt", "+", "\"e\"", "+", "x1", ",", "pt", "+", "\"est\"", "+", "x1", ",", "pt", "+", "\"e\"", "+", "x1", ",", "pt", "+", "\"en\"", "+", "x1", ",", "pt", "+", "\"et\"", "+", "x1", ",", "ge", ",", "# past", "b", "+", "\"e\"", "+", "x1", ",", "pr", "+", "\"t\"", "+", "x1", ",", "x", "+", "pw", ",", "# imperative", "s1", "+", "\"e\"", "+", "x1", ",", "s1", "+", "\"est\"", "+", "x1", ",", "s1", "+", "\"en\"", "+", "x1", ",", "s1", "+", "\"et\"", "+", "x1", ",", "# subjunctive I", "s2", "+", "\"e\"", "+", "x1", ",", "s2", "+", "\"est\"", "+", "x1", ",", "s2", "+", "\"en\"", "+", "x1", ",", "s2", "+", "\"et\"", "+", "x1", "# subjunctive II", "]", "# Encode Eszett (ß) and attempt to retrieve from the lexicon.", "# Decode Eszett for present and imperative.", "if", "encode_sz", "(", "v", ")", "in", "self", ":", "a", "=", "self", "[", "encode_sz", "(", "v", ")", "]", "a", "=", "[", "decode_sz", "(", "v", ")", "for", "v", "in", "a", "[", ":", "7", "]", "]", "+", "a", "[", "7", ":", "13", "]", "+", "[", "decode_sz", "(", "v", ")", "for", "v", "in", "a", "[", "13", ":", 
"20", "]", "]", "+", "a", "[", "20", ":", "]", "# Since the lexicon does not contain imperative for all verbs, don't simply return it.", "# Instead, update the rule-based lexeme with inflections from the lexicon.", "return", "[", "a", "[", "i", "]", "or", "lexeme", "[", "i", "]", "for", "i", "in", "range", "(", "len", "(", "a", ")", ")", "]" ]
For a regular verb (base form), returns the forms using a rule-based approach.
[ "For", "a", "regular", "verb", "(", "base", "form", ")", "returns", "the", "forms", "using", "a", "rule", "-", "based", "approach", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/inflect.py#L415-L464
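find_lexeme() returns the full list of forms (present, past, imperative, subjunctive); a probe with a regular verb, again through the assumed verbs singleton:

>>> from textblob_de.ext._pattern.text.de.inflect import verbs   # assumed singleton
>>> forms = verbs.find_lexeme("machen")
>>> forms[0], forms[2], forms[7], forms[12]
# expected: ('machen', 'machst', 'machte', 'gemacht') -- infinitive, 2sg present, past, past participle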
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/inflect.py
Verbs.tenses
def tenses(self, verb, parse=True): """ Returns a list of possible tenses for the given inflected verb. """ tenses = _Verbs.tenses(self, verb, parse) if len(tenses) == 0: # auswirkte => wirkte aus for prefix in prefix_separable: if verb.startswith(prefix): tenses = _Verbs.tenses(self, verb[len(prefix):] + " " + prefix, parse) break return tenses
python
def tenses(self, verb, parse=True): """ Returns a list of possible tenses for the given inflected verb. """ tenses = _Verbs.tenses(self, verb, parse) if len(tenses) == 0: # auswirkte => wirkte aus for prefix in prefix_separable: if verb.startswith(prefix): tenses = _Verbs.tenses(self, verb[len(prefix):] + " " + prefix, parse) break return tenses
[ "def", "tenses", "(", "self", ",", "verb", ",", "parse", "=", "True", ")", ":", "tenses", "=", "_Verbs", ".", "tenses", "(", "self", ",", "verb", ",", "parse", ")", "if", "len", "(", "tenses", ")", "==", "0", ":", "# auswirkte => wirkte aus", "for", "prefix", "in", "prefix_separable", ":", "if", "verb", ".", "startswith", "(", "prefix", ")", ":", "tenses", "=", "_Verbs", ".", "tenses", "(", "self", ",", "verb", "[", "len", "(", "prefix", ")", ":", "]", "+", "\" \"", "+", "prefix", ",", "parse", ")", "break", "return", "tenses" ]
Returns a list of possible tenses for the given inflected verb.
[ "Returns", "a", "list", "of", "possible", "tenses", "for", "the", "given", "inflected", "verb", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/inflect.py#L466-L476
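tenses() only adds the separable-prefix fallback described in the code comment (auswirkte => wirkte aus); the actual tense tuples come from the wrapped _Verbs class, so only the fallback equivalence is sketched here (same assumed verbs singleton as above):

>>> verbs.tenses("auswirkte") == verbs.tenses("wirkte aus")   # expected: True whenever the direct lookup is empty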
markuskiller/textblob-de
textblob_de/classifiers.py
_get_words_from_dataset
def _get_words_from_dataset(dataset): """Return a set of all words in a dataset. :param dataset: A list of tuples of the form ``(words, label)`` where ``words`` is either a string or a list of tokens. """ # Words may be either a string or a list of tokens. Return an iterator # of tokens accordingly def tokenize(words): if isinstance(words, basestring): return word_tokenize(words, include_punc=False) else: return words all_words = chain.from_iterable(tokenize(words) for words, _ in dataset) return set(all_words)
python
def _get_words_from_dataset(dataset): """Return a set of all words in a dataset. :param dataset: A list of tuples of the form ``(words, label)`` where ``words`` is either a string or a list of tokens. """ # Words may be either a string or a list of tokens. Return an iterator # of tokens accordingly def tokenize(words): if isinstance(words, basestring): return word_tokenize(words, include_punc=False) else: return words all_words = chain.from_iterable(tokenize(words) for words, _ in dataset) return set(all_words)
[ "def", "_get_words_from_dataset", "(", "dataset", ")", ":", "# Words may be either a string or a list of tokens. Return an iterator", "# of tokens accordingly", "def", "tokenize", "(", "words", ")", ":", "if", "isinstance", "(", "words", ",", "basestring", ")", ":", "return", "word_tokenize", "(", "words", ",", "include_punc", "=", "False", ")", "else", ":", "return", "words", "all_words", "=", "chain", ".", "from_iterable", "(", "tokenize", "(", "words", ")", "for", "words", ",", "_", "in", "dataset", ")", "return", "set", "(", "all_words", ")" ]
Return a set of all words in a dataset. :param dataset: A list of tuples of the form ``(words, label)`` where ``words`` is either a string or a list of tokens.
[ "Return", "a", "set", "of", "all", "words", "in", "a", "dataset", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/classifiers.py#L57-L72
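Both input shapes named in the docstring can be mixed in one dataset; a sketch (string entries go through the German word_tokenize, so only the token-list case is spelled out):

>>> from textblob_de.classifiers import _get_words_from_dataset
>>> train = [(["ein", "kleiner", "Junge"], "pos"), ("eine schöne Frau", "neg")]
>>> "kleiner" in _get_words_from_dataset(train)   # list items pass through untouched; expected: True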
markuskiller/textblob-de
textblob_de/classifiers.py
basic_extractor
def basic_extractor(document, train_set): """A basic document feature extractor that returns a dict indicating what words in ``train_set`` are contained in ``document``. :param document: The text to extract features from. Can be a string or an iterable. :param list train_set: Training data set, a list of tuples of the form ``(words, label)``. """ word_features = _get_words_from_dataset(train_set) tokens = _get_document_tokens(document) features = dict(((u'contains({0})'.format(word), (word in tokens)) for word in word_features)) return features
python
def basic_extractor(document, train_set): """A basic document feature extractor that returns a dict indicating what words in ``train_set`` are contained in ``document``. :param document: The text to extract features from. Can be a string or an iterable. :param list train_set: Training data set, a list of tuples of the form ``(words, label)``. """ word_features = _get_words_from_dataset(train_set) tokens = _get_document_tokens(document) features = dict(((u'contains({0})'.format(word), (word in tokens)) for word in word_features)) return features
[ "def", "basic_extractor", "(", "document", ",", "train_set", ")", ":", "word_features", "=", "_get_words_from_dataset", "(", "train_set", ")", "tokens", "=", "_get_document_tokens", "(", "document", ")", "features", "=", "dict", "(", "(", "(", "u'contains({0})'", ".", "format", "(", "word", ")", ",", "(", "word", "in", "tokens", ")", ")", "for", "word", "in", "word_features", ")", ")", "return", "features" ]
A basic document feature extractor that returns a dict indicating what words in ``train_set`` are contained in ``document``. :param document: The text to extract features from. Can be a string or an iterable. :param list train_set: Training data set, a list of tuples of the form ``(words, label)``.
[ "A", "basic", "document", "feature", "extractor", "that", "returns", "a", "dict", "indicating", "what", "words", "in", "train_set", "are", "contained", "in", "document", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/classifiers.py#L84-L97
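A sketch of the resulting feature dict (assuming the German tokenizer splits these short sentences on whitespace):

>>> from textblob_de.classifiers import basic_extractor
>>> train = [("ein kleiner Junge", "pos"), ("eine schöne Frau", "neg")]
>>> features = basic_extractor("ein kleiner Hund", train)
>>> features["contains(kleiner)"], features["contains(Frau)"]   # expected: (True, False)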
markuskiller/textblob-de
textblob_de/classifiers.py
contains_extractor
def contains_extractor(document): """A basic document feature extractor that returns a dict of words that the document contains.""" tokens = _get_document_tokens(document) features = dict((u'contains({0})'.format(w), True) for w in tokens) return features
python
def contains_extractor(document): """A basic document feature extractor that returns a dict of words that the document contains.""" tokens = _get_document_tokens(document) features = dict((u'contains({0})'.format(w), True) for w in tokens) return features
[ "def", "contains_extractor", "(", "document", ")", ":", "tokens", "=", "_get_document_tokens", "(", "document", ")", "features", "=", "dict", "(", "(", "u'contains({0})'", ".", "format", "(", "w", ")", ",", "True", ")", "for", "w", "in", "tokens", ")", "return", "features" ]
A basic document feature extractor that returns a dict of words that the document contains.
[ "A", "basic", "document", "feature", "extractor", "that", "returns", "a", "dict", "of", "words", "that", "the", "document", "contains", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/classifiers.py#L100-L105
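Unlike basic_extractor, no training set is consulted; every token simply maps to True:

>>> from textblob_de.classifiers import contains_extractor
>>> contains_extractor("ein kleiner Hund")
# expected: {'contains(ein)': True, 'contains(kleiner)': True, 'contains(Hund)': True}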
markuskiller/textblob-de
textblob_de/classifiers.py
BaseClassifier._read_data
def _read_data(self, dataset, format=None): """Reads a data file and returns an iterable that can be used as testing or training data.""" # Attempt to detect file format if "format" isn't specified if not format: format_class = formats.detect(dataset) else: if format not in formats.AVAILABLE.keys(): raise ValueError("'{0}' format not supported.".format(format)) format_class = formats.AVAILABLE[format] return format_class(dataset).to_iterable()
python
def _read_data(self, dataset, format=None): """Reads a data file and returns an iterable that can be used as testing or training data.""" # Attempt to detect file format if "format" isn't specified if not format: format_class = formats.detect(dataset) else: if format not in formats.AVAILABLE.keys(): raise ValueError("'{0}' format not supported.".format(format)) format_class = formats.AVAILABLE[format] return format_class(dataset).to_iterable()
[ "def", "_read_data", "(", "self", ",", "dataset", ",", "format", "=", "None", ")", ":", "# Attempt to detect file format if \"format\" isn't specified", "if", "not", "format", ":", "format_class", "=", "formats", ".", "detect", "(", "dataset", ")", "else", ":", "if", "format", "not", "in", "formats", ".", "AVAILABLE", ".", "keys", "(", ")", ":", "raise", "ValueError", "(", "\"'{0}' format not supported.\"", ".", "format", "(", "format", ")", ")", "format_class", "=", "formats", ".", "AVAILABLE", "[", "format", "]", "return", "format_class", "(", "dataset", ")", ".", "to_iterable", "(", ")" ]
Reads a data file and returns an iterable that can be used as testing or training data.
[ "Reads", "a", "data", "file", "and", "returns", "an", "iterable", "that", "can", "be", "used", "as", "testing", "or", "training", "data", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/classifiers.py#L138-L148
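End users normally trigger _read_data() indirectly by passing a filename to a classifier constructor; a sketch assuming the usual headerless 'text,label' CSV layout expected by the formats module (the file name is hypothetical):

>>> with open("train_de.csv", "w") as f:
...     f.write("Das Essen war ausgezeichnet,pos\nDer Service war furchtbar,neg\n")
>>> from textblob_de.classifiers import NaiveBayesClassifier
>>> cl = NaiveBayesClassifier("train_de.csv", format="csv")   # _read_data() runs here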
markuskiller/textblob-de
textblob_de/classifiers.py
BaseClassifier.extract_features
def extract_features(self, text): """Extracts features from a body of text. :rtype: dictionary of features """ # Feature extractor may take one or two arguments try: return self.feature_extractor(text, self.train_set) except (TypeError, AttributeError): return self.feature_extractor(text)
python
def extract_features(self, text): """Extracts features from a body of text. :rtype: dictionary of features """ # Feature extractor may take one or two arguments try: return self.feature_extractor(text, self.train_set) except (TypeError, AttributeError): return self.feature_extractor(text)
[ "def", "extract_features", "(", "self", ",", "text", ")", ":", "# Feature extractor may take one or two arguments", "try", ":", "return", "self", ".", "feature_extractor", "(", "text", ",", "self", ".", "train_set", ")", "except", "(", "TypeError", ",", "AttributeError", ")", ":", "return", "self", ".", "feature_extractor", "(", "text", ")" ]
Extracts features from a body of text. :rtype: dictionary of features
[ "Extracts", "features", "from", "a", "body", "of", "text", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/classifiers.py#L167-L177
markuskiller/textblob-de
textblob_de/classifiers.py
NLTKClassifier.train
def train(self, *args, **kwargs): """Train the classifier with a labeled feature set and return the classifier. Takes the same arguments as the wrapped NLTK class. This method is implicitly called when calling ``classify`` or ``accuracy`` methods and is included only to allow passing in arguments to the ``train`` method of the wrapped NLTK class. .. versionadded:: 0.6.2 :rtype: A classifier """ try: self.classifier = self.nltk_class.train(self.train_features, *args, **kwargs) return self.classifier except AttributeError: raise ValueError("NLTKClassifier must have a nltk_class" " variable that is not None.")
python
def train(self, *args, **kwargs): """Train the classifier with a labeled feature set and return the classifier. Takes the same arguments as the wrapped NLTK class. This method is implicitly called when calling ``classify`` or ``accuracy`` methods and is included only to allow passing in arguments to the ``train`` method of the wrapped NLTK class. .. versionadded:: 0.6.2 :rtype: A classifier """ try: self.classifier = self.nltk_class.train(self.train_features, *args, **kwargs) return self.classifier except AttributeError: raise ValueError("NLTKClassifier must have a nltk_class" " variable that is not None.")
[ "def", "train", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "self", ".", "classifier", "=", "self", ".", "nltk_class", ".", "train", "(", "self", ".", "train_features", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "self", ".", "classifier", "except", "AttributeError", ":", "raise", "ValueError", "(", "\"NLTKClassifier must have a nltk_class\"", "\" variable that is not None.\"", ")" ]
Train the classifier with a labeled feature set and return the classifier. Takes the same arguments as the wrapped NLTK class. This method is implicitly called when calling ``classify`` or ``accuracy`` methods and is included only to allow passing in arguments to the ``train`` method of the wrapped NLTK class. .. versionadded:: 0.6.2 :rtype: A classifier
[ "Train", "the", "classifier", "with", "a", "labeled", "feature", "set", "and", "return", "the", "classifier", ".", "Takes", "the", "same", "arguments", "as", "the", "wrapped", "NLTK", "class", ".", "This", "method", "is", "implicitly", "called", "when", "calling", "classify", "or", "accuracy", "methods", "and", "is", "included", "only", "to", "allow", "passing", "in", "arguments", "to", "the", "train", "method", "of", "the", "wrapped", "NLTK", "class", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/classifiers.py#L224-L242
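The train/classify/accuracy/update methods are normally driven through a concrete subclass such as NaiveBayesClassifier; a minimal end-to-end sketch (toy data, so the predicted label is an expectation rather than a guarantee):

>>> from textblob_de.classifiers import NaiveBayesClassifier
>>> train = [("Das Essen war ausgezeichnet", "pos"), ("Der Service war furchtbar", "neg"),
...          ("Ich bin sehr zufrieden", "pos"), ("Das war eine Enttäuschung", "neg")]
>>> cl = NaiveBayesClassifier(train)
>>> cl.train()                           # optional explicit call, e.g. to pass NLTK arguments through
>>> cl.classify("Ich bin zufrieden")     # expected: 'pos'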
markuskiller/textblob-de
textblob_de/classifiers.py
NLTKClassifier.classify
def classify(self, text): """Classifies the text. :param str text: A string of text. """ text_features = self.extract_features(text) return self.classifier.classify(text_features)
python
def classify(self, text): """Classifies the text. :param str text: A string of text. """ text_features = self.extract_features(text) return self.classifier.classify(text_features)
[ "def", "classify", "(", "self", ",", "text", ")", ":", "text_features", "=", "self", ".", "extract_features", "(", "text", ")", "return", "self", ".", "classifier", ".", "classify", "(", "text_features", ")" ]
Classifies the text. :param str text: A string of text.
[ "Classifies", "the", "text", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/classifiers.py#L248-L255
markuskiller/textblob-de
textblob_de/classifiers.py
NLTKClassifier.accuracy
def accuracy(self, test_set, format=None): """Compute the accuracy on a test set. :param test_set: A list of tuples of the form ``(text, label)``, or a filename. :param format: If ``test_set`` is a filename, the file format, e.g. ``"csv"`` or ``"json"``. If ``None``, will attempt to detect the file format. """ if isinstance(test_set, basestring): # test_set is a filename test_data = self._read_data(test_set) else: # test_set is a list of tuples test_data = test_set test_features = [(self.extract_features(d), c) for d, c in test_data] return nltk.classify.accuracy(self.classifier, test_features)
python
def accuracy(self, test_set, format=None): """Compute the accuracy on a test set. :param test_set: A list of tuples of the form ``(text, label)``, or a filename. :param format: If ``test_set`` is a filename, the file format, e.g. ``"csv"`` or ``"json"``. If ``None``, will attempt to detect the file format. """ if isinstance(test_set, basestring): # test_set is a filename test_data = self._read_data(test_set) else: # test_set is a list of tuples test_data = test_set test_features = [(self.extract_features(d), c) for d, c in test_data] return nltk.classify.accuracy(self.classifier, test_features)
[ "def", "accuracy", "(", "self", ",", "test_set", ",", "format", "=", "None", ")", ":", "if", "isinstance", "(", "test_set", ",", "basestring", ")", ":", "# test_set is a filename", "test_data", "=", "self", ".", "_read_data", "(", "test_set", ")", "else", ":", "# test_set is a list of tuples", "test_data", "=", "test_set", "test_features", "=", "[", "(", "self", ".", "extract_features", "(", "d", ")", ",", "c", ")", "for", "d", ",", "c", "in", "test_data", "]", "return", "nltk", ".", "classify", ".", "accuracy", "(", "self", ".", "classifier", ",", "test_features", ")" ]
Compute the accuracy on a test set. :param test_set: A list of tuples of the form ``(text, label)``, or a filename. :param format: If ``test_set`` is a filename, the file format, e.g. ``"csv"`` or ``"json"``. If ``None``, will attempt to detect the file format.
[ "Compute", "the", "accuracy", "on", "a", "test", "set", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/classifiers.py#L257-L272
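Continuing the sketch above, accuracy() takes either in-memory tuples or a filename (per the basestring check):

>>> test = [("Ich bin zufrieden", "pos"), ("Der Service war furchtbar", "neg")]
>>> cl.accuracy(test)   # expected: a float between 0.0 and 1.0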
markuskiller/textblob-de
textblob_de/classifiers.py
NLTKClassifier.update
def update(self, new_data, *args, **kwargs): '''Update the classifier with new training data and re-trains the classifier. :param new_data: New data as a list of tuples of the form ``(text, label)``. ''' self.train_set += new_data self.train_features = [(self.extract_features(d), c) for d, c in self.train_set] try: self.classifier = self.nltk_class.train(self.train_features, *args, **kwargs) except AttributeError: # Descendant has not defined nltk_class raise ValueError("NLTKClassifier must have a nltk_class" " variable that is not None.") return True
python
def update(self, new_data, *args, **kwargs): '''Update the classifier with new training data and re-trains the classifier. :param new_data: New data as a list of tuples of the form ``(text, label)``. ''' self.train_set += new_data self.train_features = [(self.extract_features(d), c) for d, c in self.train_set] try: self.classifier = self.nltk_class.train(self.train_features, *args, **kwargs) except AttributeError: # Descendant has not defined nltk_class raise ValueError("NLTKClassifier must have a nltk_class" " variable that is not None.") return True
[ "def", "update", "(", "self", ",", "new_data", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "train_set", "+=", "new_data", "self", ".", "train_features", "=", "[", "(", "self", ".", "extract_features", "(", "d", ")", ",", "c", ")", "for", "d", ",", "c", "in", "self", ".", "train_set", "]", "try", ":", "self", ".", "classifier", "=", "self", ".", "nltk_class", ".", "train", "(", "self", ".", "train_features", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "AttributeError", ":", "# Descendant has not defined nltk_class", "raise", "ValueError", "(", "\"NLTKClassifier must have a nltk_class\"", "\" variable that is not None.\"", ")", "return", "True" ]
Update the classifier with new training data and re-trains the classifier. :param new_data: New data as a list of tuples of the form ``(text, label)``.
[ "Update", "the", "classifier", "with", "new", "training", "data", "and", "re", "-", "trains", "the", "classifier", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/classifiers.py#L274-L290
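Continuing the same sketch, update() extends the stored training set and re-trains the wrapped NLTK model in place:

>>> cl.update([("Sehr empfehlenswert", "pos")])   # expected: True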
markuskiller/textblob-de
textblob_de/classifiers.py
NaiveBayesClassifier.prob_classify
def prob_classify(self, text): """Return the label probability distribution for classifying a string of text. Example: :: >>> classifier = NaiveBayesClassifier(train_data) >>> prob_dist = classifier.prob_classify("I feel happy this morning.") >>> prob_dist.max() 'positive' >>> prob_dist.prob("positive") 0.7 :rtype: nltk.probability.DictionaryProbDist """ text_features = self.extract_features(text) return self.classifier.prob_classify(text_features)
python
def prob_classify(self, text): """Return the label probability distribution for classifying a string of text. Example: :: >>> classifier = NaiveBayesClassifier(train_data) >>> prob_dist = classifier.prob_classify("I feel happy this morning.") >>> prob_dist.max() 'positive' >>> prob_dist.prob("positive") 0.7 :rtype: nltk.probability.DictionaryProbDist """ text_features = self.extract_features(text) return self.classifier.prob_classify(text_features)
[ "def", "prob_classify", "(", "self", ",", "text", ")", ":", "text_features", "=", "self", ".", "extract_features", "(", "text", ")", "return", "self", ".", "classifier", ".", "prob_classify", "(", "text_features", ")" ]
Return the label probability distribution for classifying a string of text. Example: :: >>> classifier = NaiveBayesClassifier(train_data) >>> prob_dist = classifier.prob_classify("I feel happy this morning.") >>> prob_dist.max() 'positive' >>> prob_dist.prob("positive") 0.7 :rtype: nltk.probability.DictionaryProbDist
[ "Return", "the", "label", "probability", "distribution", "for", "classifying", "a", "string", "of", "text", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/classifiers.py#L312-L330
markuskiller/textblob-de
textblob_de/classifiers.py
PositiveNaiveBayesClassifier.train
def train(self, *args, **kwargs): """Train the classifier with a labeled and unlabeled feature sets and return the classifier. Takes the same arguments as the wrapped NLTK class. This method is implicitly called when calling ``classify`` or ``accuracy`` methods and is included only to allow passing in arguments to the ``train`` method of the wrapped NLTK class. :rtype: A classifier """ self.classifier = self.nltk_class.train(self.positive_features, self.unlabeled_features, self.positive_prob_prior) return self.classifier
python
def train(self, *args, **kwargs): """Train the classifier with a labeled and unlabeled feature sets and return the classifier. Takes the same arguments as the wrapped NLTK class. This method is implicitly called when calling ``classify`` or ``accuracy`` methods and is included only to allow passing in arguments to the ``train`` method of the wrapped NLTK class. :rtype: A classifier """ self.classifier = self.nltk_class.train(self.positive_features, self.unlabeled_features, self.positive_prob_prior) return self.classifier
[ "def", "train", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "classifier", "=", "self", ".", "nltk_class", ".", "train", "(", "self", ".", "positive_features", ",", "self", ".", "unlabeled_features", ",", "self", ".", "positive_prob_prior", ")", "return", "self", ".", "classifier" ]
Train the classifier with a labeled and unlabeled feature sets and return the classifier. Takes the same arguments as the wrapped NLTK class. This method is implicitly called when calling ``classify`` or ``accuracy`` methods and is included only to allow passing in arguments to the ``train`` method of the wrapped NLTK class. :rtype: A classifier
[ "Train", "the", "classifier", "with", "a", "labeled", "and", "unlabeled", "feature", "sets", "and", "return", "the", "classifier", ".", "Takes", "the", "same", "arguments", "as", "the", "wrapped", "NLTK", "class", ".", "This", "method", "is", "implicitly", "called", "when", "calling", "classify", "or", "accuracy", "methods", "and", "is", "included", "only", "to", "allow", "passing", "in", "arguments", "to", "the", "train", "method", "of", "the", "wrapped", "NLTK", "class", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/classifiers.py#L453-L466
markuskiller/textblob-de
textblob_de/classifiers.py
PositiveNaiveBayesClassifier.update
def update(self, new_positive_data=None, new_unlabeled_data=None, positive_prob_prior=0.5, *args, **kwargs): '''Update the classifier with new data and re-trains the classifier. :param new_positive_data: List of new, labeled strings. :param new_unlabeled_data: List of new, unlabeled strings. ''' self.positive_prob_prior = positive_prob_prior if new_positive_data: self.positive_set += new_positive_data self.positive_features += [self.extract_features(d) for d in new_positive_data] if new_unlabeled_data: self.unlabeled_set += new_unlabeled_data self.unlabeled_features += [self.extract_features(d) for d in new_unlabeled_data] self.classifier = self.nltk_class.train(self.positive_features, self.unlabeled_features, self.positive_prob_prior, *args, **kwargs) return True
python
def update(self, new_positive_data=None, new_unlabeled_data=None, positive_prob_prior=0.5, *args, **kwargs): '''Update the classifier with new data and re-trains the classifier. :param new_positive_data: List of new, labeled strings. :param new_unlabeled_data: List of new, unlabeled strings. ''' self.positive_prob_prior = positive_prob_prior if new_positive_data: self.positive_set += new_positive_data self.positive_features += [self.extract_features(d) for d in new_positive_data] if new_unlabeled_data: self.unlabeled_set += new_unlabeled_data self.unlabeled_features += [self.extract_features(d) for d in new_unlabeled_data] self.classifier = self.nltk_class.train(self.positive_features, self.unlabeled_features, self.positive_prob_prior, *args, **kwargs) return True
[ "def", "update", "(", "self", ",", "new_positive_data", "=", "None", ",", "new_unlabeled_data", "=", "None", ",", "positive_prob_prior", "=", "0.5", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "positive_prob_prior", "=", "positive_prob_prior", "if", "new_positive_data", ":", "self", ".", "positive_set", "+=", "new_positive_data", "self", ".", "positive_features", "+=", "[", "self", ".", "extract_features", "(", "d", ")", "for", "d", "in", "new_positive_data", "]", "if", "new_unlabeled_data", ":", "self", ".", "unlabeled_set", "+=", "new_unlabeled_data", "self", ".", "unlabeled_features", "+=", "[", "self", ".", "extract_features", "(", "d", ")", "for", "d", "in", "new_unlabeled_data", "]", "self", ".", "classifier", "=", "self", ".", "nltk_class", ".", "train", "(", "self", ".", "positive_features", ",", "self", ".", "unlabeled_features", ",", "self", ".", "positive_prob_prior", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "True" ]
Update the classifier with new data and re-trains the classifier. :param new_positive_data: List of new, labeled strings. :param new_unlabeled_data: List of new, unlabeled strings.
[ "Update", "the", "classifier", "with", "new", "data", "and", "re", "-", "trains", "the", "classifier", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/classifiers.py#L468-L490
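A hedged sketch of the PositiveNaiveBayesClassifier.update record above; the constructor order (positive strings first, unlabeled strings second) is assumed from the attributes the method touches (positive_set, unlabeled_set), and classify() is assumed to return the True/False membership decision of the wrapped NLTK class:

from textblob_de.classifiers import PositiveNaiveBayesClassifier

sports = ["Das Spiel war spannend.", "Der Stuermer traf zweimal."]
other = ["Die Sitzung dauerte zwei Stunden.", "Das Wetter ist schoen."]
cl = PositiveNaiveBayesClassifier(sports, other)   # assumed argument order

# Add new positive and unlabeled strings; the classifier is re-trained
# with the given prior and the method returns True.
cl.update(new_positive_data=["Ein packendes Finale."],
          new_unlabeled_data=["Der Zug hatte Verspaetung."],
          positive_prob_prior=0.5)
print(cl.classify("Das Finale war packend."))      # e.g. True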
markuskiller/textblob-de
textblob_de/classifiers.py
MaxEntClassifier.prob_classify
def prob_classify(self, text): """Return the label probability distribution for classifying a string of text. Example: :: >>> classifier = MaxEntClassifier(train_data) >>> prob_dist = classifier.prob_classify("I feel happy this morning.") >>> prob_dist.max() 'positive' >>> prob_dist.prob("positive") 0.7 :rtype: nltk.probability.DictionaryProbDist """ feats = self.extract_features(text) return self.classifier.prob_classify(feats)
python
def prob_classify(self, text): """Return the label probability distribution for classifying a string of text. Example: :: >>> classifier = MaxEntClassifier(train_data) >>> prob_dist = classifier.prob_classify("I feel happy this morning.") >>> prob_dist.max() 'positive' >>> prob_dist.prob("positive") 0.7 :rtype: nltk.probability.DictionaryProbDist """ feats = self.extract_features(text) return self.classifier.prob_classify(feats)
[ "def", "prob_classify", "(", "self", ",", "text", ")", ":", "feats", "=", "self", ".", "extract_features", "(", "text", ")", "return", "self", ".", "classifier", ".", "prob_classify", "(", "feats", ")" ]
Return the label probability distribution for classifying a string of text. Example: :: >>> classifier = MaxEntClassifier(train_data) >>> prob_dist = classifier.prob_classify("I feel happy this morning.") >>> prob_dist.max() 'positive' >>> prob_dist.prob("positive") 0.7 :rtype: nltk.probability.DictionaryProbDist
[ "Return", "the", "label", "probability", "distribution", "for", "classifying", "a", "string", "of", "text", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/classifiers.py#L497-L515
markuskiller/textblob-de
textblob_de/lemmatizers.py
PatternParserLemmatizer.lemmatize
def lemmatize(self, text): """Return a list of (lemma, tag) tuples. :param str text: A string. """ #: Do not process empty strings (Issue #3) if text.strip() == "": return [] parsed_sentences = self._parse_text(text) _lemmalist = [] for s in parsed_sentences: tokens = s.split() for i, t in enumerate(tokens): #: Filter empty tokens from the parser output (Issue #5) #: This only happens if parser input is improperly tokenized #: e.g. if there are empty strings in the list of tokens ['A', '', '.'] if t.startswith('/'): continue w, tag, phrase, role, lemma = t.split('/') # The lexicon uses Swiss spelling: "ss" instead of "ß". lemma = lemma.replace(u"ß", "ss") # Reverse previous replacement lemma = lemma.strip().replace("forwardslash", "/") if w[0].isupper() and i > 0: lemma = lemma.title() elif tag.startswith("N") and i == 0: lemma = lemma.title() # Todo: Check if it makes sense to treat '/' as punctuation # (especially for sentiment analysis it might be interesting # to treat it as OR ('oder')). if w in string.punctuation or lemma == '/': continue else: lemma = lemma _lemmalist.append((lemma, tag)) return _lemmalist
python
def lemmatize(self, text): """Return a list of (lemma, tag) tuples. :param str text: A string. """ #: Do not process empty strings (Issue #3) if text.strip() == "": return [] parsed_sentences = self._parse_text(text) _lemmalist = [] for s in parsed_sentences: tokens = s.split() for i, t in enumerate(tokens): #: Filter empty tokens from the parser output (Issue #5) #: This only happens if parser input is improperly tokenized #: e.g. if there are empty strings in the list of tokens ['A', '', '.'] if t.startswith('/'): continue w, tag, phrase, role, lemma = t.split('/') # The lexicon uses Swiss spelling: "ss" instead of "ß". lemma = lemma.replace(u"ß", "ss") # Reverse previous replacement lemma = lemma.strip().replace("forwardslash", "/") if w[0].isupper() and i > 0: lemma = lemma.title() elif tag.startswith("N") and i == 0: lemma = lemma.title() # Todo: Check if it makes sense to treat '/' as punctuation # (especially for sentiment analysis it might be interesting # to treat it as OR ('oder')). if w in string.punctuation or lemma == '/': continue else: lemma = lemma _lemmalist.append((lemma, tag)) return _lemmalist
[ "def", "lemmatize", "(", "self", ",", "text", ")", ":", "#: Do not process empty strings (Issue #3)", "if", "text", ".", "strip", "(", ")", "==", "\"\"", ":", "return", "[", "]", "parsed_sentences", "=", "self", ".", "_parse_text", "(", "text", ")", "_lemmalist", "=", "[", "]", "for", "s", "in", "parsed_sentences", ":", "tokens", "=", "s", ".", "split", "(", ")", "for", "i", ",", "t", "in", "enumerate", "(", "tokens", ")", ":", "#: Filter empty tokens from the parser output (Issue #5)", "#: This only happens if parser input is improperly tokenized", "#: e.g. if there are empty strings in the list of tokens ['A', '', '.']", "if", "t", ".", "startswith", "(", "'/'", ")", ":", "continue", "w", ",", "tag", ",", "phrase", ",", "role", ",", "lemma", "=", "t", ".", "split", "(", "'/'", ")", "# The lexicon uses Swiss spelling: \"ss\" instead of \"ß\".", "lemma", "=", "lemma", ".", "replace", "(", "u\"ß\",", " ", "ss\")", "", "# Reverse previous replacement", "lemma", "=", "lemma", ".", "strip", "(", ")", ".", "replace", "(", "\"forwardslash\"", ",", "\"/\"", ")", "if", "w", "[", "0", "]", ".", "isupper", "(", ")", "and", "i", ">", "0", ":", "lemma", "=", "lemma", ".", "title", "(", ")", "elif", "tag", ".", "startswith", "(", "\"N\"", ")", "and", "i", "==", "0", ":", "lemma", "=", "lemma", ".", "title", "(", ")", "# Todo: Check if it makes sense to treat '/' as punctuation", "# (especially for sentiment analysis it might be interesting", "# to treat it as OR ('oder')).", "if", "w", "in", "string", ".", "punctuation", "or", "lemma", "==", "'/'", ":", "continue", "else", ":", "lemma", "=", "lemma", "_lemmalist", ".", "append", "(", "(", "lemma", ",", "tag", ")", ")", "return", "_lemmalist" ]
Return a list of (lemma, tag) tuples. :param str text: A string.
[ "Return", "a", "list", "of", "(", "lemma", "tag", ")", "tuples", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/lemmatizers.py#L43-L80
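As the lemmatize record above shows, the method returns (lemma, tag) tuples derived from the pattern.de parser, with Swiss "ss" spellings mapped back and sentence-initial nouns re-capitalised. A small sketch, assuming the default tokenizer; the exact lemmas and Penn-style tags depend on the pattern.de lexicon, so the printed output is only indicative:

from textblob_de.lemmatizers import PatternParserLemmatizer

lemmatizer = PatternParserLemmatizer()
print(lemmatizer.lemmatize("Die Katzen liefen schnell."))
# Indicative output: [('die', 'DT'), ('Katze', 'NNS'), ('laufen', 'VBD'), ('schnell', 'RB')]
print(lemmatizer.lemmatize("   "))   # empty/whitespace input returns [] (Issue #3)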
markuskiller/textblob-de
textblob_de/lemmatizers.py
PatternParserLemmatizer._parse_text
def _parse_text(self, text): """Parse text (string) and return list of parsed sentences (strings). Each sentence consists of space separated token elements and the token format returned by the PatternParser is WORD/TAG/PHRASE/ROLE/LEMMA (separated by a forward slash '/') :param str text: A string. """ # Fix for issue #1 text = text.replace("/", " FORWARDSLASH ") _tokenized = " ".join(self.tokenizer.tokenize(text)) parsed_text = pattern_parse(_tokenized, tokenize=False, lemmata=True) return parsed_text.split('\n')
python
def _parse_text(self, text): """Parse text (string) and return list of parsed sentences (strings). Each sentence consists of space separated token elements and the token format returned by the PatternParser is WORD/TAG/PHRASE/ROLE/LEMMA (separated by a forward slash '/') :param str text: A string. """ # Fix for issue #1 text = text.replace("/", " FORWARDSLASH ") _tokenized = " ".join(self.tokenizer.tokenize(text)) parsed_text = pattern_parse(_tokenized, tokenize=False, lemmata=True) return parsed_text.split('\n')
[ "def", "_parse_text", "(", "self", ",", "text", ")", ":", "# Fix for issue #1", "text", "=", "text", ".", "replace", "(", "\"/\"", ",", "\" FORWARDSLASH \"", ")", "_tokenized", "=", "\" \"", ".", "join", "(", "self", ".", "tokenizer", ".", "tokenize", "(", "text", ")", ")", "parsed_text", "=", "pattern_parse", "(", "_tokenized", ",", "tokenize", "=", "False", ",", "lemmata", "=", "True", ")", "return", "parsed_text", ".", "split", "(", "'\\n'", ")" ]
Parse text (string) and return list of parsed sentences (strings). Each sentence consists of space separated token elements and the token format returned by the PatternParser is WORD/TAG/PHRASE/ROLE/LEMMA (separated by a forward slash '/') :param str text: A string.
[ "Parse", "text", "(", "string", ")", "and", "return", "list", "of", "parsed", "sentences", "(", "strings", ")", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/lemmatizers.py#L82-L96
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
_match
def _match(string, pattern): """ Returns True if the pattern matches the given word string. The pattern can include a wildcard (*front, back*, *both*, in*side), or it can be a compiled regular expression. """ p = pattern try: if p[:1] == WILDCARD and (p[-1:] == WILDCARD and p[1:-1] in string or string.endswith(p[1:])): return True if p[-1:] == WILDCARD and not p[-2:-1] == "\\" and string.startswith(p[:-1]): return True if p == string: return True if WILDCARD in p[1:-1]: p = p.split(WILDCARD) return string.startswith(p[0]) and string.endswith(p[-1]) except: # For performance, calling isinstance() last is 10% faster for plain strings. if isinstance(p, regexp): return p.search(string) is not None return False
python
def _match(string, pattern): """ Returns True if the pattern matches the given word string. The pattern can include a wildcard (*front, back*, *both*, in*side), or it can be a compiled regular expression. """ p = pattern try: if p[:1] == WILDCARD and (p[-1:] == WILDCARD and p[1:-1] in string or string.endswith(p[1:])): return True if p[-1:] == WILDCARD and not p[-2:-1] == "\\" and string.startswith(p[:-1]): return True if p == string: return True if WILDCARD in p[1:-1]: p = p.split(WILDCARD) return string.startswith(p[0]) and string.endswith(p[-1]) except: # For performance, calling isinstance() last is 10% faster for plain strings. if isinstance(p, regexp): return p.search(string) is not None return False
[ "def", "_match", "(", "string", ",", "pattern", ")", ":", "p", "=", "pattern", "try", ":", "if", "p", "[", ":", "1", "]", "==", "WILDCARD", "and", "(", "p", "[", "-", "1", ":", "]", "==", "WILDCARD", "and", "p", "[", "1", ":", "-", "1", "]", "in", "string", "or", "string", ".", "endswith", "(", "p", "[", "1", ":", "]", ")", ")", ":", "return", "True", "if", "p", "[", "-", "1", ":", "]", "==", "WILDCARD", "and", "not", "p", "[", "-", "2", ":", "-", "1", "]", "==", "\"\\\\\"", "and", "string", ".", "startswith", "(", "p", "[", ":", "-", "1", "]", ")", ":", "return", "True", "if", "p", "==", "string", ":", "return", "True", "if", "WILDCARD", "in", "p", "[", "1", ":", "-", "1", "]", ":", "p", "=", "p", ".", "split", "(", "WILDCARD", ")", "return", "string", ".", "startswith", "(", "p", "[", "0", "]", ")", "and", "string", ".", "endswith", "(", "p", "[", "-", "1", "]", ")", "except", ":", "# For performance, calling isinstance() last is 10% faster for plain strings.", "if", "isinstance", "(", "p", ",", "regexp", ")", ":", "return", "p", ".", "search", "(", "string", ")", "is", "not", "None", "return", "False" ]
Returns True if the pattern matches the given word string. The pattern can include a wildcard (*front, back*, *both*, in*side), or it can be a compiled regular expression.
[ "Returns", "True", "if", "the", "pattern", "matches", "the", "given", "word", "string", ".", "The", "pattern", "can", "include", "a", "wildcard", "(", "*", "front", "back", "*", "*", "both", "*", "in", "*", "side", ")", "or", "it", "can", "be", "a", "compiled", "regular", "expression", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L101-L121
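The wildcard rules in _match above can be exercised directly; a small sketch, assuming the vendored module imports under the path shown in the record (the leading underscore marks it as a module-internal helper):

from textblob_de.ext._pattern.text.search import _match

print(_match("software", "soft*"))   # True: trailing wildcard, startswith("soft")
print(_match("hardware", "*ware"))   # True: leading wildcard, endswith("ware")
print(_match("inside", "in*de"))     # True: inner wildcard, startswith + endswith
print(_match("cat", "dog"))          # False: no wildcard and strings differ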
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
unique
def unique(iterable): """ Returns a list copy in which each item occurs only once (in-order). """ seen = set() return [x for x in iterable if x not in seen and not seen.add(x)]
python
def unique(iterable): """ Returns a list copy in which each item occurs only once (in-order). """ seen = set() return [x for x in iterable if x not in seen and not seen.add(x)]
[ "def", "unique", "(", "iterable", ")", ":", "seen", "=", "set", "(", ")", "return", "[", "x", "for", "x", "in", "iterable", "if", "x", "not", "in", "seen", "and", "not", "seen", ".", "add", "(", "x", ")", "]" ]
Returns a list copy in which each item occurs only once (in-order).
[ "Returns", "a", "list", "copy", "in", "which", "each", "item", "occurs", "only", "once", "(", "in", "-", "order", ")", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L127-L131
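unique() above relies on set.add() returning None, so the comprehension keeps only the first occurrence of each item while preserving order; for example (import path taken from the record above):

from textblob_de.ext._pattern.text.search import unique

print(unique([3, 1, 3, 2, 1, 3]))   # [3, 1, 2]
print(unique("abracadabra"))        # ['a', 'b', 'r', 'c', 'd']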
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
product
def product(*args, **kwargs): """ Yields all permutations with replacement: list(product("cat", repeat=2)) => [("c", "c"), ("c", "a"), ("c", "t"), ("a", "c"), ("a", "a"), ("a", "t"), ("t", "c"), ("t", "a"), ("t", "t")] """ p = [[]] for iterable in map(tuple, args) * kwargs.get("repeat", 1): p = [x + [y] for x in p for y in iterable] for p in p: yield tuple(p)
python
def product(*args, **kwargs): """ Yields all permutations with replacement: list(product("cat", repeat=2)) => [("c", "c"), ("c", "a"), ("c", "t"), ("a", "c"), ("a", "a"), ("a", "t"), ("t", "c"), ("t", "a"), ("t", "t")] """ p = [[]] for iterable in map(tuple, args) * kwargs.get("repeat", 1): p = [x + [y] for x in p for y in iterable] for p in p: yield tuple(p)
[ "def", "product", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "p", "=", "[", "[", "]", "]", "for", "iterable", "in", "map", "(", "tuple", ",", "args", ")", "*", "kwargs", ".", "get", "(", "\"repeat\"", ",", "1", ")", ":", "p", "=", "[", "x", "+", "[", "y", "]", "for", "x", "in", "p", "for", "y", "in", "iterable", "]", "for", "p", "in", "p", ":", "yield", "tuple", "(", "p", ")" ]
Yields all permutations with replacement: list(product("cat", repeat=2)) => [("c", "c"), ("c", "a"), ("c", "t"), ("a", "c"), ("a", "a"), ("a", "t"), ("t", "c"), ("t", "a"), ("t", "t")]
[ "Yields", "all", "permutations", "with", "replacement", ":", "list", "(", "product", "(", "cat", "repeat", "=", "2", "))", "=", ">", "[", "(", "c", "c", ")", "(", "c", "a", ")", "(", "c", "t", ")", "(", "a", "c", ")", "(", "a", "a", ")", "(", "a", "t", ")", "(", "t", "c", ")", "(", "t", "a", ")", "(", "t", "t", ")", "]" ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L144-L161
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
variations
def variations(iterable, optional=lambda x: False): """ Returns all possible variations of a sequence with optional items. """ # For example: variations(["A?", "B?", "C"], optional=lambda s: s.endswith("?")) # defines a sequence where constraint A and B are optional: # [("A?", "B?", "C"), ("B?", "C"), ("A?", "C"), ("C")] iterable = tuple(iterable) # Create a boolean sequence where True means optional: # ("A?", "B?", "C") => [True, True, False] o = [optional(x) for x in iterable] # Find all permutations of the boolean sequence: # [True, False, True], [True, False, False], [False, False, True], [False, False, False]. # Map to sequences of constraints whose index in the boolean sequence yields True. a = set() for p in product([False, True], repeat=sum(o)): p = list(p) v = [b and (b and p.pop(0)) for b in o] v = tuple(iterable[i] for i in xrange(len(v)) if not v[i]) a.add(v) # Longest-first. return sorted(a, cmp=lambda x, y: len(y) - len(x))
python
def variations(iterable, optional=lambda x: False): """ Returns all possible variations of a sequence with optional items. """ # For example: variations(["A?", "B?", "C"], optional=lambda s: s.endswith("?")) # defines a sequence where constraint A and B are optional: # [("A?", "B?", "C"), ("B?", "C"), ("A?", "C"), ("C")] iterable = tuple(iterable) # Create a boolean sequence where True means optional: # ("A?", "B?", "C") => [True, True, False] o = [optional(x) for x in iterable] # Find all permutations of the boolean sequence: # [True, False, True], [True, False, False], [False, False, True], [False, False, False]. # Map to sequences of constraints whose index in the boolean sequence yields True. a = set() for p in product([False, True], repeat=sum(o)): p = list(p) v = [b and (b and p.pop(0)) for b in o] v = tuple(iterable[i] for i in xrange(len(v)) if not v[i]) a.add(v) # Longest-first. return sorted(a, cmp=lambda x, y: len(y) - len(x))
[ "def", "variations", "(", "iterable", ",", "optional", "=", "lambda", "x", ":", "False", ")", ":", "# For example: variations([\"A?\", \"B?\", \"C\"], optional=lambda s: s.endswith(\"?\"))", "# defines a sequence where constraint A and B are optional:", "# [(\"A?\", \"B?\", \"C\"), (\"B?\", \"C\"), (\"A?\", \"C\"), (\"C\")]", "iterable", "=", "tuple", "(", "iterable", ")", "# Create a boolean sequence where True means optional:", "# (\"A?\", \"B?\", \"C\") => [True, True, False]", "o", "=", "[", "optional", "(", "x", ")", "for", "x", "in", "iterable", "]", "# Find all permutations of the boolean sequence:", "# [True, False, True], [True, False, False], [False, False, True], [False, False, False].", "# Map to sequences of constraints whose index in the boolean sequence yields True.", "a", "=", "set", "(", ")", "for", "p", "in", "product", "(", "[", "False", ",", "True", "]", ",", "repeat", "=", "sum", "(", "o", ")", ")", ":", "p", "=", "list", "(", "p", ")", "v", "=", "[", "b", "and", "(", "b", "and", "p", ".", "pop", "(", "0", ")", ")", "for", "b", "in", "o", "]", "v", "=", "tuple", "(", "iterable", "[", "i", "]", "for", "i", "in", "xrange", "(", "len", "(", "v", ")", ")", "if", "not", "v", "[", "i", "]", ")", "a", ".", "add", "(", "v", ")", "# Longest-first.", "return", "sorted", "(", "a", ",", "cmp", "=", "lambda", "x", ",", "y", ":", "len", "(", "y", ")", "-", "len", "(", "x", ")", ")" ]
Returns all possible variations of a sequence with optional items.
[ "Returns", "all", "possible", "variations", "of", "a", "sequence", "with", "optional", "items", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L167-L187
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
compile
def compile(pattern, *args, **kwargs): """ Returns a Pattern from the given string or regular expression. Recently compiled patterns are kept in cache (if they do not use taxonomies, which are mutable dicts). """ id, p = repr(pattern) + repr(args), pattern if id in _cache and not kwargs: return _cache[id] if isinstance(pattern, basestring): p = Pattern.fromstring(pattern, *args, **kwargs) if isinstance(pattern, regexp): p = Pattern([Constraint(words=[pattern], taxonomy=kwargs.get("taxonomy", TAXONOMY))], *args, **kwargs) if len(_cache) > _CACHE_SIZE: _cache.clear() if isinstance(p, Pattern) and not kwargs: _cache[id] = p if isinstance(p, Pattern): return p else: raise TypeError("can't compile '%s' object" % pattern.__class__.__name__)
python
def compile(pattern, *args, **kwargs): """ Returns a Pattern from the given string or regular expression. Recently compiled patterns are kept in cache (if they do not use taxonomies, which are mutable dicts). """ id, p = repr(pattern) + repr(args), pattern if id in _cache and not kwargs: return _cache[id] if isinstance(pattern, basestring): p = Pattern.fromstring(pattern, *args, **kwargs) if isinstance(pattern, regexp): p = Pattern([Constraint(words=[pattern], taxonomy=kwargs.get("taxonomy", TAXONOMY))], *args, **kwargs) if len(_cache) > _CACHE_SIZE: _cache.clear() if isinstance(p, Pattern) and not kwargs: _cache[id] = p if isinstance(p, Pattern): return p else: raise TypeError("can't compile '%s' object" % pattern.__class__.__name__)
[ "def", "compile", "(", "pattern", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "id", ",", "p", "=", "repr", "(", "pattern", ")", "+", "repr", "(", "args", ")", ",", "pattern", "if", "id", "in", "_cache", "and", "not", "kwargs", ":", "return", "_cache", "[", "id", "]", "if", "isinstance", "(", "pattern", ",", "basestring", ")", ":", "p", "=", "Pattern", ".", "fromstring", "(", "pattern", ",", "*", "args", ",", "*", "*", "kwargs", ")", "if", "isinstance", "(", "pattern", ",", "regexp", ")", ":", "p", "=", "Pattern", "(", "[", "Constraint", "(", "words", "=", "[", "pattern", "]", ",", "taxonomy", "=", "kwargs", ".", "get", "(", "\"taxonomy\"", ",", "TAXONOMY", ")", ")", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")", "if", "len", "(", "_cache", ")", ">", "_CACHE_SIZE", ":", "_cache", ".", "clear", "(", ")", "if", "isinstance", "(", "p", ",", "Pattern", ")", "and", "not", "kwargs", ":", "_cache", "[", "id", "]", "=", "p", "if", "isinstance", "(", "p", ",", "Pattern", ")", ":", "return", "p", "else", ":", "raise", "TypeError", "(", "\"can't compile '%s' object\"", "%", "pattern", ".", "__class__", ".", "__name__", ")" ]
Returns a Pattern from the given string or regular expression. Recently compiled patterns are kept in cache (if they do not use taxonomies, which are mutable dicts).
[ "Returns", "a", "Pattern", "from", "the", "given", "string", "or", "regular", "expression", ".", "Recently", "compiled", "patterns", "are", "kept", "in", "cache", "(", "if", "they", "do", "not", "use", "taxonomies", "which", "are", "mutable", "dicts", ")", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L927-L946
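A sketch of compile() from the record above: it takes either a pattern string (handed to Pattern.fromstring) or a compiled regular expression, and caches recent string patterns. The constraint syntax comes from the Constraint.fromstring documentation further down; only the .sequence attribute visible in Pattern.fromstring is inspected. The vendored module is Python 2-flavoured in places (e.g. basestring), so the sketch assumes an environment where it imports:

import re
from textblob_de.ext._pattern.text.search import compile as compile_pattern

p = compile_pattern("(JJ) NN+")     # optional adjective, then one or more nouns
print(len(p.sequence))              # 2 constraints
print(p.sequence[0].optional)       # True  ('(...)' wrapper)
print(p.sequence[1].multiple)       # True  ('+' suffix)

q = compile_pattern(re.compile(r"ca+t"))   # a regex becomes a single constraint
print(len(q.sequence))              # 1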
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
scan
def scan(pattern, string, *args, **kwargs): """ Returns True if pattern.search(Sentence(string)) may yield matches. It is often faster to scan prior to creating a Sentence and searching it. """ return compile(pattern, *args, **kwargs).scan(string)
def scan(pattern, string, *args, **kwargs): """ Returns True if pattern.search(Sentence(string)) may yield matches. It is often faster to scan prior to creating a Sentence and searching it. """ return compile(pattern, *args, **kwargs).scan(string)
python
def scan(pattern, string, *args, **kwargs): """ Returns True if pattern.search(Sentence(string)) may yield matches. It is often faster to scan prior to creating a Sentence and searching it. """ return compile(pattern, *args, **kwargs).scan(string)
[ "def", "scan", "(", "pattern", ",", "string", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "compile", "(", "pattern", ",", "*", "args", ",", "*", "*", "kwargs", ")", ".", "scan", "(", "string", ")" ]
Returns True if pattern.search(Sentence(string)) may yield matches. It is often faster to scan prior to creating a Sentence and searching it.
[ "Returns", "True", "if", "pattern", ".", "search", "(", "Sentence", "(", "string", "))", "may", "yield", "matches", ".", "It", "is", "often", "faster", "to", "scan", "prior", "to", "creating", "a", "Sentence", "and", "searching", "it", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L948-L952
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
match
def match(pattern, sentence, *args, **kwargs): """ Returns the first match found in the given sentence, or None. """ return compile(pattern, *args, **kwargs).match(sentence)
python
def match(pattern, sentence, *args, **kwargs): """ Returns the first match found in the given sentence, or None. """ return compile(pattern, *args, **kwargs).match(sentence)
[ "def", "match", "(", "pattern", ",", "sentence", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "compile", "(", "pattern", ",", "*", "args", ",", "*", "*", "kwargs", ")", ".", "match", "(", "sentence", ")" ]
Returns the first match found in the given sentence, or None.
[ "Returns", "the", "first", "match", "found", "in", "the", "given", "sentence", "or", "None", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L954-L957
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
search
def search(pattern, sentence, *args, **kwargs): """ Returns a list of all matches found in the given sentence. """ return compile(pattern, *args, **kwargs).search(sentence)
python
def search(pattern, sentence, *args, **kwargs): """ Returns a list of all matches found in the given sentence. """ return compile(pattern, *args, **kwargs).search(sentence)
[ "def", "search", "(", "pattern", ",", "sentence", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "compile", "(", "pattern", ",", "*", "args", ",", "*", "*", "kwargs", ")", ".", "search", "(", "sentence", ")" ]
Returns a list of all matches found in the given sentence.
[ "Returns", "a", "list", "of", "all", "matches", "found", "in", "the", "given", "sentence", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L959-L962
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
odict.push
def push(self, kv): """ Adds a new item from the given (key, value)-tuple. If the key exists, pushes the updated item to the head of the dict. """ if kv[0] in self: self.__delitem__(kv[0]) self.__setitem__(kv[0], kv[1])
python
def push(self, kv): """ Adds a new item from the given (key, value)-tuple. If the key exists, pushes the updated item to the head of the dict. """ if kv[0] in self: self.__delitem__(kv[0]) self.__setitem__(kv[0], kv[1])
[ "def", "push", "(", "self", ",", "kv", ")", ":", "if", "kv", "[", "0", "]", "in", "self", ":", "self", ".", "__delitem__", "(", "kv", "[", "0", "]", ")", "self", ".", "__setitem__", "(", "kv", "[", "0", "]", ",", "kv", "[", "1", "]", ")" ]
Adds a new item from the given (key, value)-tuple. If the key exists, pushes the updated item to the head of the dict.
[ "Adds", "a", "new", "item", "from", "the", "given", "(", "key", "value", ")", "-", "tuple", ".", "If", "the", "key", "exists", "pushes", "the", "updated", "item", "to", "the", "head", "of", "the", "dict", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L211-L217
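odict.push() above deletes an existing key before re-inserting it, which (per the docstring) moves the updated item to the head of the ordered dict; a minimal sketch, assuming odict() can be built empty:

from textblob_de.ext._pattern.text.search import odict

d = odict()
d.push(("a", 1))
d.push(("b", 2))
d.push(("a", 3))   # "a" is removed and re-inserted with the new value
print(d["a"])      # 3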
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
Taxonomy.append
def append(self, term, type=None, value=None): """ Appends the given term to the taxonomy and tags it as the given type. Optionally, a disambiguation value can be supplied. For example: taxonomy.append("many", "quantity", "50-200") """ term = self._normalize(term) type = self._normalize(type) self.setdefault(term, (odict(), odict()))[0].push((type, True)) self.setdefault(type, (odict(), odict()))[1].push((term, True)) self._values[term] = value
python
def append(self, term, type=None, value=None): """ Appends the given term to the taxonomy and tags it as the given type. Optionally, a disambiguation value can be supplied. For example: taxonomy.append("many", "quantity", "50-200") """ term = self._normalize(term) type = self._normalize(type) self.setdefault(term, (odict(), odict()))[0].push((type, True)) self.setdefault(type, (odict(), odict()))[1].push((term, True)) self._values[term] = value
[ "def", "append", "(", "self", ",", "term", ",", "type", "=", "None", ",", "value", "=", "None", ")", ":", "term", "=", "self", ".", "_normalize", "(", "term", ")", "type", "=", "self", ".", "_normalize", "(", "type", ")", "self", ".", "setdefault", "(", "term", ",", "(", "odict", "(", ")", ",", "odict", "(", ")", ")", ")", "[", "0", "]", ".", "push", "(", "(", "type", ",", "True", ")", ")", "self", ".", "setdefault", "(", "type", ",", "(", "odict", "(", ")", ",", "odict", "(", ")", ")", ")", "[", "1", "]", ".", "push", "(", "(", "term", ",", "True", ")", ")", "self", ".", "_values", "[", "term", "]", "=", "value" ]
Appends the given term to the taxonomy and tags it as the given type. Optionally, a disambiguation value can be supplied. For example: taxonomy.append("many", "quantity", "50-200")
[ "Appends", "the", "given", "term", "to", "the", "taxonomy", "and", "tags", "it", "as", "the", "given", "type", ".", "Optionally", "a", "disambiguation", "value", "can", "be", "supplied", ".", "For", "example", ":", "taxonomy", ".", "append", "(", "many", "quantity", "50", "-", "200", ")" ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L308-L317
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
Taxonomy.classify
def classify(self, term, **kwargs): """ Returns the (most recently added) semantic type for the given term ("many" => "quantity"). If the term is not in the dictionary, try Taxonomy.classifiers. """ term = self._normalize(term) if dict.__contains__(self, term): return self[term][0].keys()[-1] # If the term is not in the dictionary, check the classifiers. # Returns the first term in the list returned by a classifier. for classifier in self.classifiers: # **kwargs are useful if the classifier requests extra information, # for example the part-of-speech tag. v = classifier.parents(term, **kwargs) if v: return v[0]
python
def classify(self, term, **kwargs): """ Returns the (most recently added) semantic type for the given term ("many" => "quantity"). If the term is not in the dictionary, try Taxonomy.classifiers. """ term = self._normalize(term) if dict.__contains__(self, term): return self[term][0].keys()[-1] # If the term is not in the dictionary, check the classifiers. # Returns the first term in the list returned by a classifier. for classifier in self.classifiers: # **kwargs are useful if the classifier requests extra information, # for example the part-of-speech tag. v = classifier.parents(term, **kwargs) if v: return v[0]
[ "def", "classify", "(", "self", ",", "term", ",", "*", "*", "kwargs", ")", ":", "term", "=", "self", ".", "_normalize", "(", "term", ")", "if", "dict", ".", "__contains__", "(", "self", ",", "term", ")", ":", "return", "self", "[", "term", "]", "[", "0", "]", ".", "keys", "(", ")", "[", "-", "1", "]", "# If the term is not in the dictionary, check the classifiers.", "# Returns the first term in the list returned by a classifier.", "for", "classifier", "in", "self", ".", "classifiers", ":", "# **kwargs are useful if the classifier requests extra information,", "# for example the part-of-speech tag.", "v", "=", "classifier", ".", "parents", "(", "term", ",", "*", "*", "kwargs", ")", "if", "v", ":", "return", "v", "[", "0", "]" ]
Returns the (most recently added) semantic type for the given term ("many" => "quantity"). If the term is not in the dictionary, try Taxonomy.classifiers.
[ "Returns", "the", "(", "most", "recently", "added", ")", "semantic", "type", "for", "the", "given", "term", "(", "many", "=", ">", "quantity", ")", ".", "If", "the", "term", "is", "not", "in", "the", "dictionary", "try", "Taxonomy", ".", "classifiers", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L319-L333
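The append/classify/value records describe a small semantic dictionary; the sketch below strings them together, assuming Taxonomy() can be built without arguments (the module-level TAXONOMY object referenced by the code suggests so) and that the Python 2-style .keys()[-1] lookup in classify() works in your environment:

from textblob_de.ext._pattern.text.search import Taxonomy

t = Taxonomy()
t.append("many", type="quantity", value="50-200")

print(t.classify("many"))   # 'quantity' (most recently added type)
print(t.value("many"))      # '50-200'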
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
Taxonomy.parents
def parents(self, term, recursive=False, **kwargs): """ Returns a list of all semantic types for the given term. If recursive=True, traverses parents up to the root. """ def dfs(term, recursive=False, visited={}, **kwargs): if term in visited: # Break on cyclic relations. return [] visited[term], a = True, [] if dict.__contains__(self, term): a = self[term][0].keys() for classifier in self.classifiers: a.extend(classifier.parents(term, **kwargs) or []) if recursive: for w in a: a += dfs(w, recursive, visited, **kwargs) return a return unique(dfs(self._normalize(term), recursive, {}, **kwargs))
python
def parents(self, term, recursive=False, **kwargs): """ Returns a list of all semantic types for the given term. If recursive=True, traverses parents up to the root. """ def dfs(term, recursive=False, visited={}, **kwargs): if term in visited: # Break on cyclic relations. return [] visited[term], a = True, [] if dict.__contains__(self, term): a = self[term][0].keys() for classifier in self.classifiers: a.extend(classifier.parents(term, **kwargs) or []) if recursive: for w in a: a += dfs(w, recursive, visited, **kwargs) return a return unique(dfs(self._normalize(term), recursive, {}, **kwargs))
[ "def", "parents", "(", "self", ",", "term", ",", "recursive", "=", "False", ",", "*", "*", "kwargs", ")", ":", "def", "dfs", "(", "term", ",", "recursive", "=", "False", ",", "visited", "=", "{", "}", ",", "*", "*", "kwargs", ")", ":", "if", "term", "in", "visited", ":", "# Break on cyclic relations.", "return", "[", "]", "visited", "[", "term", "]", ",", "a", "=", "True", ",", "[", "]", "if", "dict", ".", "__contains__", "(", "self", ",", "term", ")", ":", "a", "=", "self", "[", "term", "]", "[", "0", "]", ".", "keys", "(", ")", "for", "classifier", "in", "self", ".", "classifiers", ":", "a", ".", "extend", "(", "classifier", ".", "parents", "(", "term", ",", "*", "*", "kwargs", ")", "or", "[", "]", ")", "if", "recursive", ":", "for", "w", "in", "a", ":", "a", "+=", "dfs", "(", "w", ",", "recursive", ",", "visited", ",", "*", "*", "kwargs", ")", "return", "a", "return", "unique", "(", "dfs", "(", "self", ".", "_normalize", "(", "term", ")", ",", "recursive", ",", "{", "}", ",", "*", "*", "kwargs", ")", ")" ]
Returns a list of all semantic types for the given term. If recursive=True, traverses parents up to the root.
[ "Returns", "a", "list", "of", "all", "semantic", "types", "for", "the", "given", "term", ".", "If", "recursive", "=", "True", "traverses", "parents", "up", "to", "the", "root", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L335-L350
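parents() walks upward through the term-to-type links created by append(); a self-contained sketch under the same assumptions as the previous Taxonomy example:

from textblob_de.ext._pattern.text.search import Taxonomy

t = Taxonomy()
t.append("many", type="quantity")
t.append("quantity", type="amount")       # give "quantity" a parent of its own

print(t.parents("many"))                  # ['quantity']
print(t.parents("many", recursive=True))  # ['quantity', 'amount']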
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
Taxonomy.value
def value(self, term, **kwargs): """ Returns the value of the given term ("many" => "50-200") """ term = self._normalize(term) if term in self._values: return self._values[term] for classifier in self.classifiers: v = classifier.value(term, **kwargs) if v is not None: return v
python
def value(self, term, **kwargs): """ Returns the value of the given term ("many" => "50-200") """ term = self._normalize(term) if term in self._values: return self._values[term] for classifier in self.classifiers: v = classifier.value(term, **kwargs) if v is not None: return v
[ "def", "value", "(", "self", ",", "term", ",", "*", "*", "kwargs", ")", ":", "term", "=", "self", ".", "_normalize", "(", "term", ")", "if", "term", "in", "self", ".", "_values", ":", "return", "self", ".", "_values", "[", "term", "]", "for", "classifier", "in", "self", ".", "classifiers", ":", "v", "=", "classifier", ".", "value", "(", "term", ",", "*", "*", "kwargs", ")", "if", "v", "is", "not", "None", ":", "return", "v" ]
Returns the value of the given term ("many" => "50-200")
[ "Returns", "the", "value", "of", "the", "given", "term", "(", "many", "=", ">", "50", "-", "200", ")" ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L369-L378
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
Constraint.fromstring
def fromstring(cls, s, **kwargs): """ Returns a new Constraint from the given string. Uppercase words indicate either a tag ("NN", "JJ", "VP") or a taxonomy term (e.g., "PRODUCT", "PERSON"). Syntax: ( defines an optional constraint, e.g., "(JJ)". [ defines a constraint with spaces, e.g., "[Mac OS X | Windows Vista]". _ is converted to spaces, e.g., "Windows_Vista". | separates different options, e.g., "ADJP|ADVP". ! can be used as a word prefix to disallow it. * can be used as a wildcard character, e.g., "soft*|JJ*". ? as a suffix defines a constraint that is optional, e.g., "JJ?". + as a suffix defines a constraint that can span multiple words, e.g., "JJ+". ^ as a prefix defines a constraint that can only match the first word. These characters need to be escaped if used as content: "\(". """ C = cls(**kwargs) s = s.strip() s = s.strip("{}") s = s.strip() for i in range(3): # Wrapping order of control characters is ignored: # (NN+) == (NN)+ == NN?+ == NN+? == [NN+?] == [NN]+? if s.startswith("^"): s = s[1: ]; C.first = True if s.endswith("+") and not s.endswith("\+"): s = s[0:-1]; C.multiple = True if s.endswith("?") and not s.endswith("\?"): s = s[0:-1]; C.optional = True if s.startswith("(") and s.endswith(")"): s = s[1:-1]; C.optional = True if s.startswith("[") and s.endswith("]"): s = s[1:-1] s = re.sub(r"^\\\^", "^", s) s = re.sub(r"\\\+$", "+", s) s = s.replace("\_", "&uscore;") s = s.replace("_"," ") s = s.replace("&uscore;", "_") s = s.replace("&lparen;", "(") s = s.replace("&rparen;", ")") s = s.replace("&lbrack;", "[") s = s.replace("&rbrack;", "]") s = s.replace("&lcurly;", "{") s = s.replace("&rcurly;", "}") s = s.replace("\(", "(") s = s.replace("\)", ")") s = s.replace("\[", "[") s = s.replace("\]", "]") s = s.replace("\{", "{") s = s.replace("\}", "}") s = s.replace("\*", "*") s = s.replace("\?", "?") s = s.replace("\+", "+") s = s.replace("\^", "^") s = s.replace("\|", "&vdash;") s = s.split("|") s = [v.replace("&vdash;", "|").strip() for v in s] for v in s: C._append(v) return C
python
def fromstring(cls, s, **kwargs): """ Returns a new Constraint from the given string. Uppercase words indicate either a tag ("NN", "JJ", "VP") or a taxonomy term (e.g., "PRODUCT", "PERSON"). Syntax: ( defines an optional constraint, e.g., "(JJ)". [ defines a constraint with spaces, e.g., "[Mac OS X | Windows Vista]". _ is converted to spaces, e.g., "Windows_Vista". | separates different options, e.g., "ADJP|ADVP". ! can be used as a word prefix to disallow it. * can be used as a wildcard character, e.g., "soft*|JJ*". ? as a suffix defines a constraint that is optional, e.g., "JJ?". + as a suffix defines a constraint that can span multiple words, e.g., "JJ+". ^ as a prefix defines a constraint that can only match the first word. These characters need to be escaped if used as content: "\(". """ C = cls(**kwargs) s = s.strip() s = s.strip("{}") s = s.strip() for i in range(3): # Wrapping order of control characters is ignored: # (NN+) == (NN)+ == NN?+ == NN+? == [NN+?] == [NN]+? if s.startswith("^"): s = s[1: ]; C.first = True if s.endswith("+") and not s.endswith("\+"): s = s[0:-1]; C.multiple = True if s.endswith("?") and not s.endswith("\?"): s = s[0:-1]; C.optional = True if s.startswith("(") and s.endswith(")"): s = s[1:-1]; C.optional = True if s.startswith("[") and s.endswith("]"): s = s[1:-1] s = re.sub(r"^\\\^", "^", s) s = re.sub(r"\\\+$", "+", s) s = s.replace("\_", "&uscore;") s = s.replace("_"," ") s = s.replace("&uscore;", "_") s = s.replace("&lparen;", "(") s = s.replace("&rparen;", ")") s = s.replace("&lbrack;", "[") s = s.replace("&rbrack;", "]") s = s.replace("&lcurly;", "{") s = s.replace("&rcurly;", "}") s = s.replace("\(", "(") s = s.replace("\)", ")") s = s.replace("\[", "[") s = s.replace("\]", "]") s = s.replace("\{", "{") s = s.replace("\}", "}") s = s.replace("\*", "*") s = s.replace("\?", "?") s = s.replace("\+", "+") s = s.replace("\^", "^") s = s.replace("\|", "&vdash;") s = s.split("|") s = [v.replace("&vdash;", "|").strip() for v in s] for v in s: C._append(v) return C
[ "def", "fromstring", "(", "cls", ",", "s", ",", "*", "*", "kwargs", ")", ":", "C", "=", "cls", "(", "*", "*", "kwargs", ")", "s", "=", "s", ".", "strip", "(", ")", "s", "=", "s", ".", "strip", "(", "\"{}\"", ")", "s", "=", "s", ".", "strip", "(", ")", "for", "i", "in", "range", "(", "3", ")", ":", "# Wrapping order of control characters is ignored:", "# (NN+) == (NN)+ == NN?+ == NN+? == [NN+?] == [NN]+?", "if", "s", ".", "startswith", "(", "\"^\"", ")", ":", "s", "=", "s", "[", "1", ":", "]", "C", ".", "first", "=", "True", "if", "s", ".", "endswith", "(", "\"+\"", ")", "and", "not", "s", ".", "endswith", "(", "\"\\+\"", ")", ":", "s", "=", "s", "[", "0", ":", "-", "1", "]", "C", ".", "multiple", "=", "True", "if", "s", ".", "endswith", "(", "\"?\"", ")", "and", "not", "s", ".", "endswith", "(", "\"\\?\"", ")", ":", "s", "=", "s", "[", "0", ":", "-", "1", "]", "C", ".", "optional", "=", "True", "if", "s", ".", "startswith", "(", "\"(\"", ")", "and", "s", ".", "endswith", "(", "\")\"", ")", ":", "s", "=", "s", "[", "1", ":", "-", "1", "]", "C", ".", "optional", "=", "True", "if", "s", ".", "startswith", "(", "\"[\"", ")", "and", "s", ".", "endswith", "(", "\"]\"", ")", ":", "s", "=", "s", "[", "1", ":", "-", "1", "]", "s", "=", "re", ".", "sub", "(", "r\"^\\\\\\^\"", ",", "\"^\"", ",", "s", ")", "s", "=", "re", ".", "sub", "(", "r\"\\\\\\+$\"", ",", "\"+\"", ",", "s", ")", "s", "=", "s", ".", "replace", "(", "\"\\_\"", ",", "\"&uscore;\"", ")", "s", "=", "s", ".", "replace", "(", "\"_\"", ",", "\" \"", ")", "s", "=", "s", ".", "replace", "(", "\"&uscore;\"", ",", "\"_\"", ")", "s", "=", "s", ".", "replace", "(", "\"&lparen;\"", ",", "\"(\"", ")", "s", "=", "s", ".", "replace", "(", "\"&rparen;\"", ",", "\")\"", ")", "s", "=", "s", ".", "replace", "(", "\"&lbrack;\"", ",", "\"[\"", ")", "s", "=", "s", ".", "replace", "(", "\"&rbrack;\"", ",", "\"]\"", ")", "s", "=", "s", ".", "replace", "(", "\"&lcurly;\"", ",", "\"{\"", ")", "s", "=", "s", ".", "replace", "(", "\"&rcurly;\"", ",", "\"}\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\(\"", ",", "\"(\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\)\"", ",", "\")\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\[\"", ",", "\"[\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\]\"", ",", "\"]\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\{\"", ",", "\"{\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\}\"", ",", "\"}\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\*\"", ",", "\"*\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\?\"", ",", "\"?\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\+\"", ",", "\"+\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\^\"", ",", "\"^\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\|\"", ",", "\"&vdash;\"", ")", "s", "=", "s", ".", "split", "(", "\"|\"", ")", "s", "=", "[", "v", ".", "replace", "(", "\"&vdash;\"", ",", "\"|\"", ")", ".", "strip", "(", ")", "for", "v", "in", "s", "]", "for", "v", "in", "s", ":", "C", ".", "_append", "(", "v", ")", "return", "C" ]
Returns a new Constraint from the given string. Uppercase words indicate either a tag ("NN", "JJ", "VP") or a taxonomy term (e.g., "PRODUCT", "PERSON"). Syntax: ( defines an optional constraint, e.g., "(JJ)". [ defines a constraint with spaces, e.g., "[Mac OS X | Windows Vista]". _ is converted to spaces, e.g., "Windows_Vista". | separates different options, e.g., "ADJP|ADVP". ! can be used as a word prefix to disallow it. * can be used as a wildcard character, e.g., "soft*|JJ*". ? as a suffix defines a constraint that is optional, e.g., "JJ?". + as a suffix defines a constraint that can span multiple words, e.g., "JJ+". ^ as a prefix defines a constraint that can only match the first word. These characters need to be escaped if used as content: "\(".
[ "Returns", "a", "new", "Constraint", "from", "the", "given", "string", ".", "Uppercase", "words", "indicate", "either", "a", "tag", "(", "NN", "JJ", "VP", ")", "or", "a", "taxonomy", "term", "(", "e", ".", "g", ".", "PRODUCT", "PERSON", ")", ".", "Syntax", ":", "(", "defines", "an", "optional", "constraint", "e", ".", "g", ".", "(", "JJ", ")", ".", "[", "defines", "a", "constraint", "with", "spaces", "e", ".", "g", ".", "[", "Mac", "OS", "X", "|", "Windows", "Vista", "]", ".", "_", "is", "converted", "to", "spaces", "e", ".", "g", ".", "Windows_Vista", ".", "|", "separates", "different", "options", "e", ".", "g", ".", "ADJP|ADVP", ".", "!", "can", "be", "used", "as", "a", "word", "prefix", "to", "disallow", "it", ".", "*", "can", "be", "used", "as", "a", "wildcard", "character", "e", ".", "g", ".", "soft", "*", "|JJ", "*", ".", "?", "as", "a", "suffix", "defines", "a", "constraint", "that", "is", "optional", "e", ".", "g", ".", "JJ?", ".", "+", "as", "a", "suffix", "defines", "a", "constraint", "that", "can", "span", "multiple", "words", "e", ".", "g", ".", "JJ", "+", ".", "^", "as", "a", "prefix", "defines", "a", "constraint", "that", "can", "only", "match", "the", "first", "word", ".", "These", "characters", "need", "to", "be", "escaped", "if", "used", "as", "content", ":", "\\", "(", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L487-L546
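The suffix/prefix handling in Constraint.fromstring can be checked directly; this sketch only asserts the flags whose parsing is visible in the record above (where the parsed alternatives end up, tags versus words versus taxa, is decided by the _append helper, which is not shown here):

from textblob_de.ext._pattern.text.search import Constraint

c = Constraint.fromstring("JJ?+")
print(c.optional, c.multiple)   # True True   ('?' and '+' suffixes)

c = Constraint.fromstring("^[Mac_OS_X|Windows_Vista]")
print(c.first)                  # True        ('^' prefix: may only match the first word)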
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
Constraint.match
def match(self, word): """ Return True if the given Word is part of the constraint: - the word (or lemma) occurs in Constraint.words, OR - the word (or lemma) occurs in Constraint.taxa taxonomy tree, AND - the word and/or chunk tags match those defined in the constraint. Individual terms in Constraint.words or the taxonomy can contain wildcards (*). Some part-of-speech-tags can also contain wildcards: NN*, VB*, JJ*, RB* If the given word contains spaces (e.g., proper noun), the entire chunk will also be compared. For example: Constraint(words=["Mac OS X*"]) matches the word "Mac" if the word occurs in a Chunk("Mac OS X 10.5"). """ # If the constraint has a custom function it must return True. if self.custom is not None and self.custom(word) is False: return False # If the constraint can only match the first word, Word.index must be 0. if self.first and word.index > 0: return False # If the constraint defines excluded options, Word can not match any of these. if self.exclude and self.exclude.match(word): return False # If the constraint defines allowed tags, Word.tag needs to match one of these. if self.tags: if find(lambda w: _match(word.tag, w), self.tags) is None: return False # If the constraint defines allowed chunks, Word.chunk.tag needs to match one of these. if self.chunks: ch = word.chunk and word.chunk.tag or None if find(lambda w: _match(ch, w), self.chunks) is None: return False # If the constraint defines allowed role, Word.chunk.tag needs to match one of these. if self.roles: R = word.chunk and [r2 for r1, r2 in word.chunk.relations] or [] if find(lambda w: w in R, self.roles) is None: return False # If the constraint defines allowed words, # Word.string.lower() OR Word.lemma needs to match one of these. b = True # b==True when word in constraint (or Constraints.words=[]). if len(self.words) + len(self.taxa) > 0: s1 = word.string.lower() s2 = word.lemma b = False for w in itertools.chain(self.words, self.taxa): # If the constraint has a word with spaces (e.g., a proper noun), # compare it to the entire chunk. try: if " " in w and (s1 in w or s2 and s2 in w or "*" in w): s1 = word.chunk and word.chunk.string.lower() or s1 s2 = word.chunk and " ".join([x or "" for x in word.chunk.lemmata]) or s2 except: s1 = s1 s2 = None # Compare the word to the allowed words (which can contain wildcards). if _match(s1, w): b=True; break # Compare the word lemma to the allowed words, e.g., # if "was" is not in the constraint, perhaps "be" is, which is a good match. if s2 and _match(s2, w): b=True; break # If the constraint defines allowed taxonomy terms, # and the given word did not match an allowed word, traverse the taxonomy. # The search goes up from the given word to its parents in the taxonomy. # This is faster than traversing all the children of terms in Constraint.taxa. # The drawback is that: # 1) Wildcards in the taxonomy are not detected (use classifiers instead), # 2) Classifier.children() has no effect, only Classifier.parent(). if self.taxa and (not self.words or (self.words and not b)): for s in ( word.string, # "ants" word.lemma, # "ant" word.chunk and word.chunk.string or None, # "army ants" word.chunk and " ".join([x or "" for x in word.chunk.lemmata]) or None): # "army ant" if s is not None: if self.taxonomy.case_sensitive is False: s = s.lower() # Compare ancestors of the word to each term in Constraint.taxa. for p in self.taxonomy.parents(s, recursive=True): if find(lambda s: p==s, self.taxa): # No wildcards. return True return b
python
def match(self, word): """ Return True if the given Word is part of the constraint: - the word (or lemma) occurs in Constraint.words, OR - the word (or lemma) occurs in Constraint.taxa taxonomy tree, AND - the word and/or chunk tags match those defined in the constraint. Individual terms in Constraint.words or the taxonomy can contain wildcards (*). Some part-of-speech-tags can also contain wildcards: NN*, VB*, JJ*, RB* If the given word contains spaces (e.g., proper noun), the entire chunk will also be compared. For example: Constraint(words=["Mac OS X*"]) matches the word "Mac" if the word occurs in a Chunk("Mac OS X 10.5"). """ # If the constraint has a custom function it must return True. if self.custom is not None and self.custom(word) is False: return False # If the constraint can only match the first word, Word.index must be 0. if self.first and word.index > 0: return False # If the constraint defines excluded options, Word can not match any of these. if self.exclude and self.exclude.match(word): return False # If the constraint defines allowed tags, Word.tag needs to match one of these. if self.tags: if find(lambda w: _match(word.tag, w), self.tags) is None: return False # If the constraint defines allowed chunks, Word.chunk.tag needs to match one of these. if self.chunks: ch = word.chunk and word.chunk.tag or None if find(lambda w: _match(ch, w), self.chunks) is None: return False # If the constraint defines allowed role, Word.chunk.tag needs to match one of these. if self.roles: R = word.chunk and [r2 for r1, r2 in word.chunk.relations] or [] if find(lambda w: w in R, self.roles) is None: return False # If the constraint defines allowed words, # Word.string.lower() OR Word.lemma needs to match one of these. b = True # b==True when word in constraint (or Constraints.words=[]). if len(self.words) + len(self.taxa) > 0: s1 = word.string.lower() s2 = word.lemma b = False for w in itertools.chain(self.words, self.taxa): # If the constraint has a word with spaces (e.g., a proper noun), # compare it to the entire chunk. try: if " " in w and (s1 in w or s2 and s2 in w or "*" in w): s1 = word.chunk and word.chunk.string.lower() or s1 s2 = word.chunk and " ".join([x or "" for x in word.chunk.lemmata]) or s2 except: s1 = s1 s2 = None # Compare the word to the allowed words (which can contain wildcards). if _match(s1, w): b=True; break # Compare the word lemma to the allowed words, e.g., # if "was" is not in the constraint, perhaps "be" is, which is a good match. if s2 and _match(s2, w): b=True; break # If the constraint defines allowed taxonomy terms, # and the given word did not match an allowed word, traverse the taxonomy. # The search goes up from the given word to its parents in the taxonomy. # This is faster than traversing all the children of terms in Constraint.taxa. # The drawback is that: # 1) Wildcards in the taxonomy are not detected (use classifiers instead), # 2) Classifier.children() has no effect, only Classifier.parent(). if self.taxa and (not self.words or (self.words and not b)): for s in ( word.string, # "ants" word.lemma, # "ant" word.chunk and word.chunk.string or None, # "army ants" word.chunk and " ".join([x or "" for x in word.chunk.lemmata]) or None): # "army ant" if s is not None: if self.taxonomy.case_sensitive is False: s = s.lower() # Compare ancestors of the word to each term in Constraint.taxa. for p in self.taxonomy.parents(s, recursive=True): if find(lambda s: p==s, self.taxa): # No wildcards. return True return b
[ "def", "match", "(", "self", ",", "word", ")", ":", "# If the constraint has a custom function it must return True.", "if", "self", ".", "custom", "is", "not", "None", "and", "self", ".", "custom", "(", "word", ")", "is", "False", ":", "return", "False", "# If the constraint can only match the first word, Word.index must be 0.", "if", "self", ".", "first", "and", "word", ".", "index", ">", "0", ":", "return", "False", "# If the constraint defines excluded options, Word can not match any of these.", "if", "self", ".", "exclude", "and", "self", ".", "exclude", ".", "match", "(", "word", ")", ":", "return", "False", "# If the constraint defines allowed tags, Word.tag needs to match one of these.", "if", "self", ".", "tags", ":", "if", "find", "(", "lambda", "w", ":", "_match", "(", "word", ".", "tag", ",", "w", ")", ",", "self", ".", "tags", ")", "is", "None", ":", "return", "False", "# If the constraint defines allowed chunks, Word.chunk.tag needs to match one of these.", "if", "self", ".", "chunks", ":", "ch", "=", "word", ".", "chunk", "and", "word", ".", "chunk", ".", "tag", "or", "None", "if", "find", "(", "lambda", "w", ":", "_match", "(", "ch", ",", "w", ")", ",", "self", ".", "chunks", ")", "is", "None", ":", "return", "False", "# If the constraint defines allowed role, Word.chunk.tag needs to match one of these.", "if", "self", ".", "roles", ":", "R", "=", "word", ".", "chunk", "and", "[", "r2", "for", "r1", ",", "r2", "in", "word", ".", "chunk", ".", "relations", "]", "or", "[", "]", "if", "find", "(", "lambda", "w", ":", "w", "in", "R", ",", "self", ".", "roles", ")", "is", "None", ":", "return", "False", "# If the constraint defines allowed words,", "# Word.string.lower() OR Word.lemma needs to match one of these.", "b", "=", "True", "# b==True when word in constraint (or Constraints.words=[]).", "if", "len", "(", "self", ".", "words", ")", "+", "len", "(", "self", ".", "taxa", ")", ">", "0", ":", "s1", "=", "word", ".", "string", ".", "lower", "(", ")", "s2", "=", "word", ".", "lemma", "b", "=", "False", "for", "w", "in", "itertools", ".", "chain", "(", "self", ".", "words", ",", "self", ".", "taxa", ")", ":", "# If the constraint has a word with spaces (e.g., a proper noun),", "# compare it to the entire chunk.", "try", ":", "if", "\" \"", "in", "w", "and", "(", "s1", "in", "w", "or", "s2", "and", "s2", "in", "w", "or", "\"*\"", "in", "w", ")", ":", "s1", "=", "word", ".", "chunk", "and", "word", ".", "chunk", ".", "string", ".", "lower", "(", ")", "or", "s1", "s2", "=", "word", ".", "chunk", "and", "\" \"", ".", "join", "(", "[", "x", "or", "\"\"", "for", "x", "in", "word", ".", "chunk", ".", "lemmata", "]", ")", "or", "s2", "except", ":", "s1", "=", "s1", "s2", "=", "None", "# Compare the word to the allowed words (which can contain wildcards).", "if", "_match", "(", "s1", ",", "w", ")", ":", "b", "=", "True", "break", "# Compare the word lemma to the allowed words, e.g.,", "# if \"was\" is not in the constraint, perhaps \"be\" is, which is a good match.", "if", "s2", "and", "_match", "(", "s2", ",", "w", ")", ":", "b", "=", "True", "break", "# If the constraint defines allowed taxonomy terms,", "# and the given word did not match an allowed word, traverse the taxonomy.", "# The search goes up from the given word to its parents in the taxonomy.", "# This is faster than traversing all the children of terms in Constraint.taxa.", "# The drawback is that:", "# 1) Wildcards in the taxonomy are not detected (use classifiers instead),", "# 2) Classifier.children() has no effect, 
only Classifier.parent().", "if", "self", ".", "taxa", "and", "(", "not", "self", ".", "words", "or", "(", "self", ".", "words", "and", "not", "b", ")", ")", ":", "for", "s", "in", "(", "word", ".", "string", ",", "# \"ants\"", "word", ".", "lemma", ",", "# \"ant\"", "word", ".", "chunk", "and", "word", ".", "chunk", ".", "string", "or", "None", ",", "# \"army ants\"", "word", ".", "chunk", "and", "\" \"", ".", "join", "(", "[", "x", "or", "\"\"", "for", "x", "in", "word", ".", "chunk", ".", "lemmata", "]", ")", "or", "None", ")", ":", "# \"army ant\"", "if", "s", "is", "not", "None", ":", "if", "self", ".", "taxonomy", ".", "case_sensitive", "is", "False", ":", "s", "=", "s", ".", "lower", "(", ")", "# Compare ancestors of the word to each term in Constraint.taxa.", "for", "p", "in", "self", ".", "taxonomy", ".", "parents", "(", "s", ",", "recursive", "=", "True", ")", ":", "if", "find", "(", "lambda", "s", ":", "p", "==", "s", ",", "self", ".", "taxa", ")", ":", "# No wildcards.", "return", "True", "return", "b" ]
Return True if the given Word is part of the constraint: - the word (or lemma) occurs in Constraint.words, OR - the word (or lemma) occurs in Constraint.taxa taxonomy tree, AND - the word and/or chunk tags match those defined in the constraint. Individual terms in Constraint.words or the taxonomy can contain wildcards (*). Some part-of-speech-tags can also contain wildcards: NN*, VB*, JJ*, RB* If the given word contains spaces (e.g., proper noun), the entire chunk will also be compared. For example: Constraint(words=["Mac OS X*"]) matches the word "Mac" if the word occurs in a Chunk("Mac OS X 10.5").
[ "Return", "True", "if", "the", "given", "Word", "is", "part", "of", "the", "constraint", ":", "-", "the", "word", "(", "or", "lemma", ")", "occurs", "in", "Constraint", ".", "words", "OR", "-", "the", "word", "(", "or", "lemma", ")", "occurs", "in", "Constraint", ".", "taxa", "taxonomy", "tree", "AND", "-", "the", "word", "and", "/", "or", "chunk", "tags", "match", "those", "defined", "in", "the", "constraint", ".", "Individual", "terms", "in", "Constraint", ".", "words", "or", "the", "taxonomy", "can", "contain", "wildcards", "(", "*", ")", ".", "Some", "part", "-", "of", "-", "speech", "-", "tags", "can", "also", "contain", "wildcards", ":", "NN", "*", "VB", "*", "JJ", "*", "RB", "*", "If", "the", "given", "word", "contains", "spaces", "(", "e", ".", "g", ".", "proper", "noun", ")", "the", "entire", "chunk", "will", "also", "be", "compared", ".", "For", "example", ":", "Constraint", "(", "words", "=", "[", "Mac", "OS", "X", "*", "]", ")", "matches", "the", "word", "Mac", "if", "the", "word", "occurs", "in", "a", "Chunk", "(", "Mac", "OS", "X", "10", ".", "5", ")", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L571-L650
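A brief usage sketch (not part of the source file above): Constraint.match() is normally exercised indirectly through pattern's search() helper and the shared taxonomy object. The import paths below assume the standalone pattern library (inside textblob-de the same code is vendored under textblob_de.ext._pattern.text.search and ...text.de), and the German example sentence is illustrative only.

from pattern.search import search, taxonomy
from pattern.de import parse, Sentence

# Classify a few words; taxonomy lookups are case-insensitive by default.
taxonomy.append("Katze", type="tier")
taxonomy.append("Hund", type="tier")

s = Sentence(parse("Die Katze jagt den Hund."))
# An uppercase term that is not a POS tag is resolved against the taxonomy,
# so the single-constraint pattern "TIER" matches both classified words.
for m in search("TIER", s):
    print(m)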
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
Pattern.fromstring
def fromstring(cls, s, *args, **kwargs): """ Returns a new Pattern from the given string. Constraints are separated by a space. If a constraint contains a space, it must be wrapped in []. """ s = s.replace("\(", "&lparen;") s = s.replace("\)", "&rparen;") s = s.replace("\[", "&lbrack;") s = s.replace("\]", "&rbrack;") s = s.replace("\{", "&lcurly;") s = s.replace("\}", "&rcurly;") p = [] i = 0 for m in re.finditer(r"\[.*?\]|\(.*?\)", s): # Spaces in a range encapsulated in square brackets are encoded. # "[Windows Vista]" is one range, don't split on space. p.append(s[i:m.start()]) p.append(s[m.start():m.end()].replace(" ", "&space;")); i=m.end() p.append(s[i:]) s = "".join(p) s = s.replace("][", "] [") s = s.replace(")(", ") (") s = s.replace("\|", "&vdash;") s = re.sub(r"\s+\|\s+", "|", s) s = re.sub(r"\s+", " ", s) s = re.sub(r"\{\s+", "{", s) s = re.sub(r"\s+\}", "}", s) s = s.split(" ") s = [v.replace("&space;"," ") for v in s] P = cls([], *args, **kwargs) G, O, i = [], [], 0 for s in s: constraint = Constraint.fromstring(s.strip("{}"), taxonomy=kwargs.get("taxonomy", TAXONOMY)) constraint.index = len(P.sequence) P.sequence.append(constraint) # Push a new group on the stack if string starts with "{". # Parse constraint from string, add it to all open groups. # Pop latest group from stack if string ends with "}". # Insert groups in opened-first order (i). while s.startswith("{"): s = s[1:] G.append((i, [])); i+=1 O.append([]) for g in G: g[1].append(constraint) while s.endswith("}"): s = s[:-1] if G: O[G[-1][0]] = G[-1][1]; G.pop() P.groups = [g for g in O if g] return P
python
def fromstring(cls, s, *args, **kwargs): """ Returns a new Pattern from the given string. Constraints are separated by a space. If a constraint contains a space, it must be wrapped in []. """ s = s.replace("\(", "&lparen;") s = s.replace("\)", "&rparen;") s = s.replace("\[", "&lbrack;") s = s.replace("\]", "&rbrack;") s = s.replace("\{", "&lcurly;") s = s.replace("\}", "&rcurly;") p = [] i = 0 for m in re.finditer(r"\[.*?\]|\(.*?\)", s): # Spaces in a range encapsulated in square brackets are encoded. # "[Windows Vista]" is one range, don't split on space. p.append(s[i:m.start()]) p.append(s[m.start():m.end()].replace(" ", "&space;")); i=m.end() p.append(s[i:]) s = "".join(p) s = s.replace("][", "] [") s = s.replace(")(", ") (") s = s.replace("\|", "&vdash;") s = re.sub(r"\s+\|\s+", "|", s) s = re.sub(r"\s+", " ", s) s = re.sub(r"\{\s+", "{", s) s = re.sub(r"\s+\}", "}", s) s = s.split(" ") s = [v.replace("&space;"," ") for v in s] P = cls([], *args, **kwargs) G, O, i = [], [], 0 for s in s: constraint = Constraint.fromstring(s.strip("{}"), taxonomy=kwargs.get("taxonomy", TAXONOMY)) constraint.index = len(P.sequence) P.sequence.append(constraint) # Push a new group on the stack if string starts with "{". # Parse constraint from string, add it to all open groups. # Pop latest group from stack if string ends with "}". # Insert groups in opened-first order (i). while s.startswith("{"): s = s[1:] G.append((i, [])); i+=1 O.append([]) for g in G: g[1].append(constraint) while s.endswith("}"): s = s[:-1] if G: O[G[-1][0]] = G[-1][1]; G.pop() P.groups = [g for g in O if g] return P
[ "def", "fromstring", "(", "cls", ",", "s", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "s", "=", "s", ".", "replace", "(", "\"\\(\"", ",", "\"&lparen;\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\)\"", ",", "\"&rparen;\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\[\"", ",", "\"&lbrack;\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\]\"", ",", "\"&rbrack;\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\{\"", ",", "\"&lcurly;\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\}\"", ",", "\"&rcurly;\"", ")", "p", "=", "[", "]", "i", "=", "0", "for", "m", "in", "re", ".", "finditer", "(", "r\"\\[.*?\\]|\\(.*?\\)\"", ",", "s", ")", ":", "# Spaces in a range encapsulated in square brackets are encoded.", "# \"[Windows Vista]\" is one range, don't split on space.", "p", ".", "append", "(", "s", "[", "i", ":", "m", ".", "start", "(", ")", "]", ")", "p", ".", "append", "(", "s", "[", "m", ".", "start", "(", ")", ":", "m", ".", "end", "(", ")", "]", ".", "replace", "(", "\" \"", ",", "\"&space;\"", ")", ")", "i", "=", "m", ".", "end", "(", ")", "p", ".", "append", "(", "s", "[", "i", ":", "]", ")", "s", "=", "\"\"", ".", "join", "(", "p", ")", "s", "=", "s", ".", "replace", "(", "\"][\"", ",", "\"] [\"", ")", "s", "=", "s", ".", "replace", "(", "\")(\"", ",", "\") (\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\|\"", ",", "\"&vdash;\"", ")", "s", "=", "re", ".", "sub", "(", "r\"\\s+\\|\\s+\"", ",", "\"|\"", ",", "s", ")", "s", "=", "re", ".", "sub", "(", "r\"\\s+\"", ",", "\" \"", ",", "s", ")", "s", "=", "re", ".", "sub", "(", "r\"\\{\\s+\"", ",", "\"{\"", ",", "s", ")", "s", "=", "re", ".", "sub", "(", "r\"\\s+\\}\"", ",", "\"}\"", ",", "s", ")", "s", "=", "s", ".", "split", "(", "\" \"", ")", "s", "=", "[", "v", ".", "replace", "(", "\"&space;\"", ",", "\" \"", ")", "for", "v", "in", "s", "]", "P", "=", "cls", "(", "[", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")", "G", ",", "O", ",", "i", "=", "[", "]", ",", "[", "]", ",", "0", "for", "s", "in", "s", ":", "constraint", "=", "Constraint", ".", "fromstring", "(", "s", ".", "strip", "(", "\"{}\"", ")", ",", "taxonomy", "=", "kwargs", ".", "get", "(", "\"taxonomy\"", ",", "TAXONOMY", ")", ")", "constraint", ".", "index", "=", "len", "(", "P", ".", "sequence", ")", "P", ".", "sequence", ".", "append", "(", "constraint", ")", "# Push a new group on the stack if string starts with \"{\".", "# Parse constraint from string, add it to all open groups.", "# Pop latest group from stack if string ends with \"}\".", "# Insert groups in opened-first order (i).", "while", "s", ".", "startswith", "(", "\"{\"", ")", ":", "s", "=", "s", "[", "1", ":", "]", "G", ".", "append", "(", "(", "i", ",", "[", "]", ")", ")", "i", "+=", "1", "O", ".", "append", "(", "[", "]", ")", "for", "g", "in", "G", ":", "g", "[", "1", "]", ".", "append", "(", "constraint", ")", "while", "s", ".", "endswith", "(", "\"}\"", ")", ":", "s", "=", "s", "[", ":", "-", "1", "]", "if", "G", ":", "O", "[", "G", "[", "-", "1", "]", "[", "0", "]", "]", "=", "G", "[", "-", "1", "]", "[", "1", "]", "G", ".", "pop", "(", ")", "P", ".", "groups", "=", "[", "g", "for", "g", "in", "O", "if", "g", "]", "return", "P" ]
Returns a new Pattern from the given string. Constraints are separated by a space. If a constraint contains a space, it must be wrapped in [].
[ "Returns", "a", "new", "Pattern", "from", "the", "given", "string", ".", "Constraints", "are", "separated", "by", "a", "space", ".", "If", "a", "constraint", "contains", "a", "space", "it", "must", "be", "wrapped", "in", "[]", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L718-L767
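A hedged sketch of Pattern.fromstring() in use, assuming the standalone pattern import paths (pattern.search, pattern.de); the example sentence and the tags in the comments are illustrative.

from pattern.search import Pattern
from pattern.de import parse, Sentence

# "{...}" defines match group 1; tags may carry wildcards (NN* covers NN, NNS, ...).
p = Pattern.fromstring("{JJ NN*}")
m = p.match(Sentence(parse("Der schwarze Kater schläft.")))
if m is not None:
    print(m.group(1))   # adjective + noun words, e.g. schwarze/JJ Kater/NN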
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
Pattern.scan
def scan(self, string): """ Returns True if search(Sentence(string)) may yield matches. If is often faster to scan prior to creating a Sentence and searching it. """ # In the following example, first scan the string for "good" and "bad": # p = Pattern.fromstring("good|bad NN") # for s in open("parsed.txt"): # if p.scan(s): # s = Sentence(s) # m = p.search(s) # if m: # print(m) w = (constraint.words for constraint in self.sequence if not constraint.optional) w = itertools.chain(*w) w = [w.strip(WILDCARD) for w in w if WILDCARD not in w[1:-1]] if w and not any(w in string.lower() for w in w): return False return True
python
def scan(self, string): """ Returns True if search(Sentence(string)) may yield matches. If is often faster to scan prior to creating a Sentence and searching it. """ # In the following example, first scan the string for "good" and "bad": # p = Pattern.fromstring("good|bad NN") # for s in open("parsed.txt"): # if p.scan(s): # s = Sentence(s) # m = p.search(s) # if m: # print(m) w = (constraint.words for constraint in self.sequence if not constraint.optional) w = itertools.chain(*w) w = [w.strip(WILDCARD) for w in w if WILDCARD not in w[1:-1]] if w and not any(w in string.lower() for w in w): return False return True
[ "def", "scan", "(", "self", ",", "string", ")", ":", "# In the following example, first scan the string for \"good\" and \"bad\":", "# p = Pattern.fromstring(\"good|bad NN\")", "# for s in open(\"parsed.txt\"):", "# if p.scan(s):", "# s = Sentence(s)", "# m = p.search(s)", "# if m:", "# print(m)", "w", "=", "(", "constraint", ".", "words", "for", "constraint", "in", "self", ".", "sequence", "if", "not", "constraint", ".", "optional", ")", "w", "=", "itertools", ".", "chain", "(", "*", "w", ")", "w", "=", "[", "w", ".", "strip", "(", "WILDCARD", ")", "for", "w", "in", "w", "if", "WILDCARD", "not", "in", "w", "[", "1", ":", "-", "1", "]", "]", "if", "w", "and", "not", "any", "(", "w", "in", "string", ".", "lower", "(", ")", "for", "w", "in", "w", ")", ":", "return", "False", "return", "True" ]
Returns True if search(Sentence(string)) may yield matches. If is often faster to scan prior to creating a Sentence and searching it.
[ "Returns", "True", "if", "search", "(", "Sentence", "(", "string", "))", "may", "yield", "matches", ".", "If", "is", "often", "faster", "to", "scan", "prior", "to", "creating", "a", "Sentence", "and", "searching", "it", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L769-L786
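The pre-filtering idiom described in the code comment above, written out as a runnable sketch; import paths and the sample lines are assumptions.

from pattern.search import Pattern
from pattern.de import parse, Sentence

p = Pattern.fromstring("gut*|schlecht* NN*")
for line in ("Das ist ein gutes Buch.", "Völlig anderes Thema."):
    # scan() only does a cheap substring check for the constraint words,
    # so the expensive parse/match step runs for promising lines only.
    if p.scan(line):
        m = p.match(Sentence(parse(line)))
        if m is not None:
            print(m)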
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
Pattern.search
def search(self, sentence): """ Returns a list of all matches found in the given sentence. """ if sentence.__class__.__name__ == "Sentence": pass elif isinstance(sentence, list) or sentence.__class__.__name__ == "Text": a=[]; [a.extend(self.search(s)) for s in sentence]; return a elif isinstance(sentence, basestring): sentence = Sentence(sentence) elif isinstance(sentence, Match) and len(sentence) > 0: sentence = sentence[0].sentence.slice(sentence[0].index, sentence[-1].index + 1) a = [] v = self._variations() u = {} m = self.match(sentence, _v=v) while m: a.append(m) m = self.match(sentence, start=m.words[-1].index+1, _v=v, _u=u) return a
python
def search(self, sentence): """ Returns a list of all matches found in the given sentence. """ if sentence.__class__.__name__ == "Sentence": pass elif isinstance(sentence, list) or sentence.__class__.__name__ == "Text": a=[]; [a.extend(self.search(s)) for s in sentence]; return a elif isinstance(sentence, basestring): sentence = Sentence(sentence) elif isinstance(sentence, Match) and len(sentence) > 0: sentence = sentence[0].sentence.slice(sentence[0].index, sentence[-1].index + 1) a = [] v = self._variations() u = {} m = self.match(sentence, _v=v) while m: a.append(m) m = self.match(sentence, start=m.words[-1].index+1, _v=v, _u=u) return a
[ "def", "search", "(", "self", ",", "sentence", ")", ":", "if", "sentence", ".", "__class__", ".", "__name__", "==", "\"Sentence\"", ":", "pass", "elif", "isinstance", "(", "sentence", ",", "list", ")", "or", "sentence", ".", "__class__", ".", "__name__", "==", "\"Text\"", ":", "a", "=", "[", "]", "[", "a", ".", "extend", "(", "self", ".", "search", "(", "s", ")", ")", "for", "s", "in", "sentence", "]", "return", "a", "elif", "isinstance", "(", "sentence", ",", "basestring", ")", ":", "sentence", "=", "Sentence", "(", "sentence", ")", "elif", "isinstance", "(", "sentence", ",", "Match", ")", "and", "len", "(", "sentence", ")", ">", "0", ":", "sentence", "=", "sentence", "[", "0", "]", ".", "sentence", ".", "slice", "(", "sentence", "[", "0", "]", ".", "index", ",", "sentence", "[", "-", "1", "]", ".", "index", "+", "1", ")", "a", "=", "[", "]", "v", "=", "self", ".", "_variations", "(", ")", "u", "=", "{", "}", "m", "=", "self", ".", "match", "(", "sentence", ",", "_v", "=", "v", ")", "while", "m", ":", "a", ".", "append", "(", "m", ")", "m", "=", "self", ".", "match", "(", "sentence", ",", "start", "=", "m", ".", "words", "[", "-", "1", "]", ".", "index", "+", "1", ",", "_v", "=", "v", ",", "_u", "=", "u", ")", "return", "a" ]
Returns a list of all matches found in the given sentence.
[ "Returns", "a", "list", "of", "all", "matches", "found", "in", "the", "given", "sentence", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L788-L806
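A short sketch contrasting search() (all leftmost-longest matches) with match() (first match only); import paths and the example sentence are assumptions.

from pattern.search import Pattern
from pattern.de import parse, Sentence

p = Pattern.fromstring("DT? JJ? NN*")          # optional determiner/adjective + noun
s = Sentence(parse("Der alte Mann und das kleine Kind warten."))
for m in p.search(s):                          # expected: one match per noun phrase
    print(m.words)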
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
Pattern.match
def match(self, sentence, start=0, _v=None, _u=None): """ Returns the first match found in the given sentence, or None. """ if sentence.__class__.__name__ == "Sentence": pass elif isinstance(sentence, list) or sentence.__class__.__name__ == "Text": return find(lambda m,s: m is not None, ((self.match(s, start, _v), s) for s in sentence))[0] elif isinstance(sentence, basestring): sentence = Sentence(sentence) elif isinstance(sentence, Match) and len(sentence) > 0: sentence = sentence[0].sentence.slice(sentence[0].index, sentence[-1].index + 1) # Variations (_v) further down the list may match words more to the front. # We need to check all of them. Unmatched variations are blacklisted (_u). # Pattern.search() calls Pattern.match() with a persistent blacklist (1.5x faster). a = [] for sequence in (_v is not None and _v or self._variations()): if _u is not None and id(sequence) in _u: continue m = self._match(sequence, sentence, start) if m is not None: a.append((m.words[0].index, len(m.words), m)) if m is not None and m.words[0].index == start: return m if m is None and _u is not None: _u[id(sequence)] = False # Return the leftmost-longest. if len(a) > 0: return sorted(a)[0][-1]
python
def match(self, sentence, start=0, _v=None, _u=None): """ Returns the first match found in the given sentence, or None. """ if sentence.__class__.__name__ == "Sentence": pass elif isinstance(sentence, list) or sentence.__class__.__name__ == "Text": return find(lambda m,s: m is not None, ((self.match(s, start, _v), s) for s in sentence))[0] elif isinstance(sentence, basestring): sentence = Sentence(sentence) elif isinstance(sentence, Match) and len(sentence) > 0: sentence = sentence[0].sentence.slice(sentence[0].index, sentence[-1].index + 1) # Variations (_v) further down the list may match words more to the front. # We need to check all of them. Unmatched variations are blacklisted (_u). # Pattern.search() calls Pattern.match() with a persistent blacklist (1.5x faster). a = [] for sequence in (_v is not None and _v or self._variations()): if _u is not None and id(sequence) in _u: continue m = self._match(sequence, sentence, start) if m is not None: a.append((m.words[0].index, len(m.words), m)) if m is not None and m.words[0].index == start: return m if m is None and _u is not None: _u[id(sequence)] = False # Return the leftmost-longest. if len(a) > 0: return sorted(a)[0][-1]
[ "def", "match", "(", "self", ",", "sentence", ",", "start", "=", "0", ",", "_v", "=", "None", ",", "_u", "=", "None", ")", ":", "if", "sentence", ".", "__class__", ".", "__name__", "==", "\"Sentence\"", ":", "pass", "elif", "isinstance", "(", "sentence", ",", "list", ")", "or", "sentence", ".", "__class__", ".", "__name__", "==", "\"Text\"", ":", "return", "find", "(", "lambda", "m", ",", "s", ":", "m", "is", "not", "None", ",", "(", "(", "self", ".", "match", "(", "s", ",", "start", ",", "_v", ")", ",", "s", ")", "for", "s", "in", "sentence", ")", ")", "[", "0", "]", "elif", "isinstance", "(", "sentence", ",", "basestring", ")", ":", "sentence", "=", "Sentence", "(", "sentence", ")", "elif", "isinstance", "(", "sentence", ",", "Match", ")", "and", "len", "(", "sentence", ")", ">", "0", ":", "sentence", "=", "sentence", "[", "0", "]", ".", "sentence", ".", "slice", "(", "sentence", "[", "0", "]", ".", "index", ",", "sentence", "[", "-", "1", "]", ".", "index", "+", "1", ")", "# Variations (_v) further down the list may match words more to the front.", "# We need to check all of them. Unmatched variations are blacklisted (_u).", "# Pattern.search() calls Pattern.match() with a persistent blacklist (1.5x faster).", "a", "=", "[", "]", "for", "sequence", "in", "(", "_v", "is", "not", "None", "and", "_v", "or", "self", ".", "_variations", "(", ")", ")", ":", "if", "_u", "is", "not", "None", "and", "id", "(", "sequence", ")", "in", "_u", ":", "continue", "m", "=", "self", ".", "_match", "(", "sequence", ",", "sentence", ",", "start", ")", "if", "m", "is", "not", "None", ":", "a", ".", "append", "(", "(", "m", ".", "words", "[", "0", "]", ".", "index", ",", "len", "(", "m", ".", "words", ")", ",", "m", ")", ")", "if", "m", "is", "not", "None", "and", "m", ".", "words", "[", "0", "]", ".", "index", "==", "start", ":", "return", "m", "if", "m", "is", "None", "and", "_u", "is", "not", "None", ":", "_u", "[", "id", "(", "sequence", ")", "]", "=", "False", "# Return the leftmost-longest.", "if", "len", "(", "a", ")", ">", "0", ":", "return", "sorted", "(", "a", ")", "[", "0", "]", "[", "-", "1", "]" ]
Returns the first match found in the given sentence, or None.
[ "Returns", "the", "first", "match", "found", "in", "the", "given", "sentence", "or", "None", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L808-L835
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
Match.constraint
def constraint(self, word): """ Returns the constraint that matches the given Word, or None. """ if word.index in self._map1: return self._map1[word.index]
python
def constraint(self, word): """ Returns the constraint that matches the given Word, or None. """ if word.index in self._map1: return self._map1[word.index]
[ "def", "constraint", "(", "self", ",", "word", ")", ":", "if", "word", ".", "index", "in", "self", ".", "_map1", ":", "return", "self", ".", "_map1", "[", "word", ".", "index", "]" ]
Returns the constraint that matches the given Word, or None.
[ "Returns", "the", "constraint", "that", "matches", "the", "given", "Word", "or", "None", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L1005-L1009
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
Match.constraints
def constraints(self, chunk): """ Returns a list of constraints that match the given Chunk. """ a = [self._map1[w.index] for w in chunk.words if w.index in self._map1] b = []; [b.append(constraint) for constraint in a if constraint not in b] return b
python
def constraints(self, chunk): """ Returns a list of constraints that match the given Chunk. """ a = [self._map1[w.index] for w in chunk.words if w.index in self._map1] b = []; [b.append(constraint) for constraint in a if constraint not in b] return b
[ "def", "constraints", "(", "self", ",", "chunk", ")", ":", "a", "=", "[", "self", ".", "_map1", "[", "w", ".", "index", "]", "for", "w", "in", "chunk", ".", "words", "if", "w", ".", "index", "in", "self", ".", "_map1", "]", "b", "=", "[", "]", "[", "b", ".", "append", "(", "constraint", ")", "for", "constraint", "in", "a", "if", "constraint", "not", "in", "b", "]", "return", "b" ]
Returns a list of constraints that match the given Chunk.
[ "Returns", "a", "list", "of", "constraints", "that", "match", "the", "given", "Chunk", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L1011-L1016
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
Match.constituents
def constituents(self, constraint=None): """ Returns a list of Word and Chunk objects, where words have been grouped into their chunks whenever possible. Optionally, returns only chunks/words that match given constraint(s), or constraint index. """ # Select only words that match the given constraint. # Note: this will only work with constraints from Match.pattern.sequence. W = self.words n = len(self.pattern.sequence) if isinstance(constraint, (int, Constraint)): if isinstance(constraint, int): i = constraint i = i<0 and i%n or i else: i = self.pattern.sequence.index(constraint) W = self._map2.get(i,[]) W = [self.words[i-self.words[0].index] for i in W] if isinstance(constraint, (list, tuple)): W = []; [W.extend(self._map2.get(j<0 and j%n or j,[])) for j in constraint] W = [self.words[i-self.words[0].index] for i in W] W = unique(W) a = [] i = 0 while i < len(W): w = W[i] if w.chunk and W[i:i+len(w.chunk)] == w.chunk.words: i += len(w.chunk) - 1 a.append(w.chunk) else: a.append(w) i += 1 return a
python
def constituents(self, constraint=None): """ Returns a list of Word and Chunk objects, where words have been grouped into their chunks whenever possible. Optionally, returns only chunks/words that match given constraint(s), or constraint index. """ # Select only words that match the given constraint. # Note: this will only work with constraints from Match.pattern.sequence. W = self.words n = len(self.pattern.sequence) if isinstance(constraint, (int, Constraint)): if isinstance(constraint, int): i = constraint i = i<0 and i%n or i else: i = self.pattern.sequence.index(constraint) W = self._map2.get(i,[]) W = [self.words[i-self.words[0].index] for i in W] if isinstance(constraint, (list, tuple)): W = []; [W.extend(self._map2.get(j<0 and j%n or j,[])) for j in constraint] W = [self.words[i-self.words[0].index] for i in W] W = unique(W) a = [] i = 0 while i < len(W): w = W[i] if w.chunk and W[i:i+len(w.chunk)] == w.chunk.words: i += len(w.chunk) - 1 a.append(w.chunk) else: a.append(w) i += 1 return a
[ "def", "constituents", "(", "self", ",", "constraint", "=", "None", ")", ":", "# Select only words that match the given constraint.", "# Note: this will only work with constraints from Match.pattern.sequence.", "W", "=", "self", ".", "words", "n", "=", "len", "(", "self", ".", "pattern", ".", "sequence", ")", "if", "isinstance", "(", "constraint", ",", "(", "int", ",", "Constraint", ")", ")", ":", "if", "isinstance", "(", "constraint", ",", "int", ")", ":", "i", "=", "constraint", "i", "=", "i", "<", "0", "and", "i", "%", "n", "or", "i", "else", ":", "i", "=", "self", ".", "pattern", ".", "sequence", ".", "index", "(", "constraint", ")", "W", "=", "self", ".", "_map2", ".", "get", "(", "i", ",", "[", "]", ")", "W", "=", "[", "self", ".", "words", "[", "i", "-", "self", ".", "words", "[", "0", "]", ".", "index", "]", "for", "i", "in", "W", "]", "if", "isinstance", "(", "constraint", ",", "(", "list", ",", "tuple", ")", ")", ":", "W", "=", "[", "]", "[", "W", ".", "extend", "(", "self", ".", "_map2", ".", "get", "(", "j", "<", "0", "and", "j", "%", "n", "or", "j", ",", "[", "]", ")", ")", "for", "j", "in", "constraint", "]", "W", "=", "[", "self", ".", "words", "[", "i", "-", "self", ".", "words", "[", "0", "]", ".", "index", "]", "for", "i", "in", "W", "]", "W", "=", "unique", "(", "W", ")", "a", "=", "[", "]", "i", "=", "0", "while", "i", "<", "len", "(", "W", ")", ":", "w", "=", "W", "[", "i", "]", "if", "w", ".", "chunk", "and", "W", "[", "i", ":", "i", "+", "len", "(", "w", ".", "chunk", ")", "]", "==", "w", ".", "chunk", ".", "words", ":", "i", "+=", "len", "(", "w", ".", "chunk", ")", "-", "1", "a", ".", "append", "(", "w", ".", "chunk", ")", "else", ":", "a", ".", "append", "(", "w", ")", "i", "+=", "1", "return", "a" ]
Returns a list of Word and Chunk objects, where words have been grouped into their chunks whenever possible. Optionally, returns only chunks/words that match given constraint(s), or constraint index.
[ "Returns", "a", "list", "of", "Word", "and", "Chunk", "objects", "where", "words", "have", "been", "grouped", "into", "their", "chunks", "whenever", "possible", ".", "Optionally", "returns", "only", "chunks", "/", "words", "that", "match", "given", "constraint", "(", "s", ")", "or", "constraint", "index", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L1018-L1049
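Sketch of constituents() regrouping matched words into their chunks, so the result mixes Chunk and Word objects; import paths and the indicated output are assumptions.

from pattern.search import search
from pattern.de import parse, Sentence

s = Sentence(parse("Die kleine Katze schläft auf dem Sofa."))
matches = search("NP VB*", s)
if matches:
    # Something along the lines of [Chunk('Die kleine Katze'), Word('schläft')].
    print(matches[0].constituents())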
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
Match.group
def group(self, index, chunked=False): """ Returns a list of Word objects that match the given group. With chunked=True, returns a list of Word + Chunk objects - see Match.constituents(). A group consists of consecutive constraints wrapped in { }, e.g., search("{JJ JJ} NN", Sentence(parse("big black cat"))).group(1) => big black. """ if index < 0 or index > len(self.pattern.groups): raise IndexError("no such group") if index > 0 and index <= len(self.pattern.groups): g = self.pattern.groups[index-1] if index == 0: g = self.pattern.sequence if chunked is True: return Group(self, self.constituents(constraint=[self.pattern.sequence.index(x) for x in g])) return Group(self, [w for w in self.words if self.constraint(w) in g])
python
def group(self, index, chunked=False): """ Returns a list of Word objects that match the given group. With chunked=True, returns a list of Word + Chunk objects - see Match.constituents(). A group consists of consecutive constraints wrapped in { }, e.g., search("{JJ JJ} NN", Sentence(parse("big black cat"))).group(1) => big black. """ if index < 0 or index > len(self.pattern.groups): raise IndexError("no such group") if index > 0 and index <= len(self.pattern.groups): g = self.pattern.groups[index-1] if index == 0: g = self.pattern.sequence if chunked is True: return Group(self, self.constituents(constraint=[self.pattern.sequence.index(x) for x in g])) return Group(self, [w for w in self.words if self.constraint(w) in g])
[ "def", "group", "(", "self", ",", "index", ",", "chunked", "=", "False", ")", ":", "if", "index", "<", "0", "or", "index", ">", "len", "(", "self", ".", "pattern", ".", "groups", ")", ":", "raise", "IndexError", "(", "\"no such group\"", ")", "if", "index", ">", "0", "and", "index", "<=", "len", "(", "self", ".", "pattern", ".", "groups", ")", ":", "g", "=", "self", ".", "pattern", ".", "groups", "[", "index", "-", "1", "]", "if", "index", "==", "0", ":", "g", "=", "self", ".", "pattern", ".", "sequence", "if", "chunked", "is", "True", ":", "return", "Group", "(", "self", ",", "self", ".", "constituents", "(", "constraint", "=", "[", "self", ".", "pattern", ".", "sequence", ".", "index", "(", "x", ")", "for", "x", "in", "g", "]", ")", ")", "return", "Group", "(", "self", ",", "[", "w", "for", "w", "in", "self", ".", "words", "if", "self", ".", "constraint", "(", "w", ")", "in", "g", "]", ")" ]
Returns a list of Word objects that match the given group. With chunked=True, returns a list of Word + Chunk objects - see Match.constituents(). A group consists of consecutive constraints wrapped in { }, e.g., search("{JJ JJ} NN", Sentence(parse("big black cat"))).group(1) => big black.
[ "Returns", "a", "list", "of", "Word", "objects", "that", "match", "the", "given", "group", ".", "With", "chunked", "=", "True", "returns", "a", "list", "of", "Word", "+", "Chunk", "objects", "-", "see", "Match", ".", "constituents", "()", ".", "A", "group", "consists", "of", "consecutive", "constraints", "wrapped", "in", "{", "}", "e", ".", "g", ".", "search", "(", "{", "JJ", "JJ", "}", "NN", "Sentence", "(", "parse", "(", "big", "black", "cat", ")))", ".", "group", "(", "1", ")", "=", ">", "big", "black", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L1051-L1065
markuskiller/textblob-de
textblob_de/sentiments.py
PatternAnalyzer.analyze
def analyze(self, text): """Return the sentiment as a tuple of the form: ``(polarity, subjectivity)`` :param str text: A string. .. todo:: Figure out best format to be passed to the analyzer. There might be a better format than a string of space separated lemmas (e.g. with pos tags) but the parsing/tagging results look rather inaccurate and a wrong pos might prevent the lexicon lookup of an otherwise correctly lemmatized word form (or would it not?) - further checks needed. """ if self.lemmatize: text = self._lemmatize(text) return self.RETURN_TYPE(*pattern_sentiment(text))
python
def analyze(self, text): """Return the sentiment as a tuple of the form: ``(polarity, subjectivity)`` :param str text: A string. .. todo:: Figure out best format to be passed to the analyzer. There might be a better format than a string of space separated lemmas (e.g. with pos tags) but the parsing/tagging results look rather inaccurate and a wrong pos might prevent the lexicon lookup of an otherwise correctly lemmatized word form (or would it not?) - further checks needed. """ if self.lemmatize: text = self._lemmatize(text) return self.RETURN_TYPE(*pattern_sentiment(text))
[ "def", "analyze", "(", "self", ",", "text", ")", ":", "if", "self", ".", "lemmatize", ":", "text", "=", "self", ".", "_lemmatize", "(", "text", ")", "return", "self", ".", "RETURN_TYPE", "(", "*", "pattern_sentiment", "(", "text", ")", ")" ]
Return the sentiment as a tuple of the form: ``(polarity, subjectivity)`` :param str text: A string. .. todo:: Figure out best format to be passed to the analyzer. There might be a better format than a string of space separated lemmas (e.g. with pos tags) but the parsing/tagging results look rather inaccurate and a wrong pos might prevent the lexicon lookup of an otherwise correctly lemmatized word form (or would it not?) - further checks needed.
[ "Return", "the", "sentiment", "as", "a", "tuple", "of", "the", "form", ":", "(", "polarity", "subjectivity", ")" ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/sentiments.py#L124-L142
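PatternAnalyzer is wired in as TextBlobDE's default sentiment analyzer, so the usual way to reach analyze() is through the blob's sentiment property; the example text is illustrative.

from textblob_de import TextBlobDE

blob = TextBlobDE("Das Essen war ausgezeichnet, aber der Service war furchtbar.")
# Returns the RETURN_TYPE named tuple Sentiment(polarity, subjectivity),
# with polarity in [-1.0, 1.0] and subjectivity in [0.0, 1.0].
print(blob.sentiment)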
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/__init__.py
stts2universal
def stts2universal(token, tag): """ Converts an STTS tag to a universal tag. For example: ohne/APPR => ohne/PREP """ if tag in ("KON", "KOUI", "KOUS", "KOKOM"): return (token, CONJ) if tag in ("PTKZU", "PTKNEG", "PTKVZ", "PTKANT"): return (token, PRT) if tag in ("PDF", "PDAT", "PIS", "PIAT", "PIDAT", "PPER", "PPOS", "PPOSAT"): return (token, PRON) if tag in ("PRELS", "PRELAT", "PRF", "PWS", "PWAT", "PWAV", "PAV"): return (token, PRON) return penntreebank2universal(*stts2penntreebank(token, tag))
python
def stts2universal(token, tag): """ Converts an STTS tag to a universal tag. For example: ohne/APPR => ohne/PREP """ if tag in ("KON", "KOUI", "KOUS", "KOKOM"): return (token, CONJ) if tag in ("PTKZU", "PTKNEG", "PTKVZ", "PTKANT"): return (token, PRT) if tag in ("PDF", "PDAT", "PIS", "PIAT", "PIDAT", "PPER", "PPOS", "PPOSAT"): return (token, PRON) if tag in ("PRELS", "PRELAT", "PRF", "PWS", "PWAT", "PWAV", "PAV"): return (token, PRON) return penntreebank2universal(*stts2penntreebank(token, tag))
[ "def", "stts2universal", "(", "token", ",", "tag", ")", ":", "if", "tag", "in", "(", "\"KON\"", ",", "\"KOUI\"", ",", "\"KOUS\"", ",", "\"KOKOM\"", ")", ":", "return", "(", "token", ",", "CONJ", ")", "if", "tag", "in", "(", "\"PTKZU\"", ",", "\"PTKNEG\"", ",", "\"PTKVZ\"", ",", "\"PTKANT\"", ")", ":", "return", "(", "token", ",", "PRT", ")", "if", "tag", "in", "(", "\"PDF\"", ",", "\"PDAT\"", ",", "\"PIS\"", ",", "\"PIAT\"", ",", "\"PIDAT\"", ",", "\"PPER\"", ",", "\"PPOS\"", ",", "\"PPOSAT\"", ")", ":", "return", "(", "token", ",", "PRON", ")", "if", "tag", "in", "(", "\"PRELS\"", ",", "\"PRELAT\"", ",", "\"PRF\"", ",", "\"PWS\"", ",", "\"PWAT\"", ",", "\"PWAV\"", ",", "\"PAV\"", ")", ":", "return", "(", "token", ",", "PRON", ")", "return", "penntreebank2universal", "(", "*", "stts2penntreebank", "(", "token", ",", "tag", ")", ")" ]
Converts an STTS tag to a universal tag. For example: ohne/APPR => ohne/PREP
[ "Converts", "an", "STTS", "tag", "to", "a", "universal", "tag", ".", "For", "example", ":", "ohne", "/", "APPR", "=", ">", "ohne", "/", "PREP" ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/__init__.py#L160-L172
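The STTS-to-universal mapping is normally reached via the tagset keyword of tag()/parse() rather than by calling stts2universal() directly; the tagset value "universal" and the sample output are assumptions based on the docstring above.

from pattern.de import tag

print(tag("Ohne Zweifel ist das richtig."))                      # Penn-style tags (default)
print(tag("Ohne Zweifel ist das richtig.", tagset="universal"))  # e.g. ('Ohne', 'PREP'), ...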
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/__init__.py
find_lemmata
def find_lemmata(tokens): """ Annotates the tokens with lemmata for plural nouns and conjugated verbs, where each token is a [word, part-of-speech] list. """ for token in tokens: word, pos, lemma = token[0], token[1], token[0] if pos.startswith(("DT", "JJ")): lemma = predicative(word) if pos == "NNS": lemma = singularize(word) if pos.startswith(("VB", "MD")): lemma = conjugate(word, INFINITIVE) or word token.append(lemma.lower()) return tokens
python
def find_lemmata(tokens): """ Annotates the tokens with lemmata for plural nouns and conjugated verbs, where each token is a [word, part-of-speech] list. """ for token in tokens: word, pos, lemma = token[0], token[1], token[0] if pos.startswith(("DT", "JJ")): lemma = predicative(word) if pos == "NNS": lemma = singularize(word) if pos.startswith(("VB", "MD")): lemma = conjugate(word, INFINITIVE) or word token.append(lemma.lower()) return tokens
[ "def", "find_lemmata", "(", "tokens", ")", ":", "for", "token", "in", "tokens", ":", "word", ",", "pos", ",", "lemma", "=", "token", "[", "0", "]", ",", "token", "[", "1", "]", ",", "token", "[", "0", "]", "if", "pos", ".", "startswith", "(", "(", "\"DT\"", ",", "\"JJ\"", ")", ")", ":", "lemma", "=", "predicative", "(", "word", ")", "if", "pos", "==", "\"NNS\"", ":", "lemma", "=", "singularize", "(", "word", ")", "if", "pos", ".", "startswith", "(", "(", "\"VB\"", ",", "\"MD\"", ")", ")", ":", "lemma", "=", "conjugate", "(", "word", ",", "INFINITIVE", ")", "or", "word", "token", ".", "append", "(", "lemma", ".", "lower", "(", ")", ")", "return", "tokens" ]
Annotates the tokens with lemmata for plural nouns and conjugated verbs, where each token is a [word, part-of-speech] list.
[ "Annotates", "the", "tokens", "with", "lemmata", "for", "plural", "nouns", "and", "conjugated", "verbs", "where", "each", "token", "is", "a", "[", "word", "part", "-", "of", "-", "speech", "]", "list", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/__init__.py#L186-L199
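find_lemmata() is applied internally when parse() is called with lemmata=True, which appends a lemma column to each token; a minimal sketch with the import path assumed.

from pattern.de import parse

# Output is one WORD/TAG/CHUNK/PNP/LEMMA token per word, space-separated.
print(parse("Die Katzen schlafen.", lemmata=True))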
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/__init__.py
tree
def tree(s, token=[WORD, POS, CHUNK, PNP, REL, LEMMA]): """ Returns a parsed Text from the given parsed string. """ return Text(s, token)
python
def tree(s, token=[WORD, POS, CHUNK, PNP, REL, LEMMA]): """ Returns a parsed Text from the given parsed string. """ return Text(s, token)
[ "def", "tree", "(", "s", ",", "token", "=", "[", "WORD", ",", "POS", ",", "CHUNK", ",", "PNP", ",", "REL", ",", "LEMMA", "]", ")", ":", "return", "Text", "(", "s", ",", "token", ")" ]
Returns a parsed Text from the given parsed string.
[ "Returns", "a", "parsed", "Text", "from", "the", "given", "parsed", "string", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/__init__.py#L250-L253
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/__init__.py
tag
def tag(s, tokenize=True, encoding="utf-8", **kwargs): """ Returns a list of (token, tag)-tuples from the given string. """ tags = [] for sentence in parse(s, tokenize, True, False, False, False, encoding, **kwargs).split(): for token in sentence: tags.append((token[0], token[1])) return tags
python
def tag(s, tokenize=True, encoding="utf-8", **kwargs): """ Returns a list of (token, tag)-tuples from the given string. """ tags = [] for sentence in parse(s, tokenize, True, False, False, False, encoding, **kwargs).split(): for token in sentence: tags.append((token[0], token[1])) return tags
[ "def", "tag", "(", "s", ",", "tokenize", "=", "True", ",", "encoding", "=", "\"utf-8\"", ",", "*", "*", "kwargs", ")", ":", "tags", "=", "[", "]", "for", "sentence", "in", "parse", "(", "s", ",", "tokenize", ",", "True", ",", "False", ",", "False", ",", "False", ",", "encoding", ",", "*", "*", "kwargs", ")", ".", "split", "(", ")", ":", "for", "token", "in", "sentence", ":", "tags", ".", "append", "(", "(", "token", "[", "0", "]", ",", "token", "[", "1", "]", ")", ")", "return", "tags" ]
Returns a list of (token, tag)-tuples from the given string.
[ "Returns", "a", "list", "of", "(", "token", "tag", ")", "-", "tuples", "from", "the", "given", "string", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/__init__.py#L255-L262
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/__init__.py
keywords
def keywords(s, top=10, **kwargs): """ Returns a sorted list of keywords in the given string. """ return parser.find_keywords(s, top=top, frequency=parser.frequency)
python
def keywords(s, top=10, **kwargs): """ Returns a sorted list of keywords in the given string. """ return parser.find_keywords(s, top=top, frequency=parser.frequency)
[ "def", "keywords", "(", "s", ",", "top", "=", "10", ",", "*", "*", "kwargs", ")", ":", "return", "parser", ".", "find_keywords", "(", "s", ",", "top", "=", "top", ",", "frequency", "=", "parser", ".", "frequency", ")" ]
Returns a sorted list of keywords in the given string.
[ "Returns", "a", "sorted", "list", "of", "keywords", "in", "the", "given", "string", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/__init__.py#L264-L267
markuskiller/textblob-de
textblob_de/tokenizers.py
sent_tokenize
def sent_tokenize(text, tokenizer=None): """Convenience function for tokenizing sentences (not iterable). If tokenizer is not specified, the default tokenizer NLTKPunktTokenizer() is used (same behaviour as in the main `TextBlob`_ library). This function returns the sentences as a generator object. .. _TextBlob: http://textblob.readthedocs.org/ """ _tokenizer = tokenizer if tokenizer is not None else NLTKPunktTokenizer() return SentenceTokenizer(tokenizer=_tokenizer).itokenize(text)
python
def sent_tokenize(text, tokenizer=None): """Convenience function for tokenizing sentences (not iterable). If tokenizer is not specified, the default tokenizer NLTKPunktTokenizer() is used (same behaviour as in the main `TextBlob`_ library). This function returns the sentences as a generator object. .. _TextBlob: http://textblob.readthedocs.org/ """ _tokenizer = tokenizer if tokenizer is not None else NLTKPunktTokenizer() return SentenceTokenizer(tokenizer=_tokenizer).itokenize(text)
[ "def", "sent_tokenize", "(", "text", ",", "tokenizer", "=", "None", ")", ":", "_tokenizer", "=", "tokenizer", "if", "tokenizer", "is", "not", "None", "else", "NLTKPunktTokenizer", "(", ")", "return", "SentenceTokenizer", "(", "tokenizer", "=", "_tokenizer", ")", ".", "itokenize", "(", "text", ")" ]
Convenience function for tokenizing sentences (not iterable). If tokenizer is not specified, the default tokenizer NLTKPunktTokenizer() is used (same behaviour as in the main `TextBlob`_ library). This function returns the sentences as a generator object. .. _TextBlob: http://textblob.readthedocs.org/
[ "Convenience", "function", "for", "tokenizing", "sentences", "(", "not", "iterable", ")", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/tokenizers.py#L306-L318
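A small sketch of the convenience function; the default splitter is NLTK's Punkt model (requires the NLTK "punkt" data), and a PatternTokenizer instance can be passed in to use pattern's rule-based splitter instead.

from textblob_de.tokenizers import sent_tokenize, PatternTokenizer

text = "Heute ist der 3. Mai 2014. Dr. Meier bleibt heute zu Hause."
print(list(sent_tokenize(text)))                                # NLTKPunktTokenizer() default
print(list(sent_tokenize(text, tokenizer=PatternTokenizer())))  # pattern-based splitting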
markuskiller/textblob-de
textblob_de/tokenizers.py
word_tokenize
def word_tokenize(text, tokenizer=None, include_punc=True, *args, **kwargs): """Convenience function for tokenizing text into words. NOTE: NLTK's word tokenizer expects sentences as input, so the text will be tokenized to sentences before being tokenized to words. This function returns an itertools chain object (generator). """ _tokenizer = tokenizer if tokenizer is not None else NLTKPunktTokenizer() words = chain.from_iterable( WordTokenizer(tokenizer=_tokenizer).itokenize(sentence, include_punc, *args, **kwargs) for sentence in sent_tokenize(text, tokenizer=_tokenizer)) return words
python
def word_tokenize(text, tokenizer=None, include_punc=True, *args, **kwargs): """Convenience function for tokenizing text into words. NOTE: NLTK's word tokenizer expects sentences as input, so the text will be tokenized to sentences before being tokenized to words. This function returns an itertools chain object (generator). """ _tokenizer = tokenizer if tokenizer is not None else NLTKPunktTokenizer() words = chain.from_iterable( WordTokenizer(tokenizer=_tokenizer).itokenize(sentence, include_punc, *args, **kwargs) for sentence in sent_tokenize(text, tokenizer=_tokenizer)) return words
[ "def", "word_tokenize", "(", "text", ",", "tokenizer", "=", "None", ",", "include_punc", "=", "True", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "_tokenizer", "=", "tokenizer", "if", "tokenizer", "is", "not", "None", "else", "NLTKPunktTokenizer", "(", ")", "words", "=", "chain", ".", "from_iterable", "(", "WordTokenizer", "(", "tokenizer", "=", "_tokenizer", ")", ".", "itokenize", "(", "sentence", ",", "include_punc", ",", "*", "args", ",", "*", "*", "kwargs", ")", "for", "sentence", "in", "sent_tokenize", "(", "text", ",", "tokenizer", "=", "_tokenizer", ")", ")", "return", "words" ]
Convenience function for tokenizing text into words. NOTE: NLTK's word tokenizer expects sentences as input, so the text will be tokenized to sentences before being tokenized to words. This function returns an itertools chain object (generator).
[ "Convenience", "function", "for", "tokenizing", "text", "into", "words", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/tokenizers.py#L321-L335
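Sketch of word_tokenize(); with include_punc=False punctuation is stripped except where it belongs to a contraction, as in the "gibt's" example from the tokenizer's own comments.

from textblob_de.tokenizers import word_tokenize

print(list(word_tokenize("Heute gibt's viel zu tun!", include_punc=False)))
# expected along the lines of ['Heute', 'gibt', "'s", 'viel', 'zu', 'tun']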
markuskiller/textblob-de
textblob_de/tokenizers.py
NLTKPunktTokenizer.tokenize
def tokenize(self, text, include_punc=True, nested=False): """Return a list of word tokens. :param text: string of text. :param include_punc: (optional) whether to include punctuation as separate tokens. Default to True. :param nested: (optional) whether to return tokens as nested lists of sentences. Default to False. """ self.tokens = [ w for w in ( self.word_tokenize( s, include_punc) for s in self.sent_tokenize(text))] if nested: return self.tokens else: return list(chain.from_iterable(self.tokens))
python
def tokenize(self, text, include_punc=True, nested=False): """Return a list of word tokens. :param text: string of text. :param include_punc: (optional) whether to include punctuation as separate tokens. Default to True. :param nested: (optional) whether to return tokens as nested lists of sentences. Default to False. """ self.tokens = [ w for w in ( self.word_tokenize( s, include_punc) for s in self.sent_tokenize(text))] if nested: return self.tokens else: return list(chain.from_iterable(self.tokens))
[ "def", "tokenize", "(", "self", ",", "text", ",", "include_punc", "=", "True", ",", "nested", "=", "False", ")", ":", "self", ".", "tokens", "=", "[", "w", "for", "w", "in", "(", "self", ".", "word_tokenize", "(", "s", ",", "include_punc", ")", "for", "s", "in", "self", ".", "sent_tokenize", "(", "text", ")", ")", "]", "if", "nested", ":", "return", "self", ".", "tokens", "else", ":", "return", "list", "(", "chain", ".", "from_iterable", "(", "self", ".", "tokens", ")", ")" ]
Return a list of word tokens. :param text: string of text. :param include_punc: (optional) whether to include punctuation as separate tokens. Default to True. :param nested: (optional) whether to return tokens as nested lists of sentences. Default to False.
[ "Return", "a", "list", "of", "word", "tokens", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/tokenizers.py#L54-L72
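Sketch of the nested=True flag, which keeps the sentence structure instead of flattening the token lists (NLTK "punkt" data required).

from textblob_de.tokenizers import NLTKPunktTokenizer

tok = NLTKPunktTokenizer()
print(tok.tokenize("Der Hund bellt. Die Katze miaut.", nested=True))
# expected along the lines of [['Der', 'Hund', 'bellt', '.'], ['Die', 'Katze', 'miaut', '.']]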
markuskiller/textblob-de
textblob_de/tokenizers.py
NLTKPunktTokenizer.sent_tokenize
def sent_tokenize(self, text, **kwargs): """NLTK's sentence tokenizer (currently PunktSentenceTokenizer). Uses an unsupervised algorithm to build a model for abbreviation words, collocations, and words that start sentences, then uses that to find sentence boundaries. """ sentences = self.sent_tok.tokenize( text, realign_boundaries=kwargs.get( "realign_boundaries", True)) return sentences
python
def sent_tokenize(self, text, **kwargs): """NLTK's sentence tokenizer (currently PunktSentenceTokenizer). Uses an unsupervised algorithm to build a model for abbreviation words, collocations, and words that start sentences, then uses that to find sentence boundaries. """ sentences = self.sent_tok.tokenize( text, realign_boundaries=kwargs.get( "realign_boundaries", True)) return sentences
[ "def", "sent_tokenize", "(", "self", ",", "text", ",", "*", "*", "kwargs", ")", ":", "sentences", "=", "self", ".", "sent_tok", ".", "tokenize", "(", "text", ",", "realign_boundaries", "=", "kwargs", ".", "get", "(", "\"realign_boundaries\"", ",", "True", ")", ")", "return", "sentences" ]
NLTK's sentence tokenizer (currently PunktSentenceTokenizer). Uses an unsupervised algorithm to build a model for abbreviation words, collocations, and words that start sentences, then uses that to find sentence boundaries.
[ "NLTK", "s", "sentence", "tokenizer", "(", "currently", "PunktSentenceTokenizer", ")", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/tokenizers.py#L75-L88
markuskiller/textblob-de
textblob_de/tokenizers.py
NLTKPunktTokenizer.word_tokenize
def word_tokenize(self, text, include_punc=True): """The Treebank tokenizer uses regular expressions to tokenize text as in Penn Treebank. It assumes that the text has already been segmented into sentences, e.g. using ``self.sent_tokenize()``. This tokenizer performs the following steps: - split standard contractions, e.g. ``don't`` -> ``do n't`` and ``they'll`` -> ``they 'll`` - treat most punctuation characters as separate tokens - split off commas and single quotes, when followed by whitespace - separate periods that appear at the end of line Source: NLTK's docstring of ``TreebankWordTokenizer`` (accessed: 02/10/2014) """ #: Do not process empty strings (Issue #3) if text.strip() == "": return [] _tokens = self.word_tok.tokenize(text) #: Handle strings consisting of a single punctuation mark seperately (Issue #4) if len(_tokens) == 1: if _tokens[0] in PUNCTUATION: if include_punc: return _tokens else: return [] if include_punc: return _tokens else: # Return each word token # Strips punctuation unless the word comes from a contraction # e.g. "gibt's" => ["gibt", "'s"] in "Heute gibt's viel zu tun!" # e.g. "hat's" => ["hat", "'s"] # e.g. "home." => ['home'] words = [ word if word.startswith("'") else strip_punc( word, all=False) for word in _tokens if strip_punc( word, all=False)] return list(words)
python
def word_tokenize(self, text, include_punc=True): """The Treebank tokenizer uses regular expressions to tokenize text as in Penn Treebank. It assumes that the text has already been segmented into sentences, e.g. using ``self.sent_tokenize()``. This tokenizer performs the following steps: - split standard contractions, e.g. ``don't`` -> ``do n't`` and ``they'll`` -> ``they 'll`` - treat most punctuation characters as separate tokens - split off commas and single quotes, when followed by whitespace - separate periods that appear at the end of line Source: NLTK's docstring of ``TreebankWordTokenizer`` (accessed: 02/10/2014) """ #: Do not process empty strings (Issue #3) if text.strip() == "": return [] _tokens = self.word_tok.tokenize(text) #: Handle strings consisting of a single punctuation mark seperately (Issue #4) if len(_tokens) == 1: if _tokens[0] in PUNCTUATION: if include_punc: return _tokens else: return [] if include_punc: return _tokens else: # Return each word token # Strips punctuation unless the word comes from a contraction # e.g. "gibt's" => ["gibt", "'s"] in "Heute gibt's viel zu tun!" # e.g. "hat's" => ["hat", "'s"] # e.g. "home." => ['home'] words = [ word if word.startswith("'") else strip_punc( word, all=False) for word in _tokens if strip_punc( word, all=False)] return list(words)
[ "def", "word_tokenize", "(", "self", ",", "text", ",", "include_punc", "=", "True", ")", ":", "#: Do not process empty strings (Issue #3)", "if", "text", ".", "strip", "(", ")", "==", "\"\"", ":", "return", "[", "]", "_tokens", "=", "self", ".", "word_tok", ".", "tokenize", "(", "text", ")", "#: Handle strings consisting of a single punctuation mark seperately (Issue #4)", "if", "len", "(", "_tokens", ")", "==", "1", ":", "if", "_tokens", "[", "0", "]", "in", "PUNCTUATION", ":", "if", "include_punc", ":", "return", "_tokens", "else", ":", "return", "[", "]", "if", "include_punc", ":", "return", "_tokens", "else", ":", "# Return each word token", "# Strips punctuation unless the word comes from a contraction", "# e.g. \"gibt's\" => [\"gibt\", \"'s\"] in \"Heute gibt's viel zu tun!\"", "# e.g. \"hat's\" => [\"hat\", \"'s\"]", "# e.g. \"home.\" => ['home']", "words", "=", "[", "word", "if", "word", ".", "startswith", "(", "\"'\"", ")", "else", "strip_punc", "(", "word", ",", "all", "=", "False", ")", "for", "word", "in", "_tokens", "if", "strip_punc", "(", "word", ",", "all", "=", "False", ")", "]", "return", "list", "(", "words", ")" ]
The Treebank tokenizer uses regular expressions to tokenize text as in Penn Treebank. It assumes that the text has already been segmented into sentences, e.g. using ``self.sent_tokenize()``. This tokenizer performs the following steps: - split standard contractions, e.g. ``don't`` -> ``do n't`` and ``they'll`` -> ``they 'll`` - treat most punctuation characters as separate tokens - split off commas and single quotes, when followed by whitespace - separate periods that appear at the end of line Source: NLTK's docstring of ``TreebankWordTokenizer`` (accessed: 02/10/2014)
[ "The", "Treebank", "tokenizer", "uses", "regular", "expressions", "to", "tokenize", "text", "as", "in", "Penn", "Treebank", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/tokenizers.py#L90-L132
markuskiller/textblob-de
textblob_de/tokenizers.py
PatternTokenizer.sent_tokenize
def sent_tokenize(self, text, **kwargs): """Returns a list of sentences. Each sentence is a space-separated string of tokens (words). Handles common cases of abbreviations (e.g., etc., ...). Punctuation marks are split from other words. Periods (or ?!) mark the end of a sentence. Headings without an ending period are inferred by line breaks. """ sentences = find_sentences(text, punctuation=kwargs.get( "punctuation", PUNCTUATION), abbreviations=kwargs.get( "abbreviations", ABBREVIATIONS_DE), replace=kwargs.get("replace", replacements), linebreak=r"\n{2,}") return sentences
python
def sent_tokenize(self, text, **kwargs): """Returns a list of sentences. Each sentence is a space-separated string of tokens (words). Handles common cases of abbreviations (e.g., etc., ...). Punctuation marks are split from other words. Periods (or ?!) mark the end of a sentence. Headings without an ending period are inferred by line breaks. """ sentences = find_sentences(text, punctuation=kwargs.get( "punctuation", PUNCTUATION), abbreviations=kwargs.get( "abbreviations", ABBREVIATIONS_DE), replace=kwargs.get("replace", replacements), linebreak=r"\n{2,}") return sentences
[ "def", "sent_tokenize", "(", "self", ",", "text", ",", "*", "*", "kwargs", ")", ":", "sentences", "=", "find_sentences", "(", "text", ",", "punctuation", "=", "kwargs", ".", "get", "(", "\"punctuation\"", ",", "PUNCTUATION", ")", ",", "abbreviations", "=", "kwargs", ".", "get", "(", "\"abbreviations\"", ",", "ABBREVIATIONS_DE", ")", ",", "replace", "=", "kwargs", ".", "get", "(", "\"replace\"", ",", "replacements", ")", ",", "linebreak", "=", "r\"\\n{2,}\"", ")", "return", "sentences" ]
Returns a list of sentences. Each sentence is a space-separated string of tokens (words). Handles common cases of abbreviations (e.g., etc., ...). Punctuation marks are split from other words. Periods (or ?!) mark the end of a sentence. Headings without an ending period are inferred by line breaks.
[ "Returns", "a", "list", "of", "sentences", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/tokenizers.py#L173-L192
markuskiller/textblob-de
textblob_de/tokenizers.py
WordTokenizer.tokenize
def tokenize(self, text, include_punc=True, **kwargs): """Return a list of word tokens. :param text: string of text. :param include_punc: (optional) whether to include punctuation as separate tokens. Default to True. """ return self.tokenizer.word_tokenize(text, include_punc, **kwargs)
python
def tokenize(self, text, include_punc=True, **kwargs): """Return a list of word tokens. :param text: string of text. :param include_punc: (optional) whether to include punctuation as separate tokens. Default to True. """ return self.tokenizer.word_tokenize(text, include_punc, **kwargs)
[ "def", "tokenize", "(", "self", ",", "text", ",", "include_punc", "=", "True", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "tokenizer", ".", "word_tokenize", "(", "text", ",", "include_punc", ",", "*", "*", "kwargs", ")" ]
Return a list of word tokens. :param text: string of text. :param include_punc: (optional) whether to include punctuation as separate tokens. Default to True.
[ "Return", "a", "list", "of", "word", "tokens", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/tokenizers.py#L254-L262
markuskiller/textblob-de
textblob_de/parsers.py
PatternParser.parse
def parse(self, text): """Parses the text. ``pattern.de.parse(**kwargs)`` can be passed to the parser instance and are documented in the main docstring of :class:`PatternParser() <textblob_de.parsers.PatternParser>`. :param str text: A string. """ #: Do not process empty strings (Issue #3) if text.strip() == "": return "" #: Do not process strings consisting of a single punctuation mark (Issue #4) elif text.strip() in PUNCTUATION: _sym = text.strip() if _sym in tuple('.?!'): _tag = "." else: _tag = _sym if self.lemmata: return "{0}/{1}/O/O/{0}".format(_sym, _tag) else: return "{0}/{1}/O/O".format(_sym, _tag) if self.tokenize: _tokenized = " ".join(self.tokenizer.tokenize(text)) else: _tokenized = text _parsed = pattern_parse(_tokenized, # text is tokenized before it is passed on to # pattern.de.parse tokenize=False, tags=self.tags, chunks=self.chunks, relations=self.relations, lemmata=self.lemmata, encoding=self.encoding, tagset=self.tagset) if self.pprint: _parsed = pattern_pprint(_parsed) return _parsed
python
def parse(self, text):
    """Parses the text.

    ``pattern.de.parse(**kwargs)`` can be passed to the parser instance and
    are documented in the main docstring of
    :class:`PatternParser() <textblob_de.parsers.PatternParser>`.

    :param str text: A string.

    """
    #: Do not process empty strings (Issue #3)
    if text.strip() == "":
        return ""
    #: Do not process strings consisting of a single punctuation mark (Issue #4)
    elif text.strip() in PUNCTUATION:
        _sym = text.strip()
        if _sym in tuple('.?!'):
            _tag = "."
        else:
            _tag = _sym
        if self.lemmata:
            return "{0}/{1}/O/O/{0}".format(_sym, _tag)
        else:
            return "{0}/{1}/O/O".format(_sym, _tag)
    if self.tokenize:
        _tokenized = " ".join(self.tokenizer.tokenize(text))
    else:
        _tokenized = text
    _parsed = pattern_parse(_tokenized,
                            # text is tokenized before it is passed on to
                            # pattern.de.parse
                            tokenize=False,
                            tags=self.tags,
                            chunks=self.chunks,
                            relations=self.relations,
                            lemmata=self.lemmata,
                            encoding=self.encoding,
                            tagset=self.tagset)
    if self.pprint:
        _parsed = pattern_pprint(_parsed)
    return _parsed
[ "def", "parse", "(", "self", ",", "text", ")", ":", "#: Do not process empty strings (Issue #3)", "if", "text", ".", "strip", "(", ")", "==", "\"\"", ":", "return", "\"\"", "#: Do not process strings consisting of a single punctuation mark (Issue #4)", "elif", "text", ".", "strip", "(", ")", "in", "PUNCTUATION", ":", "_sym", "=", "text", ".", "strip", "(", ")", "if", "_sym", "in", "tuple", "(", "'.?!'", ")", ":", "_tag", "=", "\".\"", "else", ":", "_tag", "=", "_sym", "if", "self", ".", "lemmata", ":", "return", "\"{0}/{1}/O/O/{0}\"", ".", "format", "(", "_sym", ",", "_tag", ")", "else", ":", "return", "\"{0}/{1}/O/O\"", ".", "format", "(", "_sym", ",", "_tag", ")", "if", "self", ".", "tokenize", ":", "_tokenized", "=", "\" \"", ".", "join", "(", "self", ".", "tokenizer", ".", "tokenize", "(", "text", ")", ")", "else", ":", "_tokenized", "=", "text", "_parsed", "=", "pattern_parse", "(", "_tokenized", ",", "# text is tokenized before it is passed on to", "# pattern.de.parse", "tokenize", "=", "False", ",", "tags", "=", "self", ".", "tags", ",", "chunks", "=", "self", ".", "chunks", ",", "relations", "=", "self", ".", "relations", ",", "lemmata", "=", "self", ".", "lemmata", ",", "encoding", "=", "self", ".", "encoding", ",", "tagset", "=", "self", ".", "tagset", ")", "if", "self", ".", "pprint", ":", "_parsed", "=", "pattern_pprint", "(", "_parsed", ")", "return", "_parsed" ]
Parses the text. ``pattern.de.parse(**kwargs)`` can be passed to the parser instance and are documented in the main docstring of :class:`PatternParser() <textblob_de.parsers.PatternParser>`. :param str text: A string.
[ "Parses", "the", "text", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/parsers.py#L77-L116
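A hedged usage sketch; the no-argument constructor (default tags/chunks settings) is assumed, not shown in this record:

from textblob_de.parsers import PatternParser

parser = PatternParser()  # constructor defaults assumed
print(parser.parse("Der Hund schläft."))
# prints a tagged and chunked string in WORD/TAG/CHUNK/ROLE format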
markuskiller/textblob-de
textblob_de/np_extractors.py
PatternParserNPExtractor.extract
def extract(self, text):
    """Return a list of noun phrases (strings) for a body of text.

    :param str text: A string.

    """
    _extracted = []
    if text.strip() == "":
        return _extracted
    parsed_sentences = self._parse_text(text)
    for s in parsed_sentences:
        tokens = s.split()
        new_np = []
        for t in tokens:
            w, tag, phrase, role = t.split('/')
            # exclude some parser errors (e.g. VB within NP),
            # extend startswith tuple if necessary
            if 'NP' in phrase and not self._is_verb(w, tag):
                if len(new_np) > 0 and w.lower() in START_NEW_NP:
                    _extracted.append(" ".join(new_np))
                    new_np = [w]
                else:
                    # normalize capitalisation of sentence starters, except
                    # for nouns
                    new_np.append(w.lower() if tokens[0].startswith(w)
                                  and not tag.startswith('N') else w)
            else:
                if len(new_np) > 0:
                    _extracted.append(" ".join(new_np))
                    new_np = []
    return self._filter_extracted(_extracted)
python
def extract(self, text):
    """Return a list of noun phrases (strings) for a body of text.

    :param str text: A string.

    """
    _extracted = []
    if text.strip() == "":
        return _extracted
    parsed_sentences = self._parse_text(text)
    for s in parsed_sentences:
        tokens = s.split()
        new_np = []
        for t in tokens:
            w, tag, phrase, role = t.split('/')
            # exclude some parser errors (e.g. VB within NP),
            # extend startswith tuple if necessary
            if 'NP' in phrase and not self._is_verb(w, tag):
                if len(new_np) > 0 and w.lower() in START_NEW_NP:
                    _extracted.append(" ".join(new_np))
                    new_np = [w]
                else:
                    # normalize capitalisation of sentence starters, except
                    # for nouns
                    new_np.append(w.lower() if tokens[0].startswith(w)
                                  and not tag.startswith('N') else w)
            else:
                if len(new_np) > 0:
                    _extracted.append(" ".join(new_np))
                    new_np = []
    return self._filter_extracted(_extracted)
[ "def", "extract", "(", "self", ",", "text", ")", ":", "_extracted", "=", "[", "]", "if", "text", ".", "strip", "(", ")", "==", "\"\"", ":", "return", "_extracted", "parsed_sentences", "=", "self", ".", "_parse_text", "(", "text", ")", "for", "s", "in", "parsed_sentences", ":", "tokens", "=", "s", ".", "split", "(", ")", "new_np", "=", "[", "]", "for", "t", "in", "tokens", ":", "w", ",", "tag", ",", "phrase", ",", "role", "=", "t", ".", "split", "(", "'/'", ")", "# exclude some parser errors (e.g. VB within NP),", "# extend startswith tuple if necessary", "if", "'NP'", "in", "phrase", "and", "not", "self", ".", "_is_verb", "(", "w", ",", "tag", ")", ":", "if", "len", "(", "new_np", ")", ">", "0", "and", "w", ".", "lower", "(", ")", "in", "START_NEW_NP", ":", "_extracted", ".", "append", "(", "\" \"", ".", "join", "(", "new_np", ")", ")", "new_np", "=", "[", "w", "]", "else", ":", "# normalize capitalisation of sentence starters, except", "# for nouns", "new_np", ".", "append", "(", "w", ".", "lower", "(", ")", "if", "tokens", "[", "0", "]", ".", "startswith", "(", "w", ")", "and", "not", "tag", ".", "startswith", "(", "'N'", ")", "else", "w", ")", "else", ":", "if", "len", "(", "new_np", ")", ">", "0", ":", "_extracted", ".", "append", "(", "\" \"", ".", "join", "(", "new_np", ")", ")", "new_np", "=", "[", "]", "return", "self", ".", "_filter_extracted", "(", "_extracted", ")" ]
Return a list of noun phrases (strings) for a body of text. :param str text: A string.
[ "Return", "a", "list", "of", "noun", "phrases", "(", "strings", ")", "for", "a", "body", "of", "text", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/np_extractors.py#L88-L118
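A short usage sketch, assuming the extractor can be built with its default tokenizer; the example sentence and output are illustrative only:

from textblob_de.np_extractors import PatternParserNPExtractor

extractor = PatternParserNPExtractor()  # default tokenizer assumed
print(extractor.extract("Der kleine Hund spielt im großen Garten."))
# e.g. ['kleine Hund', 'großen Garten'] -- actual output depends on the parser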
markuskiller/textblob-de
textblob_de/np_extractors.py
PatternParserNPExtractor._filter_extracted
def _filter_extracted(self, extracted_list):
    """Filter insignificant words for key noun phrase extraction.

    determiners, relative pronouns, reflexive pronouns

    In general, pronouns are not useful, as you need context to know what they
    refer to. Most of the pronouns, however, are filtered out by
    blob.noun_phrase method's np length (>1) filter

    :param list extracted_list: A list of noun phrases extracted from parser
        output.

    """
    _filtered = []
    for np in extracted_list:
        _np = np.split()
        if _np[0] in INSIGNIFICANT:
            _np.pop(0)
        try:
            if _np[-1] in INSIGNIFICANT:
                _np.pop(-1)
            # e.g. 'welcher die ...'
            if _np[0] in INSIGNIFICANT:
                _np.pop(0)
        except IndexError:
            _np = []
        if len(_np) > 0:
            _filtered.append(" ".join(_np))
    return _filtered
python
def _filter_extracted(self, extracted_list):
    """Filter insignificant words for key noun phrase extraction.

    determiners, relative pronouns, reflexive pronouns

    In general, pronouns are not useful, as you need context to know what they
    refer to. Most of the pronouns, however, are filtered out by
    blob.noun_phrase method's np length (>1) filter

    :param list extracted_list: A list of noun phrases extracted from parser
        output.

    """
    _filtered = []
    for np in extracted_list:
        _np = np.split()
        if _np[0] in INSIGNIFICANT:
            _np.pop(0)
        try:
            if _np[-1] in INSIGNIFICANT:
                _np.pop(-1)
            # e.g. 'welcher die ...'
            if _np[0] in INSIGNIFICANT:
                _np.pop(0)
        except IndexError:
            _np = []
        if len(_np) > 0:
            _filtered.append(" ".join(_np))
    return _filtered
[ "def", "_filter_extracted", "(", "self", ",", "extracted_list", ")", ":", "_filtered", "=", "[", "]", "for", "np", "in", "extracted_list", ":", "_np", "=", "np", ".", "split", "(", ")", "if", "_np", "[", "0", "]", "in", "INSIGNIFICANT", ":", "_np", ".", "pop", "(", "0", ")", "try", ":", "if", "_np", "[", "-", "1", "]", "in", "INSIGNIFICANT", ":", "_np", ".", "pop", "(", "-", "1", ")", "# e.g. 'welcher die ...'", "if", "_np", "[", "0", "]", "in", "INSIGNIFICANT", ":", "_np", ".", "pop", "(", "0", ")", "except", "IndexError", ":", "_np", "=", "[", "]", "if", "len", "(", "_np", ")", ">", "0", ":", "_filtered", ".", "append", "(", "\" \"", ".", "join", "(", "_np", ")", ")", "return", "_filtered" ]
Filter insignificant words for key noun phrase extraction. determiners, relative pronouns, reflexive pronouns In general, pronouns are not useful, as you need context to know what they refer to. Most of the pronouns, however, are filtered out by blob.noun_phrase method's np length (>1) filter :param list extracted_list: A list of noun phrases extracted from parser output.
[ "Filter", "insignificant", "words", "for", "key", "noun", "phrase", "extraction", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/np_extractors.py#L120-L146
markuskiller/textblob-de
textblob_de/np_extractors.py
PatternParserNPExtractor._parse_text
def _parse_text(self, text):
    """Parse text (string) and return list of parsed sentences (strings).

    Each sentence consists of space separated token elements and the
    token format returned by the PatternParser is
    WORD/TAG/PHRASE/ROLE/(LEMMA) (separated by a forward slash '/')

    :param str text: A string.

    """
    if isinstance(self.tokenizer, PatternTokenizer):
        parsed_text = pattern_parse(text, tokenize=True, lemmata=False)
    else:
        _tokenized = []
        _sentences = sent_tokenize(text, tokenizer=self.tokenizer)
        for s in _sentences:
            _tokenized.append(" ".join(self.tokenizer.tokenize(s)))
        parsed_text = pattern_parse(
            _tokenized, tokenize=False, lemmata=False)
    return parsed_text.split('\n')
python
def _parse_text(self, text):
    """Parse text (string) and return list of parsed sentences (strings).

    Each sentence consists of space separated token elements and the
    token format returned by the PatternParser is
    WORD/TAG/PHRASE/ROLE/(LEMMA) (separated by a forward slash '/')

    :param str text: A string.

    """
    if isinstance(self.tokenizer, PatternTokenizer):
        parsed_text = pattern_parse(text, tokenize=True, lemmata=False)
    else:
        _tokenized = []
        _sentences = sent_tokenize(text, tokenizer=self.tokenizer)
        for s in _sentences:
            _tokenized.append(" ".join(self.tokenizer.tokenize(s)))
        parsed_text = pattern_parse(
            _tokenized, tokenize=False, lemmata=False)
    return parsed_text.split('\n')
[ "def", "_parse_text", "(", "self", ",", "text", ")", ":", "if", "isinstance", "(", "self", ".", "tokenizer", ",", "PatternTokenizer", ")", ":", "parsed_text", "=", "pattern_parse", "(", "text", ",", "tokenize", "=", "True", ",", "lemmata", "=", "False", ")", "else", ":", "_tokenized", "=", "[", "]", "_sentences", "=", "sent_tokenize", "(", "text", ",", "tokenizer", "=", "self", ".", "tokenizer", ")", "for", "s", "in", "_sentences", ":", "_tokenized", ".", "append", "(", "\" \"", ".", "join", "(", "self", ".", "tokenizer", ".", "tokenize", "(", "s", ")", ")", ")", "parsed_text", "=", "pattern_parse", "(", "_tokenized", ",", "tokenize", "=", "False", ",", "lemmata", "=", "False", ")", "return", "parsed_text", ".", "split", "(", "'\\n'", ")" ]
Parse text (string) and return list of parsed sentences (strings).

    Each sentence consists of space separated token elements and the
    token format returned by the PatternParser is
    WORD/TAG/PHRASE/ROLE/(LEMMA) (separated by a forward slash '/')

    :param str text: A string.
[ "Parse", "text", "(", "string", ")", "and", "return", "list", "of", "parsed", "sentences", "(", "strings", ")", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/np_extractors.py#L148-L169
markuskiller/textblob-de
textblob_de/taggers.py
PatternTagger.tag
def tag(self, sentence, tokenize=True):
    """Tag a string `sentence`.

    :param str or list sentence: A string or a list of sentence strings.
    :param tokenize: (optional) If ``False`` string has to be tokenized before
        (space separated string).

    """
    #: Do not process empty strings (Issue #3)
    if sentence.strip() == "":
        return []
    #: Do not process strings consisting of a single punctuation mark (Issue #4)
    elif sentence.strip() in PUNCTUATION:
        if self.include_punc:
            _sym = sentence.strip()
            if _sym in tuple('.?!'):
                _tag = "."
            else:
                _tag = _sym
            return [(_sym, _tag)]
        else:
            return []
    if tokenize:
        _tokenized = " ".join(self.tokenizer.tokenize(sentence))
        sentence = _tokenized
    # Sentence is tokenized before it is passed on to pattern.de.tag
    # (i.e. it is either submitted tokenized or if )
    _tagged = pattern_tag(sentence, tokenize=False,
                          encoding=self.encoding,
                          tagset=self.tagset)
    if self.include_punc:
        return _tagged
    else:
        _tagged = [
            (word, t) for word, t in _tagged if not PUNCTUATION_REGEX.match(
                unicode(t))]
        return _tagged
python
def tag(self, sentence, tokenize=True):
    """Tag a string `sentence`.

    :param str or list sentence: A string or a list of sentence strings.
    :param tokenize: (optional) If ``False`` string has to be tokenized before
        (space separated string).

    """
    #: Do not process empty strings (Issue #3)
    if sentence.strip() == "":
        return []
    #: Do not process strings consisting of a single punctuation mark (Issue #4)
    elif sentence.strip() in PUNCTUATION:
        if self.include_punc:
            _sym = sentence.strip()
            if _sym in tuple('.?!'):
                _tag = "."
            else:
                _tag = _sym
            return [(_sym, _tag)]
        else:
            return []
    if tokenize:
        _tokenized = " ".join(self.tokenizer.tokenize(sentence))
        sentence = _tokenized
    # Sentence is tokenized before it is passed on to pattern.de.tag
    # (i.e. it is either submitted tokenized or if )
    _tagged = pattern_tag(sentence, tokenize=False,
                          encoding=self.encoding,
                          tagset=self.tagset)
    if self.include_punc:
        return _tagged
    else:
        _tagged = [
            (word, t) for word, t in _tagged if not PUNCTUATION_REGEX.match(
                unicode(t))]
        return _tagged
[ "def", "tag", "(", "self", ",", "sentence", ",", "tokenize", "=", "True", ")", ":", "#: Do not process empty strings (Issue #3)", "if", "sentence", ".", "strip", "(", ")", "==", "\"\"", ":", "return", "[", "]", "#: Do not process strings consisting of a single punctuation mark (Issue #4)", "elif", "sentence", ".", "strip", "(", ")", "in", "PUNCTUATION", ":", "if", "self", ".", "include_punc", ":", "_sym", "=", "sentence", ".", "strip", "(", ")", "if", "_sym", "in", "tuple", "(", "'.?!'", ")", ":", "_tag", "=", "\".\"", "else", ":", "_tag", "=", "_sym", "return", "[", "(", "_sym", ",", "_tag", ")", "]", "else", ":", "return", "[", "]", "if", "tokenize", ":", "_tokenized", "=", "\" \"", ".", "join", "(", "self", ".", "tokenizer", ".", "tokenize", "(", "sentence", ")", ")", "sentence", "=", "_tokenized", "# Sentence is tokenized before it is passed on to pattern.de.tag", "# (i.e. it is either submitted tokenized or if )", "_tagged", "=", "pattern_tag", "(", "sentence", ",", "tokenize", "=", "False", ",", "encoding", "=", "self", ".", "encoding", ",", "tagset", "=", "self", ".", "tagset", ")", "if", "self", ".", "include_punc", ":", "return", "_tagged", "else", ":", "_tagged", "=", "[", "(", "word", ",", "t", ")", "for", "word", ",", "t", "in", "_tagged", "if", "not", "PUNCTUATION_REGEX", ".", "match", "(", "unicode", "(", "t", ")", ")", "]", "return", "_tagged" ]
Tag a string `sentence`.

    :param str or list sentence: A string or a list of sentence strings.
    :param tokenize: (optional) If ``False`` string has to be tokenized before
        (space separated string).
[ "Tag", "a", "string", "sentence", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/taggers.py#L60-L96
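A usage sketch; the default constructor settings are assumed and the tag labels in the comment are only indicative:

from textblob_de.taggers import PatternTagger

tagger = PatternTagger()  # constructor defaults assumed
print(tagger.tag("Heute scheint die Sonne."))
# -> a list of (word, POS) tuples, e.g. [('Heute', 'RB'), ('scheint', 'VB'), ...]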
markuskiller/textblob-de
textblob_de/compat.py
decode_string
def decode_string(v, encoding="utf-8"):
    """Returns the given value as a Unicode string (if possible)."""
    if isinstance(encoding, basestring):
        encoding = ((encoding,),) + (("windows-1252",), ("utf-8", "ignore"))
    if isinstance(v, binary_type):
        for e in encoding:
            try:
                return v.decode(*e)
            except:
                pass
        return v
    return unicode(v)
python
def decode_string(v, encoding="utf-8"):
    """Returns the given value as a Unicode string (if possible)."""
    if isinstance(encoding, basestring):
        encoding = ((encoding,),) + (("windows-1252",), ("utf-8", "ignore"))
    if isinstance(v, binary_type):
        for e in encoding:
            try:
                return v.decode(*e)
            except:
                pass
        return v
    return unicode(v)
[ "def", "decode_string", "(", "v", ",", "encoding", "=", "\"utf-8\"", ")", ":", "if", "isinstance", "(", "encoding", ",", "basestring", ")", ":", "encoding", "=", "(", "(", "encoding", ",", ")", ",", ")", "+", "(", "(", "\"windows-1252\"", ",", ")", ",", "(", "\"utf-8\"", ",", "\"ignore\"", ")", ")", "if", "isinstance", "(", "v", ",", "binary_type", ")", ":", "for", "e", "in", "encoding", ":", "try", ":", "return", "v", ".", "decode", "(", "*", "e", ")", "except", ":", "pass", "return", "v", "return", "unicode", "(", "v", ")" ]
Returns the given value as a Unicode string (if possible).
[ "Returns", "the", "given", "value", "as", "a", "Unicode", "string", "(", "if", "possible", ")", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/compat.py#L112-L123
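A small usage sketch of this fallback decoder; the byte strings are illustrative:

from textblob_de.compat import decode_string

print(decode_string(b"M\xc3\xbcnchen"))  # UTF-8 bytes -> 'München'
print(decode_string(b"M\xfcnchen"))      # falls back to windows-1252 -> 'München'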
markuskiller/textblob-de
textblob_de/compat.py
encode_string
def encode_string(v, encoding="utf-8"):
    """Returns the given value as a Python byte string (if possible)."""
    if isinstance(encoding, basestring):
        encoding = ((encoding,),) + (("windows-1252",), ("utf-8", "ignore"))
    if isinstance(v, unicode):
        for e in encoding:
            try:
                return v.encode(*e)
            except:
                pass
        return v
    return str(v)
python
def encode_string(v, encoding="utf-8"):
    """Returns the given value as a Python byte string (if possible)."""
    if isinstance(encoding, basestring):
        encoding = ((encoding,),) + (("windows-1252",), ("utf-8", "ignore"))
    if isinstance(v, unicode):
        for e in encoding:
            try:
                return v.encode(*e)
            except:
                pass
        return v
    return str(v)
[ "def", "encode_string", "(", "v", ",", "encoding", "=", "\"utf-8\"", ")", ":", "if", "isinstance", "(", "encoding", ",", "basestring", ")", ":", "encoding", "=", "(", "(", "encoding", ",", ")", ",", ")", "+", "(", "(", "\"windows-1252\"", ",", ")", ",", "(", "\"utf-8\"", ",", "\"ignore\"", ")", ")", "if", "isinstance", "(", "v", ",", "unicode", ")", ":", "for", "e", "in", "encoding", ":", "try", ":", "return", "v", ".", "encode", "(", "*", "e", ")", "except", ":", "pass", "return", "v", "return", "str", "(", "v", ")" ]
Returns the given value as a Python byte string (if possible).
[ "Returns", "the", "given", "value", "as", "a", "Python", "byte", "string", "(", "if", "possible", ")", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/compat.py#L126-L137
markuskiller/textblob-de
textblob_de/compat.py
_shutil_which
def _shutil_which(cmd, mode=os.F_OK | os.X_OK, path=None):
    """Given a command, mode, and a PATH string, return the path which
    conforms to the given mode on the PATH, or None if there is no such file.

    `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
    of os.environ.get("PATH"), or can be overridden with a custom search
    path.

    """
    # Check that a given file can be accessed with the correct mode.
    # Additionally check that `file` is not a directory, as on Windows
    # directories pass the os.access check.
    def _access_check(fn, mode):
        return (os.path.exists(fn) and os.access(fn, mode)
                and not os.path.isdir(fn))

    # If we're given a path with a directory part, look it up directly rather
    # than referring to PATH directories. This includes checking relative to the
    # current directory, e.g. ./script
    if os.path.dirname(cmd):
        if _access_check(cmd, mode):
            return cmd
        return None

    if path is None:
        path = os.environ.get("PATH", os.defpath)
    if not path:
        return None
    path = path.split(os.pathsep)

    if sys.platform == "win32":
        # The current directory takes precedence on Windows.
        if not os.curdir in path:
            path.insert(0, os.curdir)

        # PATHEXT is necessary to check on Windows.
        pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
        # See if the given file matches any of the expected path extensions.
        # This will allow us to short circuit when given "python.exe".
        # If it does match, only test that one, otherwise we have to try
        # others.
        if any([cmd.lower().endswith(ext.lower()) for ext in pathext]):
            files = [cmd]
        else:
            files = [cmd + ext for ext in pathext]
    else:
        # On other platforms you don't have things like PATHEXT to tell you
        # what file suffixes are executable, so just pass on cmd as-is.
        files = [cmd]

    seen = set()
    for dir in path:
        normdir = os.path.normcase(dir)
        if normdir not in seen:
            seen.add(normdir)
            for thefile in files:
                name = os.path.join(dir, thefile)
                if _access_check(name, mode):
                    return name
    return None
python
def _shutil_which(cmd, mode=os.F_OK | os.X_OK, path=None):
    """Given a command, mode, and a PATH string, return the path which
    conforms to the given mode on the PATH, or None if there is no such file.

    `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
    of os.environ.get("PATH"), or can be overridden with a custom search
    path.

    """
    # Check that a given file can be accessed with the correct mode.
    # Additionally check that `file` is not a directory, as on Windows
    # directories pass the os.access check.
    def _access_check(fn, mode):
        return (os.path.exists(fn) and os.access(fn, mode)
                and not os.path.isdir(fn))

    # If we're given a path with a directory part, look it up directly rather
    # than referring to PATH directories. This includes checking relative to the
    # current directory, e.g. ./script
    if os.path.dirname(cmd):
        if _access_check(cmd, mode):
            return cmd
        return None

    if path is None:
        path = os.environ.get("PATH", os.defpath)
    if not path:
        return None
    path = path.split(os.pathsep)

    if sys.platform == "win32":
        # The current directory takes precedence on Windows.
        if not os.curdir in path:
            path.insert(0, os.curdir)

        # PATHEXT is necessary to check on Windows.
        pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
        # See if the given file matches any of the expected path extensions.
        # This will allow us to short circuit when given "python.exe".
        # If it does match, only test that one, otherwise we have to try
        # others.
        if any([cmd.lower().endswith(ext.lower()) for ext in pathext]):
            files = [cmd]
        else:
            files = [cmd + ext for ext in pathext]
    else:
        # On other platforms you don't have things like PATHEXT to tell you
        # what file suffixes are executable, so just pass on cmd as-is.
        files = [cmd]

    seen = set()
    for dir in path:
        normdir = os.path.normcase(dir)
        if normdir not in seen:
            seen.add(normdir)
            for thefile in files:
                name = os.path.join(dir, thefile)
                if _access_check(name, mode):
                    return name
    return None
[ "def", "_shutil_which", "(", "cmd", ",", "mode", "=", "os", ".", "F_OK", "|", "os", ".", "X_OK", ",", "path", "=", "None", ")", ":", "# Check that a given file can be accessed with the correct mode.", "# Additionally check that `file` is not a directory, as on Windows", "# directories pass the os.access check.", "def", "_access_check", "(", "fn", ",", "mode", ")", ":", "return", "(", "os", ".", "path", ".", "exists", "(", "fn", ")", "and", "os", ".", "access", "(", "fn", ",", "mode", ")", "and", "not", "os", ".", "path", ".", "isdir", "(", "fn", ")", ")", "# If we're given a path with a directory part, look it up directly rather", "# than referring to PATH directories. This includes checking relative to the", "# current directory, e.g. ./script", "if", "os", ".", "path", ".", "dirname", "(", "cmd", ")", ":", "if", "_access_check", "(", "cmd", ",", "mode", ")", ":", "return", "cmd", "return", "None", "if", "path", "is", "None", ":", "path", "=", "os", ".", "environ", ".", "get", "(", "\"PATH\"", ",", "os", ".", "defpath", ")", "if", "not", "path", ":", "return", "None", "path", "=", "path", ".", "split", "(", "os", ".", "pathsep", ")", "if", "sys", ".", "platform", "==", "\"win32\"", ":", "# The current directory takes precedence on Windows.", "if", "not", "os", ".", "curdir", "in", "path", ":", "path", ".", "insert", "(", "0", ",", "os", ".", "curdir", ")", "# PATHEXT is necessary to check on Windows.", "pathext", "=", "os", ".", "environ", ".", "get", "(", "\"PATHEXT\"", ",", "\"\"", ")", ".", "split", "(", "os", ".", "pathsep", ")", "# See if the given file matches any of the expected path extensions.", "# This will allow us to short circuit when given \"python.exe\".", "# If it does match, only test that one, otherwise we have to try", "# others.", "if", "any", "(", "[", "cmd", ".", "lower", "(", ")", ".", "endswith", "(", "ext", ".", "lower", "(", ")", ")", "for", "ext", "in", "pathext", "]", ")", ":", "files", "=", "[", "cmd", "]", "else", ":", "files", "=", "[", "cmd", "+", "ext", "for", "ext", "in", "pathext", "]", "else", ":", "# On other platforms you don't have things like PATHEXT to tell you", "# what file suffixes are executable, so just pass on cmd as-is.", "files", "=", "[", "cmd", "]", "seen", "=", "set", "(", ")", "for", "dir", "in", "path", ":", "normdir", "=", "os", ".", "path", ".", "normcase", "(", "dir", ")", "if", "normdir", "not", "in", "seen", ":", "seen", ".", "add", "(", "normdir", ")", "for", "thefile", "in", "files", ":", "name", "=", "os", ".", "path", ".", "join", "(", "dir", ",", "thefile", ")", "if", "_access_check", "(", "name", ",", "mode", ")", ":", "return", "name", "return", "None" ]
Given a command, mode, and a PATH string, return the path which conforms to the given mode on the PATH, or None if there is no such file. `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result of os.environ.get("PATH"), or can be overridden with a custom search path.
[ "Given", "a", "command", "mode", "and", "a", "PATH", "string", "return", "the", "path", "which", "conforms", "to", "the", "given", "mode", "on", "the", "PATH", "or", "None", "if", "there", "is", "no", "such", "file", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/compat.py#L156-L215
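A usage sketch of this shutil.which backport; the command name is an arbitrary example:

from textblob_de.compat import _shutil_which

print(_shutil_which("python"))  # absolute path to the executable, or None if not on PATH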
markuskiller/textblob-de
textblob_de/blob.py
Word.translate
def translate(self, from_lang=None, to="de"):
    """Translate the word to another language using Google's Translate API.

    .. versionadded:: 0.5.0 (``textblob``)

    """
    if from_lang is None:
        from_lang = self.translator.detect(self.string)
    return self.translator.translate(self.string,
                                     from_lang=from_lang, to_lang=to)
python
def translate(self, from_lang=None, to="de"):
    """Translate the word to another language using Google's Translate API.

    .. versionadded:: 0.5.0 (``textblob``)

    """
    if from_lang is None:
        from_lang = self.translator.detect(self.string)
    return self.translator.translate(self.string,
                                     from_lang=from_lang, to_lang=to)
[ "def", "translate", "(", "self", ",", "from_lang", "=", "None", ",", "to", "=", "\"de\"", ")", ":", "if", "from_lang", "is", "None", ":", "from_lang", "=", "self", ".", "translator", ".", "detect", "(", "self", ".", "string", ")", "return", "self", ".", "translator", ".", "translate", "(", "self", ".", "string", ",", "from_lang", "=", "from_lang", ",", "to_lang", "=", "to", ")" ]
Translate the word to another language using Google's Translate API.

    .. versionadded:: 0.5.0 (``textblob``)
[ "Translate", "the", "word", "to", "another", "language", "using", "Google", "s", "Translate", "API", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/blob.py#L100-L109
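A hedged sketch; the call goes through textblob's Google Translate wrapper, so it needs network access and may fail if that endpoint changes, which is why the call itself is left commented out:

from textblob_de.blob import Word

w = Word("Haus")
# print(w.translate(from_lang="de", to="en"))  # -> 'house' if the translate service is reachable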
markuskiller/textblob-de
textblob_de/blob.py
WordList.lemmatize
def lemmatize(self):
    """Return the lemma of each word in this WordList.

    Currently using NLTKPunktTokenizer() for all lemmatization tasks. This
    might cause slightly different tokenization results compared to the
    TextBlob.words property.

    """
    _lemmatizer = PatternParserLemmatizer(tokenizer=NLTKPunktTokenizer())
    # WordList object --> Sentence.string
    # add a period (improves parser accuracy)
    _raw = " ".join(self) + "."
    _lemmas = _lemmatizer.lemmatize(_raw)
    return self.__class__([Word(l, t) for l, t in _lemmas])
python
def lemmatize(self):
    """Return the lemma of each word in this WordList.

    Currently using NLTKPunktTokenizer() for all lemmatization tasks. This
    might cause slightly different tokenization results compared to the
    TextBlob.words property.

    """
    _lemmatizer = PatternParserLemmatizer(tokenizer=NLTKPunktTokenizer())
    # WordList object --> Sentence.string
    # add a period (improves parser accuracy)
    _raw = " ".join(self) + "."
    _lemmas = _lemmatizer.lemmatize(_raw)
    return self.__class__([Word(l, t) for l, t in _lemmas])
[ "def", "lemmatize", "(", "self", ")", ":", "_lemmatizer", "=", "PatternParserLemmatizer", "(", "tokenizer", "=", "NLTKPunktTokenizer", "(", ")", ")", "# WordList object --> Sentence.string", "# add a period (improves parser accuracy)", "_raw", "=", "\" \"", ".", "join", "(", "self", ")", "+", "\".\"", "_lemmas", "=", "_lemmatizer", ".", "lemmatize", "(", "_raw", ")", "return", "self", ".", "__class__", "(", "[", "Word", "(", "l", ",", "t", ")", "for", "l", ",", "t", "in", "_lemmas", "]", ")" ]
Return the lemma of each word in this WordList. Currently using NLTKPunktTokenizer() for all lemmatization tasks. This might cause slightly different tokenization results compared to the TextBlob.words property.
[ "Return", "the", "lemma", "of", "each", "word", "in", "this", "WordList", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/blob.py#L305-L318
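A usage sketch via the blob API, assuming TextBlobDE exposes this WordList through its words property as shown elsewhere in these records:

from textblob_de import TextBlobDE

blob = TextBlobDE("Die Katzen schliefen auf den warmen Dächern.")
print(blob.words.lemmatize())  # lemmas such as 'Katze', 'schlafen'; exact output depends on the parser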
markuskiller/textblob-de
textblob_de/blob.py
BaseBlob.tokenize
def tokenize(self, tokenizer=None):
    """Return a list of tokens, using ``tokenizer``.

    :param tokenizer: (optional) A tokenizer object. If None, defaults to this
        blob's default tokenizer.

    """
    t = tokenizer if tokenizer is not None else self.tokenizer
    return WordList(t.tokenize(self.raw))
python
def tokenize(self, tokenizer=None):
    """Return a list of tokens, using ``tokenizer``.

    :param tokenizer: (optional) A tokenizer object. If None, defaults to this
        blob's default tokenizer.

    """
    t = tokenizer if tokenizer is not None else self.tokenizer
    return WordList(t.tokenize(self.raw))
[ "def", "tokenize", "(", "self", ",", "tokenizer", "=", "None", ")", ":", "t", "=", "tokenizer", "if", "tokenizer", "is", "not", "None", "else", "self", ".", "tokenizer", "return", "WordList", "(", "t", ".", "tokenize", "(", "self", ".", "raw", ")", ")" ]
Return a list of tokens, using ``tokenizer``.

    :param tokenizer: (optional) A tokenizer object. If None, defaults to this
        blob's default tokenizer.
[ "Return", "a", "list", "of", "tokens", "using", "tokenizer", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/blob.py#L405-L413
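A usage sketch; the PatternTokenizer import path is assumed from the tokenizers module referenced in these records:

from textblob_de import TextBlobDE
from textblob_de.tokenizers import PatternTokenizer

blob = TextBlobDE("Das ist ein Test.")
print(blob.tokenize())                    # uses the blob's default tokenizer
print(blob.tokenize(PatternTokenizer()))  # overrides it with another tokenizer object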
markuskiller/textblob-de
textblob_de/blob.py
BaseBlob.noun_phrases
def noun_phrases(self):
    """Returns a list of noun phrases for this blob."""
    return WordList([phrase.strip()
                     for phrase in self.np_extractor.extract(self.raw)
                     if len(phrase.split()) > 1])
python
def noun_phrases(self):
    """Returns a list of noun phrases for this blob."""
    return WordList([phrase.strip()
                     for phrase in self.np_extractor.extract(self.raw)
                     if len(phrase.split()) > 1])
[ "def", "noun_phrases", "(", "self", ")", ":", "return", "WordList", "(", "[", "phrase", ".", "strip", "(", ")", "for", "phrase", "in", "self", ".", "np_extractor", ".", "extract", "(", "self", ".", "raw", ")", "if", "len", "(", "phrase", ".", "split", "(", ")", ")", ">", "1", "]", ")" ]
Returns a list of noun phrases for this blob.
[ "Returns", "a", "list", "of", "noun", "phrases", "for", "this", "blob", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/blob.py#L456-L460
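A usage sketch; the example sentence and the extracted phrases are illustrative only:

from textblob_de import TextBlobDE

blob = TextBlobDE("Der kleine blaue Wagen steht vor dem alten Haus.")
print(blob.noun_phrases)  # only phrases with more than one word are kept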
markuskiller/textblob-de
textblob_de/blob.py
BaseBlob.pos_tags
def pos_tags(self):
    """Returns an list of tuples of the form (word, POS tag).

    Example:
    ::

        [('At', 'IN'), ('eight', 'CD'), ("o'clock", 'JJ'), ('on', 'IN'),
                ('Thursday', 'NNP'), ('morning', 'NN')]

    :rtype: list of tuples

    """
    return [(Word(word, pos_tag=t), unicode(t))
            for word, t in self.pos_tagger.tag(self.raw)
            # new keyword PatternTagger(include_punc=False)
            # if not PUNCTUATION_REGEX.match(unicode(t))
            ]
python
def pos_tags(self):
    """Returns an list of tuples of the form (word, POS tag).

    Example:
    ::

        [('At', 'IN'), ('eight', 'CD'), ("o'clock", 'JJ'), ('on', 'IN'),
                ('Thursday', 'NNP'), ('morning', 'NN')]

    :rtype: list of tuples

    """
    return [(Word(word, pos_tag=t), unicode(t))
            for word, t in self.pos_tagger.tag(self.raw)
            # new keyword PatternTagger(include_punc=False)
            # if not PUNCTUATION_REGEX.match(unicode(t))
            ]
[ "def", "pos_tags", "(", "self", ")", ":", "return", "[", "(", "Word", "(", "word", ",", "pos_tag", "=", "t", ")", ",", "unicode", "(", "t", ")", ")", "for", "word", ",", "t", "in", "self", ".", "pos_tagger", ".", "tag", "(", "self", ".", "raw", ")", "# new keyword PatternTagger(include_punc=False)", "# if not PUNCTUATION_REGEX.match(unicode(t))", "]" ]
Returns an list of tuples of the form (word, POS tag).

    Example:
    ::

        [('At', 'IN'), ('eight', 'CD'), ("o'clock", 'JJ'), ('on', 'IN'),
                ('Thursday', 'NNP'), ('morning', 'NN')]

    :rtype: list of tuples
[ "Returns", "an", "list", "of", "tuples", "of", "the", "form", "(", "word", "POS", "tag", ")", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/blob.py#L463-L479
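A usage sketch of the property:

from textblob_de import TextBlobDE

blob = TextBlobDE("Heute ist der 3. Mai 2014.")
for word, tag in blob.pos_tags:
    print(word, tag)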
markuskiller/textblob-de
textblob_de/blob.py
BaseBlob.word_counts
def word_counts(self):
    """Dictionary of word frequencies in this text."""
    counts = defaultdict(int)
    stripped_words = [lowerstrip(word) for word in self.words]
    for word in stripped_words:
        counts[word] += 1
    return counts
python
def word_counts(self):
    """Dictionary of word frequencies in this text."""
    counts = defaultdict(int)
    stripped_words = [lowerstrip(word) for word in self.words]
    for word in stripped_words:
        counts[word] += 1
    return counts
[ "def", "word_counts", "(", "self", ")", ":", "counts", "=", "defaultdict", "(", "int", ")", "stripped_words", "=", "[", "lowerstrip", "(", "word", ")", "for", "word", "in", "self", ".", "words", "]", "for", "word", "in", "stripped_words", ":", "counts", "[", "word", "]", "+=", "1", "return", "counts" ]
Dictionary of word frequencies in this text.
[ "Dictionary", "of", "word", "frequencies", "in", "this", "text", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/blob.py#L484-L490
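A usage sketch, assuming TextBlobDE inherits this property from BaseBlob (both live in blob.py per these records):

from textblob_de import TextBlobDE

blob = TextBlobDE("Der Hund jagt die Katze. Die Katze jagt die Maus.")
print(blob.word_counts['katze'])  # keys are lower-cased and stripped -> 2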
markuskiller/textblob-de
textblob_de/blob.py
Sentence.dict
def dict(self):
    """The dict representation of this sentence."""
    return {
        'raw': self.raw,
        'start_index': self.start_index,
        'end_index': self.end_index,
        'stripped': self.stripped,
        'noun_phrases': self.noun_phrases,
        'polarity': self.polarity,
        'subjectivity': self.subjectivity,
    }
python
def dict(self):
    """The dict representation of this sentence."""
    return {
        'raw': self.raw,
        'start_index': self.start_index,
        'end_index': self.end_index,
        'stripped': self.stripped,
        'noun_phrases': self.noun_phrases,
        'polarity': self.polarity,
        'subjectivity': self.subjectivity,
    }
[ "def", "dict", "(", "self", ")", ":", "return", "{", "'raw'", ":", "self", ".", "raw", ",", "'start_index'", ":", "self", ".", "start_index", ",", "'end_index'", ":", "self", ".", "end_index", ",", "'stripped'", ":", "self", ".", "stripped", ",", "'noun_phrases'", ":", "self", ".", "noun_phrases", ",", "'polarity'", ":", "self", ".", "polarity", ",", "'subjectivity'", ":", "self", ".", "subjectivity", ",", "}" ]
The dict representation of this sentence.
[ "The", "dict", "representation", "of", "this", "sentence", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/blob.py#L614-L624
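A usage sketch, assuming ``dict`` is exposed as a property (as in textblob) rather than a plain method:

from textblob_de import TextBlobDE

blob = TextBlobDE("Der erste Satz. Der zweite Satz.")
print(blob.sentences[0].dict)  # raw text, indices, noun phrases and sentiment in one dict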
markuskiller/textblob-de
textblob_de/blob.py
TextBlobDE.words
def words(self):
    """Return a list of word tokens. This excludes punctuation characters.
    If you want to include punctuation characters, access the ``tokens``
    property.

    :returns: A :class:`WordList <WordList>` of word tokens.

    """
    return WordList(
        word_tokenize(self.raw, self.tokenizer, include_punc=False))
python
def words(self):
    """Return a list of word tokens. This excludes punctuation characters.
    If you want to include punctuation characters, access the ``tokens``
    property.

    :returns: A :class:`WordList <WordList>` of word tokens.

    """
    return WordList(
        word_tokenize(self.raw, self.tokenizer, include_punc=False))
[ "def", "words", "(", "self", ")", ":", "return", "WordList", "(", "word_tokenize", "(", "self", ".", "raw", ",", "self", ".", "tokenizer", ",", "include_punc", "=", "False", ")", ")" ]
Return a list of word tokens. This excludes punctuation characters. If you
    want to include punctuation characters, access the ``tokens`` property.

    :returns: A :class:`WordList <WordList>` of word tokens.
[ "Return", "a", "list", "of", "word", "tokens", ".", "This", "excludes", "punctuation", "characters", ".", "If", "you", "want", "to", "include", "punctuation", "characters", "access", "the", "tokens", "property", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/blob.py#L650-L659
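A usage sketch; the contrast with the tokens property follows the docstring above:

from textblob_de import TextBlobDE

blob = TextBlobDE("Heute ist der 3. Mai 2014 und Dr. Meier feiert seinen 43. Geburtstag.")
print(blob.words)   # punctuation stripped
print(blob.tokens)  # punctuation kept as separate tokens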
markuskiller/textblob-de
textblob_de/blob.py
TextBlobDE.sentiment
def sentiment(self):
    """Return a tuple of form (polarity, subjectivity ) where polarity is a
    float within the range [-1.0, 1.0] and subjectivity is a float within
    the range [0.0, 1.0] where 0.0 is very objective and 1.0 is very
    subjective.

    :rtype: named tuple of the form ``Sentiment(polarity=0.0, subjectivity=0.0)``

    """
    #: Enhancement Issue #2
    #: adapted from 'textblob.en.sentiments.py'
    #: Return type declaration
    _RETURN_TYPE = namedtuple('Sentiment', ['polarity', 'subjectivity'])
    _polarity = 0
    _subjectivity = 0
    for s in self.sentences:
        _polarity += s.polarity
        _subjectivity += s.subjectivity
    try:
        polarity = _polarity / len(self.sentences)
    except ZeroDivisionError:
        polarity = 0.0
    try:
        subjectivity = _subjectivity / len(self.sentences)
    except ZeroDivisionError:
        subjectivity = 0.0
    return _RETURN_TYPE(polarity, subjectivity)
python
def sentiment(self):
    """Return a tuple of form (polarity, subjectivity ) where polarity is a
    float within the range [-1.0, 1.0] and subjectivity is a float within
    the range [0.0, 1.0] where 0.0 is very objective and 1.0 is very
    subjective.

    :rtype: named tuple of the form ``Sentiment(polarity=0.0, subjectivity=0.0)``

    """
    #: Enhancement Issue #2
    #: adapted from 'textblob.en.sentiments.py'
    #: Return type declaration
    _RETURN_TYPE = namedtuple('Sentiment', ['polarity', 'subjectivity'])
    _polarity = 0
    _subjectivity = 0
    for s in self.sentences:
        _polarity += s.polarity
        _subjectivity += s.subjectivity
    try:
        polarity = _polarity / len(self.sentences)
    except ZeroDivisionError:
        polarity = 0.0
    try:
        subjectivity = _subjectivity / len(self.sentences)
    except ZeroDivisionError:
        subjectivity = 0.0
    return _RETURN_TYPE(polarity, subjectivity)
[ "def", "sentiment", "(", "self", ")", ":", "#: Enhancement Issue #2", "#: adapted from 'textblob.en.sentiments.py'", "#: Return type declaration", "_RETURN_TYPE", "=", "namedtuple", "(", "'Sentiment'", ",", "[", "'polarity'", ",", "'subjectivity'", "]", ")", "_polarity", "=", "0", "_subjectivity", "=", "0", "for", "s", "in", "self", ".", "sentences", ":", "_polarity", "+=", "s", ".", "polarity", "_subjectivity", "+=", "s", ".", "subjectivity", "try", ":", "polarity", "=", "_polarity", "/", "len", "(", "self", ".", "sentences", ")", "except", "ZeroDivisionError", ":", "polarity", "=", "0.0", "try", ":", "subjectivity", "=", "_subjectivity", "/", "len", "(", "self", ".", "sentences", ")", "except", "ZeroDivisionError", ":", "subjectivity", "=", "0.0", "return", "_RETURN_TYPE", "(", "polarity", ",", "subjectivity", ")" ]
Return a tuple of form (polarity, subjectivity ) where polarity is a float
    within the range [-1.0, 1.0] and subjectivity is a float within the range
    [0.0, 1.0] where 0.0 is very objective and 1.0 is very subjective.

    :rtype: named tuple of the form ``Sentiment(polarity=0.0, subjectivity=0.0)``
[ "Return", "a", "tuple", "of", "form", "(", "polarity", "subjectivity", ")", "where", "polarity", "is", "a", "float", "within", "the", "range", "[", "-", "1", ".", "0", "1", ".", "0", "]", "and", "subjectivity", "is", "a", "float", "within", "the", "range", "[", "0", ".", "0", "1", ".", "0", "]", "where", "0", ".", "0", "is", "very", "objective", "and", "1", ".", "0", "is", "very", "subjective", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/blob.py#L667-L692
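A usage sketch; the polarity and subjectivity values depend on the German sentiment lexicon, so no exact numbers are claimed:

from textblob_de import TextBlobDE

blob = TextBlobDE("Das Essen war hervorragend. Der Service war leider sehr langsam.")
print(blob.sentiment)  # Sentiment(polarity=..., subjectivity=...), averaged over sentences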
markuskiller/textblob-de
textblob_de/blob.py
TextBlobDE.to_json
def to_json(self, *args, **kwargs):
    """Return a json representation (str) of this blob.

    Takes the same arguments as json.dumps.

    .. versionadded:: 0.5.1 (``textblob``)

    """
    return json.dumps(self.serialized, *args, **kwargs)
python
def to_json(self, *args, **kwargs):
    """Return a json representation (str) of this blob.

    Takes the same arguments as json.dumps.

    .. versionadded:: 0.5.1 (``textblob``)

    """
    return json.dumps(self.serialized, *args, **kwargs)
[ "def", "to_json", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "json", ".", "dumps", "(", "self", ".", "serialized", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Return a json representation (str) of this blob. Takes the same arguments
    as json.dumps.

    .. versionadded:: 0.5.1 (``textblob``)
[ "Return", "a", "json", "representation", "(", "str", ")", "of", "this", "blob", ".", "Takes", "the", "same", "arguments", "as", "json", ".", "dumps", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/blob.py#L728-L735
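A usage sketch; any json.dumps keyword arguments are passed through, as the docstring states:

from textblob_de import TextBlobDE

blob = TextBlobDE("Das ist ein kurzer Text.")
print(blob.to_json(indent=2, ensure_ascii=False))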
markuskiller/textblob-de
textblob_de/blob.py
TextBlobDE._create_sentence_objects
def _create_sentence_objects(self):
    """Returns a list of Sentence objects from the raw text."""
    sentence_objects = []
    sentences = sent_tokenize(self.raw, tokenizer=self.tokenizer)
    char_index = 0  # Keeps track of character index within the blob
    for sent in sentences:
        # Compute the start and end indices of the sentence
        # within the blob. This only works if the sentence splitter
        # does not perform any character replacements or changes to
        # white space.
        # Working: NLTKPunktTokenizer
        # Not working: PatternTokenizer
        try:
            start_index = self.raw.index(sent, char_index)
            char_index += len(sent)
            end_index = start_index + len(sent)
        except ValueError:
            start_index = None
            end_index = None
        # Sentences share the same models as their parent blob
        s = Sentence(
            sent, start_index=start_index, end_index=end_index,
            tokenizer=self.tokenizer, np_extractor=self.np_extractor,
            pos_tagger=self.pos_tagger, analyzer=self.analyzer,
            parser=self.parser, classifier=self.classifier)
        sentence_objects.append(s)
    return sentence_objects
python
def _create_sentence_objects(self):
    """Returns a list of Sentence objects from the raw text."""
    sentence_objects = []
    sentences = sent_tokenize(self.raw, tokenizer=self.tokenizer)
    char_index = 0  # Keeps track of character index within the blob
    for sent in sentences:
        # Compute the start and end indices of the sentence
        # within the blob. This only works if the sentence splitter
        # does not perform any character replacements or changes to
        # white space.
        # Working: NLTKPunktTokenizer
        # Not working: PatternTokenizer
        try:
            start_index = self.raw.index(sent, char_index)
            char_index += len(sent)
            end_index = start_index + len(sent)
        except ValueError:
            start_index = None
            end_index = None
        # Sentences share the same models as their parent blob
        s = Sentence(
            sent, start_index=start_index, end_index=end_index,
            tokenizer=self.tokenizer, np_extractor=self.np_extractor,
            pos_tagger=self.pos_tagger, analyzer=self.analyzer,
            parser=self.parser, classifier=self.classifier)
        sentence_objects.append(s)
    return sentence_objects
[ "def", "_create_sentence_objects", "(", "self", ")", ":", "sentence_objects", "=", "[", "]", "sentences", "=", "sent_tokenize", "(", "self", ".", "raw", ",", "tokenizer", "=", "self", ".", "tokenizer", ")", "char_index", "=", "0", "# Keeps track of character index within the blob", "for", "sent", "in", "sentences", ":", "# Compute the start and end indices of the sentence", "# within the blob. This only works if the sentence splitter", "# does not perform any character replacements or changes to", "# white space.", "# Working: NLTKPunktTokenizer", "# Not working: PatternTokenizer", "try", ":", "start_index", "=", "self", ".", "raw", ".", "index", "(", "sent", ",", "char_index", ")", "char_index", "+=", "len", "(", "sent", ")", "end_index", "=", "start_index", "+", "len", "(", "sent", ")", "except", "ValueError", ":", "start_index", "=", "None", "end_index", "=", "None", "# Sentences share the same models as their parent blob", "s", "=", "Sentence", "(", "sent", ",", "start_index", "=", "start_index", ",", "end_index", "=", "end_index", ",", "tokenizer", "=", "self", ".", "tokenizer", ",", "np_extractor", "=", "self", ".", "np_extractor", ",", "pos_tagger", "=", "self", ".", "pos_tagger", ",", "analyzer", "=", "self", ".", "analyzer", ",", "parser", "=", "self", ".", "parser", ",", "classifier", "=", "self", ".", "classifier", ")", "sentence_objects", ".", "append", "(", "s", ")", "return", "sentence_objects" ]
Returns a list of Sentence objects from the raw text.
[ "Returns", "a", "list", "of", "Sentence", "objects", "from", "the", "raw", "text", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/blob.py#L748-L779