repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/util/stats.py | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# TITLE : coordinates initialization file
# AUTHOR : Nathaniel Starkman
# PROJECT : AST1501
#
# -----------------------------------------------------------------------------
# Docstring and Metadata
"""Statistics on SkyCoords."""
__author__ = "Nathaniel Starkman"
__all__ = [
"mean"
]
#############################################################################
# IMPORTS
# GENERAL
from astropy.coordinates import SkyCoord
#############################################################################
# CODE
#############################################################################
def mean(sc):
    """Average of skycoords.

    Parameters
    ----------
    sc : SkyCoord
        non-scalar SkyCoord

    Returns
    -------
    avg_sc : SkyCoord
        average of `sc`, expressed in the same frame and
        representation type as the input
    """
    # Remember how the input was expressed so the result can match it.
    orig_frame = sc.frame
    orig_rep = sc.representation_type

    # Average the cartesian position and velocity components in ICRS.
    pos_mean = sc.icrs.cartesian.xyz.mean(axis=1)
    vel_mean = sc.icrs.velocity.d_xyz.mean(axis=1)

    averaged = SkyCoord(
        x=pos_mean[0],
        y=pos_mean[1],
        z=pos_mean[2],
        v_x=vel_mean[0],
        v_y=vel_mean[1],
        v_z=vel_mean[2],
        frame="icrs",
        representation_type="cartesian",
    )

    # Restore the caller's representation, then transform back.
    averaged.representation_type = orig_rep
    return averaged.transform_to(orig_frame)
#############################################################################
# END
| 1,456 | 19.236111 | 79 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/util/units_decorators.py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : unit decorator
# AUTHOR : Nathaniel Starkman
# PROJECT : Adapted for AST1501 from astronat
#
# ----------------------------------------------------------------------------
# Docstring and Metadata
"""units_decorator."""
__author__ = "Nathaniel Starkman"
##############################################################################
# IMPORTS
import inspect
from warnings import warn
# 3rd Party Imports
from astropy.units import Unit, dimensionless_unscaled
from astropy.units.decorators import _validate_arg_value, _get_allowed_units
from astropy.units.core import add_enabled_equivalencies, IrreducibleUnit
from astropy.units.physical import _unit_physical_mapping
from astropy.utils.decorators import wraps
from astropy.utils.misc import isiterable
###############################################################################
# CODE
###############################################################################
###############################################################################
# Helper Functions
def unit_helper(
    res, unit=None, to_value=False, equivalencies=None, decompose=False
):
    """Helper function for return of Quantities.

    ex: How to apply in a function directly

        def func(*args, **kw):
            result = do stuff
            return unit_helper(result,
                               unit=kw.get('unit', None),
                               to_value=kw.get('to_value', False),
                               equivalencies=kw.get('equivalencies', []),
                               decompose=kw.get('decompose', []))

    Parameters
    ----------
    res : Quantity
        the result
    unit : Astropy Unit, optional
        sets the unit for the returned `res`
        if None, returns `res` unchanged, unless `to_value` is used
        if '', decomposes
    to_value : bool
        whether to return ``.to_value(unit)``
        see Astropy.units.Quantity.to_value
    equivalencies : list, optional
        equivalencies for ``.to()`` and ``.to_value()``
        only used if `unit` or `to_value` are not None/False
        (None is treated as an empty list)
    decompose : bool or list
        if bool: True/False for decomposing
        if list: bases for ``.decompose(bases=[])``
        will first decompose, then apply unit, to_value, equivalencies.
        Decomposing then converting wastes time, since
        ``.to(unit, equivalencies)`` internally does conversions;
        the only use for combining decompose with the other parameters is
        ``unit=None, to_value=True, equivalencies=[], decompose=bases``,
        which decomposes to the desired bases then returns the value.
        ** experimental feature:
        entries that are not (u.Unit, u.core.IrreducibleUnit) are wrapped
        in ``Unit()``, so e.g. ``bases=[2 * u.lyr, u.s]`` works.

    Returns
    -------
    res : function output
        converted / decomposed / evaluated to desired units

    Raises
    ------
    ValueError
        if `unit` is not astropy compatible
    UnitConversionError
        if the conversion is not legitimate
    """
    # Avoid a shared mutable default argument; None means "no extras".
    if equivalencies is None:
        equivalencies = []

    # Fast path: nothing requested, return the result untouched.
    if (
        (unit is None)
        and (to_value is False)
        and (equivalencies == [])
        and (decompose is False)
    ):
        return res

    # First, decomposing (if requested).
    if decompose is True:
        res = res.decompose()
    elif decompose:  # decompose is a NON-empty list of bases
        cls = (Unit, IrreducibleUnit)
        # wrap anything that is not already a unit (experimental)
        bases = [
            Unit(x) if not issubclass(x.__class__, cls) else x
            for x in decompose
        ]
        res = res.decompose(bases=bases)
    # else: decompose is False or an empty list -- nothing to do

    # Now, converting.
    if (unit is None) and (to_value is False):  # nothing required
        return res
    elif to_value is True:  # return the bare value
        return res.to_value(unit, equivalencies)
    else:  # return with unit
        return res.to(unit, equivalencies)
def _simple_unit_decorator(
    unit=None, to_value=False, equivalencies=None, decompose=False
):
    """Decorator applying `unit_helper` to a function's return value.

    Any wrapped function accepts the additional key-word arguments:
    ``unit``, ``to_value``, ``equivalencies``, ``decompose``
    (see `unit_helper` for their meaning).

    ex:
        @unit_decorator
        def func(*args, **kw):
            result = do stuff
            return result

    is equivalent to

        def func(*args, unit=None, to_value=False, equivalencies=[],
                 decompose=False, **kw):
            result = do stuff w/ *args, and **kw
            return unit_helper(result, unit, to_value, equivalencies,
                               decompose)

    Other Parameters
    ----------------
    If provided to the decorator (ex: ``@unit_decorator(unit=unit)``),
    then that value becomes the default; if provided when calling the
    function (ex: ``myfunc(*args, unit=unit, **kwargs)``), the call-time
    value is used in the conversion.

    unit : Astropy Unit, optional
        sets the unit for the returned result
        if None, returns the result unchanged, unless `to_value` is used
        if '', decomposes
    to_value : bool
        whether to return ``.to_value(unit)``
        see Astropy.units.Quantity.to_value
    equivalencies : list, optional
        equivalencies for ``.to()`` and ``.to_value()``
        only used if `unit` or `to_value` are not None/False
    decompose : bool or list
        if bool: True/False for decomposing
        if list: bases for ``.decompose(bases=[])``
        (see `unit_helper` for details and caveats)

    Returns
    -------
    res : function result
        result of the wrapped function, with the unit operations
        performed by `unit_helper`
    """
    # Avoid a shared mutable default argument.
    if equivalencies is None:
        equivalencies = []

    def wrapper(func):
        @wraps(func)
        def wrapped(
            *args,
            unit=unit,
            to_value=to_value,
            equivalencies=equivalencies,
            decompose=decompose,
            **kw
        ):
            # evaluate the wrapped function, then post-process its units
            res = func(*args, **kw)
            return unit_helper(
                res,
                unit=unit,
                to_value=to_value,
                equivalencies=equivalencies,
                decompose=decompose,
            )

        return wrapped

    return wrapper
# Names of the decorator-controlled keyword arguments; also the keys
# substituted into the QuantityInputOutput docstring template by
# `as_decorator` (each must appear as a {placeholder} in that docstring).
_aioattrs = (
    "unit",
    "to_value",
    "equivalencies",
    "decompose",
    "default_units",
    "annot2dfu",
)
class QuantityInputOutput(object):
    r"""A decorator for validating the units of arguments to functions.

    **function must allow kwargs**

    Order of Precedence:
    - Function Arguments
    - Decorator Arguments
    - Function Annotation Arguments

    Function Arguments
    ------------------
    See `decorator argument' section
    function arguments override decorator & function annotation arguments

    *func_args: function arguments
        unit
        to_value
        equivalencies
        decompose
        default_units
    **func_kwargs: function key-word argument

    Decorator Arguments
    -------------------
    arguments to the decorator take LOWER precedence
    than arguments to the function itself.

    func: function
        the function to decorate
        default: None
    unit: astropy Unit / Quantity or str (default None)
        sets the unit for the function evaluation
        default: {unit}
        must be astropy-compatible unit specification
        equivalent to func(*args, **kw).to(unit)
        if None, skips unit conversion
    to_value: bool
        whether to return .to_value(unit)
        default: {to_value}
        see Astropy.units.Quantity.to_value
    equivalencies: list
        equivalencies for any units provided
        default: {equivalencies}
        used by .to() and .to_value()
    decompose: bool, list
        unit decomposition
        default: {decompose}
        if bool:
            True, False for decomposing
        if list:
            bases for .decompose(bases=[])
        will first decompose, then apply unit, to_value, equivalencies
        Decomposing then converting wastes time, since
        .to(unit, equivalencies) internally does conversions
        the only use for combining decompose with other unit_helper
        params is with:
            unit=None, to_value=True, equivalencies=[],
            decompose=`bool or [user bases here]'
        since this will decompose to desired bases then return the value
        in those bases
        ** experimental feature
        for things which are not (u.Unit, u.core.IrreducibleUnit),
        tries wrapping in Unit()
        this allows things such as:
            x = 10 * u.km * u.s
            bases = [2 * u.km, u.s]
            x.decompose(bases=basesConversionFunction(bases))
            >> 5 2 km s
        (this would normally return an error)
    default_units: dict
        dictionary of default units
        default: {default_units}
        ex: dict(x=u.km)
            for func(x, y)
            if x has no units, it is assumed to be in u.km
    annot2dfu: bool (default False)
        whether to interpret function annotations as default units
        function annotations have lower precedence than *default_units*
        default: {annot2dfu}

    Decorator Key-Word Arguments
    ----------------------------
    Unit specifications can be provided as keyword arguments
    to the decorator, or by using function annotation syntax.
    Arguments to the decorator take precedence
    over any function annotations present.

    **note**
    decorator key-word arguments are NEVER interpreted as *default_units*

    ex:
        @quantity_io(x=u.m, y=u.s)
        def func(x, y):
            ...

    Function Annotation Arguments
    -----------------------------
    Unit specifications can be provided as keyword arguments
    to the decorator, or by using function annotation syntax.
    Arguments to the function and decorator take precedence
    over any function annotations present.

    ex:
        def func(x: u.m, y: u.s) -> u.m / u.s:
            ...

    if annot2dfu is True (default False)
        function annotations are interpreted as default units
        function annotations have lower precedence than *default_units*

    Examples
    --------
    # Simple Example
    @quantity_io()
    def func(x: 'km', **kw) -> 2 * u.m:
        return x

    passing the wrong/no units doesn't work
    > func(2000 * u.s)
    > >> UnitConversionError
    > func(2000)
    > >> AttributeError

    the distance is internally converted
    > func(2000 * u.m)
    > >> 1000.0 2 m

    function annotation is superceded by an argument
    > func(2000 * u.m, unit=2 * u.km)
    > >> 1.0 2 km
    > func(2000 * u.m, unit=2 * u.km, to_value=True)
    > >> 1.0

    # More Complex Example
    this function only accepts
        x arguments of type 'length'
        t arguments of type 'time'
    annotations are assumed to be also be default_units
    @quantity_io(x='length', annot2dfu=True,
                 default_units=dict(t=u.s))
    def func(x: 'km', t, **kw) -> 2 * u.m / u.s:
        return x * t

    arguments have implicit units
    > func(2, 2)
    > >> 500.0 2 m / s

    decorator & annotation supersceded by an argument
    > func(2, 2 * u.ms, unit=2 * u.km / s)
    > >> 500.0 2 km / s

    print(func(2, 2))
    print(func(2, 2, unit=2 * u.km / u.s))
    """

    __name__ = "QuantityInputOutput"

    @classmethod
    def as_decorator(
        cls,
        func=None,
        unit=None,
        to_value=False,
        equivalencies=[],
        decompose=False,
        default_units={},
        annot2dfu=False,
        **decorator_kwargs
    ):
        r"""Construct (and optionally apply) a QuantityInputOutput decorator.

        See the class docstring for the full description of `unit`,
        `to_value`, `equivalencies`, `decompose`, `default_units` and
        `annot2dfu`, and for usage examples. Any extra keyword arguments
        (`**decorator_kwargs`) name function parameters and give their
        target unit / physical type.

        If `func` is given, the decorated function is returned directly;
        otherwise the decorator instance itself is returned for later
        application.
        """
        # making an instance without calling __init__ yet, so the
        # docstring can be templated first
        self = super(QuantityInputOutput, cls).__new__(cls)

        # modifying docstring: substitute the current default values into
        # the {unit}, {to_value}, ... placeholders of the class docstring
        # (keys are exactly the names listed in _aioattrs)
        _locals = locals()
        self.__doc__ = self.__doc__.format(
            **{k: _locals.get(k).__repr__() for k in set(_aioattrs)}
        )

        self.__init__(
            unit=unit,
            to_value=to_value,
            equivalencies=equivalencies,
            decompose=decompose,
            default_units=default_units,
            annot2dfu=annot2dfu,
            **decorator_kwargs
        )

        if func is not None:
            return self(func)
        else:
            return self

    def __init__(
        self,
        func=None,
        unit=None,
        to_value=False,
        equivalencies=[],
        decompose=False,
        default_units={},
        annot2dfu=False,
        **decorator_kwargs
    ):
        # Store the decorator-level defaults; per-call keyword arguments
        # to the wrapped function can override all of them (see __call__).
        # NOTE(review): the mutable defaults ([], {}) are shared across
        # calls; they are never mutated here, but treat with care.
        super().__init__()

        self.unit = unit
        self.to_value = to_value
        self.equivalencies = equivalencies
        self.decompose = decompose
        self.default_units = default_units
        self.annot2dfu = annot2dfu
        # remaining kwargs map parameter names -> target unit/physical type
        self.decorator_kwargs = decorator_kwargs

    def __call__(self, wrapped_function):
        # Extract the function signature for the function we are wrapping.
        wrapped_signature = inspect.signature(wrapped_function)

        @wraps(wrapped_function)
        def wrapped(
            *func_args,
            unit=self.unit,
            to_value=self.to_value,
            equivalencies=self.equivalencies,
            decompose=self.decompose,
            default_units=self.default_units,
            **func_kwargs
        ):
            # make func_args editable (positional args may be rescaled
            # in place by a default unit below)
            func_args = list(func_args)

            # Bind the arguments to our new function to the signature
            # of the original.
            bound_args = wrapped_signature.bind(*func_args, **func_kwargs)

            # Iterate through the parameters of the original signature
            for i, param in enumerate(wrapped_signature.parameters.values()):
                # We do not support variable arguments (*args, **kwargs)
                if param.kind in (
                    inspect.Parameter.VAR_KEYWORD,
                    inspect.Parameter.VAR_POSITIONAL,
                ):
                    continue

                # Catch the (never triggered) case where bind relied on
                # a default value.
                if (
                    param.name not in bound_args.arguments
                    and param.default is not param.empty
                ):
                    bound_args.arguments[param.name] = param.default

                # Get the value of this parameter (argument to new function)
                arg = bound_args.arguments[param.name]

                # +----------------------------------+
                # Get default unit or physical type, either from decorator
                # kwargs or annotations
                if param.name in default_units:
                    dfunit = default_units[param.name]
                elif self.annot2dfu is True:
                    dfunit = param.annotation
                else:
                    dfunit = inspect.Parameter.empty

                adjargbydfunit = True

                # If the dfunit is empty, then no target units or physical
                # types were specified so we can continue to the next arg
                if dfunit is inspect.Parameter.empty:
                    adjargbydfunit = False
                # If the argument value is None, and the default value is
                # None, pass through the None even if there is a dfunit
                elif arg is None and param.default is None:
                    adjargbydfunit = False
                # Here, we check whether multiple dfunit unit/physical
                # type's were specified in the decorator/annotation, or
                # whether a single string (unit or physical type) or a
                # Unit object was specified
                elif isinstance(dfunit, str):
                    dfunit = _get_allowed_units([dfunit])[0]
                elif not isiterable(dfunit):
                    pass
                else:
                    raise ValueError("target must be one Unit, not list")

                # Attach the default unit to a unitless argument, keeping
                # func_args / func_kwargs in sync with `arg`.
                if (not hasattr(arg, "unit")) & (adjargbydfunit is True):
                    if i < len(func_args):
                        # positional argument: rescale in place
                        func_args[i] *= dfunit
                    else:
                        func_kwargs[param.name] *= dfunit
                    arg *= dfunit

                # +----------------------------------+
                # Get target unit or physical type, either from decorator
                # kwargs or annotations
                if param.name in self.decorator_kwargs:
                    targets = self.decorator_kwargs[param.name]
                else:
                    targets = param.annotation

                # If the targets is empty, then no target units or
                # physical types were specified so we can continue to
                # the next arg
                if targets is inspect.Parameter.empty:
                    continue

                # If the argument value is None, and the default value is
                # None, pass through the None even if there is a target
                if arg is None and param.default is None:
                    continue

                # Here, we check whether multiple target unit/physical
                # type's were specified in the decorator/annotation, or
                # whether a single string (unit or physical type) or a
                # Unit object was specified
                if isinstance(targets, str) or not isiterable(targets):
                    valid_targets = [targets]

                # Check for None in the supplied list of allowed units
                # and, if present and the passed value is also None,
                # ignore.
                elif None in targets:
                    if arg is None:
                        continue
                    else:
                        valid_targets = [t for t in targets if t is not None]
                    # a unitless argument is allowed through as
                    # dimensionless in this case
                    if not hasattr(arg, "unit"):
                        arg = arg * dimensionless_unscaled
                        valid_targets.append(dimensionless_unscaled)

                else:
                    valid_targets = targets

                # Now we loop over the allowed units/physical types and
                # validate the value of the argument:
                _validate_arg_value(
                    param.name,
                    wrapped_function.__name__,
                    arg,
                    valid_targets,
                    self.equivalencies,
                )

            # evaluate wrapped_function with the (possibly rescaled)
            # arguments, with the requested equivalencies enabled
            with add_enabled_equivalencies(equivalencies):
                return_ = wrapped_function(*func_args, **func_kwargs)

            # a return annotation acts as the default output unit,
            # unless an explicit `unit` was given
            if (
                wrapped_signature.return_annotation
                not in (inspect.Signature.empty, None)
                and unit is None
            ):
                unit = wrapped_signature.return_annotation

            return unit_helper(
                return_,
                unit=unit,
                to_value=to_value,
                equivalencies=equivalencies,
                decompose=decompose,
            )

        # TODO dedent
        # append the decorator-added-kwargs documentation to the wrapped
        # function's docstring
        wrapped.__doc__ = (wrapped.__doc__ or "") + _funcdec

        return wrapped
###############################################################################
# Docstring fragment appended to every function wrapped by
# QuantityInputOutput.__call__; documents the decorator-added keyword
# arguments. Raw string: the \n / \t escapes are intentionally literal
# here (TODO dedent, see __call__).
_funcdec = r"""\n\n\tDecorator Docstring\n\t-------------------
A decorator for validating the units of arguments to functions.
**function must allow kwargs**
Decorator-Added Function Key-Word Arguments
-------------------------------------------
function arguments override decorator & function annotation arguments
unit: astropy Unit / Quantity or str (default None)
sets the unit for the function evaluation
must be astropy-compatible unit specification
equivalent to func(*args, **kw).to(unit)
if None, skips unit conversion
to_value: bool (default False)
whether to return .to_value(unit)
see Astropy.units.Quantity.to_value
equivalencies: list (default [])
equivalencies for any units provided
used by .to() and .to_value()
decompose: bool, list (default [])
if bool:
True, False for decomposing
if list:
bases for .decompose(bases=[])
will first decompose, then apply unit, to_value, equivalencies
Decomposing then converting wastes time, since .to(unit, equivalencies) internally does conversions
the only use for combining decompose with other unit_helper params is with:
unit=None, to_value=True, equivalencies=[], decompose=`bool or [user bases here]'
since this will dcompose to desired bases then return the value in those bases
** experimental feature
for things which are not (u.Unit, u.core.IrreducibleUnit), tries wrapping in Unit()
this allows things such as:
x = 10 * u.km * u.s
bases = [2 * u.km, u.s]
x.decompose(bases=basesConversionFunction(bases))
>> 5 2 km s
(this would normally return an error)
default_units: dict (default {})
dictionary of default units
ex: {x: u.km}
for func(x, y)
if x has no units, it is assumed to be in u.km
"""
###############################################################################
# Public decorator alias: `quantity_io(...)` builds (and optionally
# applies) a QuantityInputOutput decorator.
quantity_io = QuantityInputOutput.as_decorator
###############################################################################
# END
| 28,317 | 34.133995 | 111 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/util/isochrone.py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : isochrone
# AUTHOR : Nathaniel Starkman
# PROJECT : AST1501
#
# ----------------------------------------------------------------------------
# Docstring and Metadata
"""isochrone."""
__author__ = "Nathaniel Starkman"
#############################################################################
# IMPORTS
# GENERAL
import numpy as np
from astropy import units as u
from astropy.table import Table
# PROJECT-SPECIFIC
from .units_decorators import quantity_io
from . import table_utils as mqt
#############################################################################
# CODE
#############################################################################
#############################################################################
# Distance Modulus
@quantity_io(d="length", A=u.mag, annot2dfu=True, default_units={"A": u.mag})
def distanceModulus_magnitude(d: u.pc, A=0 * u.mag, obs=True, **kw) -> u.mag:
    """Distance Modulus.

    equation: DM = 5 log10(d / 10) + A

    mtrue - M = 5 log10(d / 10)
    if there is line-of-sight extinction
        mobs = mtrue + A
        | mobs - M = 5 log10(d / 10) + A
        | mtrue - M = 5 log10(d / 10) - A

    Arguments
    ---------
    d : scalar, array, Quantity
        distance
        no units => parsecs
    A : scalar, array, Quantity (in mag)
        extinction in magnitudes
    obs : bool (default True)
        whether to return (mobs - M) or (mtrue - M)
        default: (mobs - M)
        **don't change unless specifically required
    kw : dict
        absorbed by the quantity_io decorator (unit, to_value, ...)

    Returns
    -------
    DM : scalar, array
        default units: u.mag
    """
    # BUGFIX: use a negated copy instead of `A *= -1`, which would
    # mutate a caller-supplied Quantity array in place.
    if not obs:
        A = -A

    return (5 * u.mag) * (np.log10(d.to_value(u.pc)) - 1) + A
#############################################################################
# Table Readers
def CMD3p1TableRead(
    fname, detector="ps1", fmat="ascii", header_start=7, distance=None
):
    """Read a CMD-3.1 isochrone table.

    Read a CMD table from CMD 3.1 @ http://stev.oapd.inaf.it/cgi-bin/cmd
    CMD starts in absolute magnitudes.

    Parameters
    ----------
    fname : str
        path of the table file
    detector : str
        photometric system: 'cfht', or one of
        ('ps1', 'panstarrs', 'panstarrs1').
        NOTE(review): any other value leaves `absmagcols`/`appmagcols`
        undefined, raising NameError below when `distance` is given --
        confirm whether other detectors should be rejected explicitly.
    fmat : str
        `format` argument passed to astropy `Table.read`
    header_start : int
        line index of the header for the reader; also used to trim the
        'comments' metadata
    distance : Quantity or None
        if given, apparent-magnitude columns (lower-case names) are added
        using the distance modulus at this distance

    Returns
    -------
    isocr : QTable
        the isochrone table, with units attached and (optionally)
        apparent-magnitude and g-r color columns added
    """
    isocr = Table.read(fname, format=fmat, header_start=header_start)
    # keep only the comment lines that precede the header row
    isocr.meta["comments"] = isocr.meta["comments"][: header_start - 1]

    # Normalize the detector-specific magnitude column names.
    if detector in ("cfht",):
        absmagcols = ("Umag", "Gmag", "Rmag", "Imag", "Zmag")
        appmagcols = ("u", "g", "r", "i", "z")
        mqt.rename_columns(
            isocr,
            ("u*mag", "Umag"),
            ("g'mag", "Gmag"),
            ("r'mag", "Rmag"),
            ("i'mag", "Imag"),
            ("z'mag", "Zmag"),
        )
    elif detector in ("ps1", "panstarrs", "panstarrs1"):
        absmagcols = ("Gmag", "Rmag", "Imag", "Zmag", "Ymag", "Wmag")
        appmagcols = ("g", "r", "i", "z", "y", "w")
        mqt.rename_columns(
            isocr,
            ("gP1mag", "Gmag"),
            ("rP1mag", "Rmag"),
            ("iP1mag", "Imag"),
            ("zP1mag", "Zmag"),
            ("yP1mag", "Ymag"),
            ("wP1mag", "Wmag"),
        )

    # Attach units to the known columns (converts the Table to a QTable).
    udict = {
        "Age": u.yr,
        "Mass": u.Msun,
        "mbolmag": u.mag,
        "Umag": u.mag,
        "Gmag": u.mag,
        "Rmag": u.mag,
        "Imag": u.mag,
        "Zmag": u.mag,
        "Ymag": u.mag,
        "Wmag": u.mag,
    }
    isocr = mqt.add_units_to_Table(isocr, udict=udict)  # making QTable

    # Add a g-r color column when both bands are present.
    if set(["Gmag", "Rmag"]).issubset(isocr.colnames):
        mqt.add_color_col(isocr, "Gmag", "Rmag", color="g-r")

    # Adjusting to apparent magnitude via the distance modulus.
    if distance is not None:
        DM = distanceModulus_magnitude(distance)
        appmags = [isocr[n] + DM for n in absmagcols]
        isocr.add_columns(appmags, names=appmagcols)
    else:
        print("Not adding apparent mags (cols ugriz)")

    return isocr
# -------------------------------------------------------------------------
def readCMDTablestevoapd(
    fname, detector="ps1", fmat="ascii", header_start=7, distance=None
):
    """Read a CMD 3.1 (stev.oapd.inaf.it) table; alias for `CMD3p1TableRead`."""
    # Thin pass-through wrapper kept for backwards-compatible naming.
    options = dict(
        detector=detector,
        fmat=fmat,
        header_start=header_start,
        distance=distance,
    )
    return CMD3p1TableRead(fname, **options)
##############################################################################
# END
| 4,288 | 24.081871 | 78 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/util/table_utils.py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : Table utilities
# AUTHOR : Nathaniel Starkman
# PROJECT : AST1501
#
# ----------------------------------------------------------------------------
# Docstring and Metadata
"""Table utilities.
Routine Listings
----------------
neg_to_nan
add_units_to_Table
add_color_col
add_calculated_col
rename_columns
drop_colnames
"""
__author__ = "Nathaniel Starkman"
__credits__ = ["Astropy"]
#############################################################################
# IMPORTS
# GENERAL
import itertools
import numpy as np
from astropy.table import Table, QTable
##############################################################################
# CODE
##############################################################################
def neg_to_nan(df, col):
    """Set negative values in `col` to NaN.

    This edits in-place.

    Parameters
    ----------
    df : array_like
        table / mapping supporting ``df[col]`` access to an array column
    col : index, index str, slicer
        the column selector
    """
    # np.nan, not the np.NaN alias (removed in NumPy 2.0)
    df[col][df[col] < 0] = np.nan
# ----------------------------------------------------------------------------
def add_units_to_Table(df, udict=None):
    """Add units to an astropy Table.

    Takes Table and returns QTable.

    Parameters
    ----------
    df : Table or QTable
    udict : dict, optional
        dictionary of column names and corresponding units;
        if None, no units are changed.

    Returns
    -------
    qdf : QTable
        same table as `df`, with units
    """
    # BUGFIX: `udict` is documented as optional but None previously
    # raised AttributeError on .items(); treat None as "no units".
    if udict is None:
        udict = {}

    # Adding Units, if corresponding column is in the Table and its unit
    # differs from the requested one.
    for key, unit in udict.items():
        if key in df.columns and df[key].unit != unit:
            setattr(df[key], "unit", unit)

    return QTable(df)
# ----------------------------------------------------------------------------
def add_color_col(df, c1, c2, **kw):
    """Add color column.

    Parameters
    ----------
    df : Table, QTable
    c1 : str
        name of the first magnitude column
    c2 : str
        name of the second magnitude column

    Other Parameters
    ----------------
    color : str
        name of the color column; defaults to ``"{c1}-{c2}"``

    Notes
    -----
    also adds a quadrature-sum error column ``"{color}_err"`` when both
    ``"{c1}_err"`` and ``"{c2}_err"`` columns exist

    TODO
    ----
    make this a function of add_calculated_col
    """
    color = kw.get("color", c1 + "-" + c2)
    if color in df.colnames:
        print("{} already in table".format(color))
        return None

    # Locate the first band's error column; fall back to the magnitude
    # column itself and note that no error column is available.
    # (astropy raises ValueError for a missing column; KeyError is kept
    # for dict-like tables.)
    try:
        c1ind = df.index_column(c1 + "_err")
    except KeyError:
        c1ind = df.index_column(c1)
        noerr = True
    except ValueError:
        c1ind = df.index_column(c1)
        noerr = True
    else:
        noerr = False

    # Same for the second band.
    try:
        c2ind = df.index_column(c2 + "_err")
    except KeyError:
        # BUGFIX: previously looked up `c1` here, placing the new column
        # at the wrong index when `{c2}_err` was missing.
        c2ind = df.index_column(c2)
        noerr = True
    except ValueError:
        c2ind = df.index_column(c2)
        noerr = True
    else:
        # keep noerr True if the first band already lacked errors
        noerr = False if noerr is False else True

    # Insert the color column just after the later of the two source cols.
    colindex = max(c1ind, c2ind) + 1
    df.add_column(df[c1] - df[c2], index=colindex, name=color)
    df[color].info.description = color + " color"

    # Adding Errors (quadrature sum), only when both error columns exist.
    if noerr is False:
        colindex = df.index_column(color) + 1
        df.add_column(
            np.sqrt(df[c1 + "_err"] ** 2 + df[c2 + "_err"] ** 2),
            index=colindex,
            name=color + "_err",
        )
        df[color + "_err"].info.description = "error in {} color [mag]"

    return
# ----------------------------------------------------------------------------
def add_calculated_col(df, func, *cols, funcargs=None, funckw=None, **kw):
    """Add a calculated column in column variables.

    Parameters
    ----------
    df : Table, QTable
    func : function
        function over df cols
        form function(*columns, *extra_arguments, **keyword_arguments)
    cols : list of str
        the names of the columns in df
    funcargs : list, optional
        list of extra arguments for func
    funckw : dict, optional
        dictionary of extra keyword arguments for func

    Other Parameters
    ----------------
    name : str
        column name; defaults to ``"func{(cols)}"``
    index : int
        index to put column at
    description : str
        column description; defaults to `name`
    return : bool
        whether to return the modified df

    Returns
    -------
    df : Table, QTable
        only if kw['return'] is True; otherwise modifies in place
        and returns None
    """
    # avoid shared mutable default arguments
    funcargs = [] if funcargs is None else funcargs
    funckw = {} if funckw is None else funckw

    name = kw.get("name", "func{}".format(str(tuple(cols))))
    if name in df.colnames:
        print("{} already in table".format(name))
        return None

    colindex = kw.get("index", None)
    df.add_column(
        func(*[df[c] for c in cols], *funcargs, **funckw),
        index=colindex,
        name=name,
    )
    df[name].info.description = kw.get("description", name)

    if kw.get("return", False) is True:
        return df
# ----------------------------------------------------------------------------
def rename_columns(df, *args, **kw):
    """Rename columns of a table.

    Parameters
    ----------
    df : Table, QTable
    args : tuple
        pairs ``(name, rename), (name, rename), ...``
    kw : dict
        mapping ``{name: rename, ...}``

    Notes
    -----
    `args` and `kw` are chained together; keyword entries are applied
    after the positional pairs, so kw takes precedence.
    """
    renamings = itertools.chain(args, kw.items())
    for old_name, new_name in renamings:
        df.rename_column(old_name, new_name)
# ----------------------------------------------------------------------------
# def drop_colnames(colnames, *args):
# """helper function for making a table from another table, dropping some names
# Parameters
# ----------
# colnames: list
# list of names in the original table
# args: list
# list of strings of names to drop
# Returns
# -------
# """
# names = np.array(colnames[:]) # shallow copy just in case
# inds = ~np.in1d(names, args)
# return list(names[inds])
# # /def
##############################################################################
# END
| 5,958 | 20.131206 | 83 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/util/util.py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : util
# AUTHOR : Nathaniel Starkman
# PROJECT : AST1501
#
# ----------------------------------------------------------------------------
# Docstring and Metadata
"""util.
Routine Listings
----------------
quadrature
mad_with_extrema
mad_adj_by_std
adaptive_binning
"""
__author__ = "Nathaniel Starkman"
#############################################################################
# IMPORTS
# GENERAL
import numpy as np
from scipy.stats import binned_statistic as binned_stats
from astropy import units as u
from astropy.stats import median_absolute_deviation
#############################################################################
# CODE
def mad_with_extrema(x, mnm=None, mxm=None, return_func=False):
    """Median absolute deviation, clamped to optional bounds.

    Parameters
    ----------
    x : array
        the data on which to calculate the MAD
    mnm : float, optional
        lower bound; a MAD below it is replaced by `mnm`
    mxm : float, optional
        upper bound; a MAD above it is replaced by `mxm`
    return_func : bool
        if True, return a one-argument function with `mnm` & `mxm` baked in

    Returns
    -------
    mad : float or array
        the (possibly clamped) MAD; or, when `return_func` is True, a
        function ``f(x) -> mad_with_extrema(x, mnm=mnm, mxm=mxm)``
    """
    if return_func is True:
        def _bounded_mad(data):
            return mad_with_extrema(data, mnm=mnm, mxm=mxm, return_func=False)
        return _bounded_mad

    # astropy MAD; NaNs are mapped to 0
    mad = np.nan_to_num(median_absolute_deviation(x))

    # bounds inherit x's unit when x is a Quantity
    has_unit = issubclass(x.__class__, u.Quantity)

    if mnm is not None:
        lo = mnm * x.unit if has_unit else mnm
        try:  # array-valued MAD
            mad[mad < lo] = lo
        except TypeError:  # scalar MAD
            if mad < lo:
                mad = lo

    if mxm is not None:
        hi = mxm * x.unit if has_unit else mxm
        try:  # array-valued MAD
            mad[mad > hi] = hi
        except TypeError:  # scalar MAD
            if mad > hi:
                mad = hi

    return mad
def mad_adj_by_std(bin_mad, x, binnumber):
    """Floor each bin's MAD at the bin's mean of ``x`` (in place).

    NOTE(review): the name and original docstring say "std", but the code
    takes ``np.mean`` of each bin, not ``np.std`` — confirm which was
    intended before relying on this.

    Parameters
    ----------
    bin_mad : array
        per-bin MAD values; entry ``n - 1`` corresponds to bin label ``n``.
        Mutated in place.
    x : array
        data; ``.value`` is taken on the bin mean, so ``x`` is presumably
        an astropy Quantity — TODO confirm against callers
    binnumber : array
        1-based bin label for each element of ``x``

    Returns
    -------
    bin_mad : array
        the same (mutated) array
    """
    for n in set(binnumber):
        # replace the bin's MAD with max(mean of the bin, current MAD)
        bin_mad[n - 1] = max(np.mean(x[binnumber == n]).value, bin_mad[n - 1])
    return bin_mad
# ----------------------------------------------------------------------------
def adaptive_binning(
    x,
    y,
    statistic="median",
    ibins=10,
    pcttol=10,
    minpnts=25,
    xunits=None,
    yunits=None,
):
    """Adaptively bin *y* against *x*, splitting high-gradient bins.

    Starting from `ibins` equal-width bins, any bin whose statistic differs
    from its left neighbor's by more than `pcttol` percent is split in half,
    unless it already contains fewer than `minpnts` points.  Splitting
    repeats until every bin is within tolerance or too small to split.

    BUGFIX: `statistic`, `pcttol`, and `minpnts` were previously ignored in
    parts of the loop ("median", 10, and 25 were hardcoded); they are now
    honored throughout.  The `statistic` default is "median" so that
    default-argument calls behave exactly as before.

    Parameters
    ----------
    x : array
        the x data
    y : array
        the data to be binned
    statistic : str or callable
        statistic for `scipy.stats.binned_statistic` (default 'median');
        see that function's documentation for the options
    ibins : int
        initial number of equal-width bins
    pcttol : float
        percent difference tolerance to stop binning
    minpnts : int
        minimum number of points a bin must have to be split
    xunits : unit, optional
        multiplied onto the returned bin centers and edges
    yunits : unit, optional
        multiplied onto the returned bin statistic

    Returns
    -------
    bin_stat : ndarray
    bin_cents : ndarray
    bin_edges : ndarray
    binnumber : ndarray
    pctdiff : ndarray
    """

    def _pctdiff(stat):
        # percent difference of each bin from its left neighbor
        # (the first bin has 0% difference with itself)
        return np.array([0, *np.abs(np.diff(stat) / stat[:-1]) * 100])

    bin_stat, bin_edges, binnumber = binned_stats(
        x, y, statistic=statistic, bins=ibins
    )
    pctdiff = _pctdiff(bin_stat)
    keepgoing = pctdiff > pcttol  # keep going on > pcttol % differences

    while any(keepgoing):
        # new bin edges, splitting any bin with > pcttol % difference
        new_bin_edges = []
        for i, v in enumerate(pctdiff):
            if v < pcttol:  # within tolerance: keep this edge as-is
                new_bin_edges.append(bin_edges[i])
            elif (
                len(binnumber[binnumber == i]) < minpnts
            ):  # too few points to split
                new_bin_edges.append(bin_edges[i])
            else:  # insert the midpoint, halving the bin to the left
                new_bin_edges.append((bin_edges[i] + bin_edges[i - 1]) / 2)
                new_bin_edges.append(bin_edges[i])
        new_bin_edges.append(bin_edges[-1])

        # recalculate the statistic on the refined edges
        bin_stat, bin_edges, binnumber = binned_stats(
            x, y, statistic=statistic, bins=new_bin_edges
        )
        pctdiff = _pctdiff(bin_stat)

        # continue only for bins out of tolerance AND large enough to split
        keepgoing = [
            (v >= pcttol) and (len(binnumber[binnumber == i]) >= minpnts)
            for i, v in enumerate(pctdiff)
        ]

    # bin centers: midpoints of the final edges
    bin_cents = bin_edges[:-1] + np.diff(bin_edges) / 2

    # adding back units
    if xunits is not None:
        bin_cents = bin_cents * xunits
        bin_edges = bin_edges * xunits
    if yunits is not None:
        bin_stat = bin_stat * yunits

    return bin_stat, bin_cents, bin_edges, binnumber, pctdiff
#############################################################################
# END
| 5,076 | 23.17619 | 78 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/util/MegaCamGen1_from_PS1.py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : MegaCamGen1_from_PS1
# AUTHOR : Nathaniel Starkman
# PROJECT : AST1501
#
# ----------------------------------------------------------------------------
# Docstring and Metadata
"""MegaCamGen1_from_PS1."""
__author__ = "Nathaniel Starkman"
__credits__ = [
"http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/en/megapipe/docs/filt.html"
]
##############################################################################
# IMPORTS
# GENERAL
import warnings
from astropy import units
from astropy.table import Table
# PROJECT-SPECIFIC
from .units_decorators import quantity_io
#############################################################################
# CODE
#############################################################################
@quantity_io()
def U_MP9301(ps: Table, **kw) -> units.mag:
    """CFHT MegaCam u-band (filter MP9301) from Pan-STARRS photometry.

    Applies the cubic color term
        uCFHT - gPS = .523 - .343 gmi + 2.44 gmi^2 - .998 gmi^3,
    with gmi = (gPS - iPS), valid for .3 mag < gmi < 1.5 mag.

    Transformation from
    http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/en/megapipe/docs/filt.html
    (Pan-STARRS to MegaCam plots, top row, 1st plot).

    Parameters
    ----------
    ps : Astropy Table
        needs a g column, plus either an i or a g-i column

    Other Parameters
    ----------------
    g : str  (default 'g')
        g column name
    i : str  (default 'i')
        i column name
    gmi : str  (default 'g-i')
        g-i column name

    Returns
    -------
    u_cfht : array_like
        CFHT u-band magnitudes
    """
    g_col = kw.get("g", "g")
    i_col = kw.get("i", "i")
    gmi_col = kw.get("gmi", "g-i")
    # prefer a precomputed g-i column; otherwise build it from g and i
    gmi = ps[gmi_col] if gmi_col in ps.colnames else ps[g_col] - ps[i_col]
    # warn (but do not mask) stars outside the fit's validity range
    in_range = (0.3 * units.mag < gmi) & (gmi < 1.5 * units.mag)
    if not all(in_range):
        warnings.warn("MCg1.U: not all .3 mag < (g-i)_ps < 1.5 mag")
    # cubic color-term polynomial, evaluated term by term
    u_cfht = (
        ps[g_col]
        + 0.523 * units.mag
        + (-0.343) * gmi
        + (2.44 / units.mag) * gmi ** 2
        + (-0.998 / units.mag ** 2) * gmi ** 3
    )
    return u_cfht
# -------------------------------------------------------------------------
@quantity_io()
def G_MP9401(ps: Table, **kw) -> units.mag:
    """CFHT MegaCam g-band (filter MP9401) from Pan-STARRS photometry.

    Applies the cubic color term
        gCFHT - gPS = -.001 - .004 gmi - .0056 gmi^2 + .00292 gmi^3,
    with gmi = (gPS - iPS), valid for -1 mag < gmi < 4 mag.

    Transformation from
    http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/en/megapipe/docs/filt.html
    (Pan-STARRS to MegaCam plots, top row, 2nd plot).

    Parameters
    ----------
    ps : Astropy Table
        needs a g column, plus either an i or a g-i column

    Other Parameters
    ----------------
    g : str  (default 'g')
        g column name
    i : str  (default 'i')
        i column name
    gmi : str  (default 'g-i')
        g-i column name

    Returns
    -------
    g_cfht : array_like
        CFHT g-band magnitudes
    """
    g_col = kw.get("g", "g")
    i_col = kw.get("i", "i")
    gmi_col = kw.get("gmi", "g-i")
    # prefer a precomputed g-i column; otherwise build it from g and i
    gmi = ps[gmi_col] if gmi_col in ps.colnames else ps[g_col] - ps[i_col]
    # warn (but do not mask) stars outside the fit's validity range
    in_range = (-1.0 * units.mag < gmi) & (gmi < 4 * units.mag)
    if not all(in_range):
        warnings.warn("MCg1.G: not all -1 mag < (g-i)_ps < 4 mag")
    # cubic color-term polynomial, evaluated term by term
    g_cfht = (
        ps[g_col]
        + (-0.001) * units.mag
        + (-0.004) * gmi
        + (-0.0056 / units.mag) * gmi ** 2
        + (0.00292 / units.mag ** 2) * gmi ** 3
    )
    return g_cfht
# -------------------------------------------------------------------------
@quantity_io()
def R_MP9601(ps: Table, **kw) -> units.mag:
    """CFHT MegaCam r-band (filter MP9601) from Pan-STARRS photometry.

    Applies the cubic color term
        rCFHT - rPS = .002 - .017 gmi + .00554 gmi^2 - .000692 gmi^3,
    with gmi = (gPS - iPS), valid for -1 mag < gmi < 4 mag.

    Transformation from
    http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/en/megapipe/docs/filt.html
    (Pan-STARRS to MegaCam plots, top row, 3rd plot).

    Parameters
    ----------
    ps : Astropy Table
        needs an r column, plus either (g & i) or a g-i column

    Other Parameters
    ----------------
    g : str  (default 'g')
        g column name
    r : str  (default 'r')
        r column name
    i : str  (default 'i')
        i column name
    gmi : str  (default 'g-i')
        g-i column name

    Returns
    -------
    r_cfht : array_like
        CFHT r-band magnitudes
    """
    g_col = kw.get("g", "g")
    r_col = kw.get("r", "r")
    i_col = kw.get("i", "i")
    gmi_col = kw.get("gmi", "g-i")
    # prefer a precomputed g-i column; otherwise build it from g and i
    gmi = ps[gmi_col] if gmi_col in ps.colnames else ps[g_col] - ps[i_col]
    # warn (but do not mask) stars outside the fit's validity range
    in_range = (-1.0 * units.mag < gmi) & (gmi < 4 * units.mag)
    if not all(in_range):
        warnings.warn("MCg1.R: not all -1 mag < (g-i)_ps < 4 mag")
    # cubic color-term polynomial, evaluated term by term
    r_cfht = (
        ps[r_col]
        + 0.002 * units.mag
        + (-0.017) * gmi
        + (0.00554 / units.mag) * gmi ** 2
        + (-0.000692 / units.mag ** 2) * gmi ** 3
    )
    return r_cfht
# -------------------------------------------------------------------------
@quantity_io()
def I_MP9701(ps: Table, **kw) -> units.mag:
    """CFHT MegaCam i-band (filter MP9701) from Pan-STARRS photometry.

    Applies the cubic color term
        iCFHT - iPS = .001 - .021 gmi + .00398 gmi^2 - .00369 gmi^3,
    with gmi = (gPS - iPS), valid for -1 mag < gmi < 4 mag.

    Transformation from
    http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/en/megapipe/docs/filt.html
    (Pan-STARRS to MegaCam plots, 2nd row, 1st plot).

    Parameters
    ----------
    ps : Astropy Table
        needs an i column, plus either a g or a g-i column

    Other Parameters
    ----------------
    g : str  (default 'g')
        g column name
    i : str  (default 'i')
        i column name
    gmi : str  (default 'g-i')
        g-i column name

    Returns
    -------
    i_cfht : array_like
        CFHT i-band magnitudes
    """
    g_col = kw.get("g", "g")
    i_col = kw.get("i", "i")
    gmi_col = kw.get("gmi", "g-i")
    # prefer a precomputed g-i column; otherwise build it from g and i
    gmi = ps[gmi_col] if gmi_col in ps.colnames else ps[g_col] - ps[i_col]
    # warn (but do not mask) stars outside the fit's validity range
    in_range = (-1.0 * units.mag < gmi) & (gmi < 4 * units.mag)
    if not all(in_range):
        warnings.warn("MCg1.I: not all -1 mag < (g-i)_ps < 4 mag")
    # cubic color-term polynomial, evaluated term by term
    i_cfht = (
        ps[i_col]
        + 0.001 * units.mag
        + (-0.021) * gmi
        + (0.00398 / units.mag) * gmi ** 2
        + (-0.00369 / units.mag ** 2) * gmi ** 3
    )
    return i_cfht
# -------------------------------------------------------------------------
@quantity_io()
def Z_MP9801(ps: Table, **kw) -> units.mag:
    """CFHT MegaCam z-band (filter MP9801) from Pan-STARRS photometry.

    Applies the cubic color term
        zCFHT - zPS = -.009 - .029 gmi + .012 gmi^2 - .00367 gmi^3,
    with gmi = (gPS - iPS), valid for -1 mag < gmi < 4 mag.

    Transformation from
    http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/en/megapipe/docs/filt.html
    (Pan-STARRS to MegaCam plots, 2nd row, 2nd plot).

    Parameters
    ----------
    ps : Astropy Table
        needs a z column, plus either (g & i) or a g-i column

    Other Parameters
    ----------------
    g : str  (default 'g')
        g column name
    i : str  (default 'i')
        i column name
    z : str  (default 'z')
        z column name
    gmi : str  (default 'g-i')
        g-i column name

    Returns
    -------
    z_cfht : array_like
        CFHT z-band magnitudes
    """
    g_col = kw.get("g", "g")
    i_col = kw.get("i", "i")
    z_col = kw.get("z", "z")
    gmi_col = kw.get("gmi", "g-i")
    # prefer a precomputed g-i column; otherwise build it from g and i
    gmi = ps[gmi_col] if gmi_col in ps.colnames else ps[g_col] - ps[i_col]
    # warn (but do not mask) stars outside the fit's validity range
    in_range = (-1.0 * units.mag < gmi) & (gmi < 4 * units.mag)
    if not all(in_range):
        warnings.warn("MCg1.Z: not all -1 mag < (g-i)_ps < 4 mag")
    # cubic color-term polynomial, evaluated term by term
    z_cfht = (
        ps[z_col]
        + (-0.009) * units.mag
        + (-0.029) * gmi
        + (0.012 / units.mag) * gmi ** 2
        + (-0.00367 / units.mag ** 2) * gmi ** 3
    )
    return z_cfht
# -------------------------------------------------------------------------
@quantity_io()
def I_MP9702(ps: Table, **kw) -> units.mag:
    """CFHT MegaCam i-band (filter MP9702) from Pan-STARRS photometry.

    Applies the cubic color term
        iCFHT - iPS = -.005 + .004 gmi + .0124 gmi^2 - .0048 gmi^3,
    with gmi = (gPS - iPS), valid for -1 mag < gmi < 4 mag.

    Transformation from
    http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/en/megapipe/docs/filt.html
    (Pan-STARRS to MegaCam plots, 2nd row, 3rd plot).

    Parameters
    ----------
    ps : Astropy Table
        needs an i column, plus either a g or a g-i column

    Other Parameters
    ----------------
    g : str  (default 'g')
        g column name
    i : str  (default 'i')
        i column name
    gmi : str  (default 'g-i')
        g-i column name

    Returns
    -------
    i_cfht : array_like
        CFHT i-band (MP9702) magnitudes
    """
    g_col = kw.get("g", "g")
    i_col = kw.get("i", "i")
    gmi_col = kw.get("gmi", "g-i")
    # prefer a precomputed g-i column; otherwise build it from g and i
    gmi = ps[gmi_col] if gmi_col in ps.colnames else ps[g_col] - ps[i_col]
    # warn (but do not mask) stars outside the fit's validity range
    in_range = (-1.0 * units.mag < gmi) & (gmi < 4 * units.mag)
    if not all(in_range):
        warnings.warn("MCg1.I: not all -1 mag < (g-i)_ps < 4 mag")
    # cubic color-term polynomial, evaluated term by term
    i_cfht = (
        ps[i_col]
        + (-0.005) * units.mag
        + 0.004 * gmi
        + (0.0124 / units.mag) * gmi ** 2
        + (-0.0048 / units.mag ** 2) * gmi ** 3
    )
    return i_cfht
# -------------------------------------------------------------------------
@quantity_io()
def U_MP9302(ps: Table, **kw) -> units.mag:
    """CFHT MegaCam u-band (filter MP9302) from Pan-STARRS photometry.

    Applies the cubic color term
        uCFHT - gPS = .823 - 1.36 gmi + 4.18 gmi^2 - 1.64 gmi^3,
    with gmi = (gPS - iPS), valid for .3 mag < gmi < 1.5 mag.

    Transformation from
    http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/en/megapipe/docs/filt.html
    (Pan-STARRS to MegaCam plots, 3rd row, 1st plot).

    Parameters
    ----------
    ps : Astropy Table
        needs a g column, plus either an i or a g-i column

    Other Parameters
    ----------------
    g : str  (default 'g')
        g column name
    i : str  (default 'i')
        i column name
    gmi : str  (default 'g-i')
        g-i column name

    Returns
    -------
    u_cfht : array_like
        CFHT u-band (MP9302) magnitudes
    """
    g_col = kw.get("g", "g")
    i_col = kw.get("i", "i")
    gmi_col = kw.get("gmi", "g-i")
    # prefer a precomputed g-i column; otherwise build it from g and i
    gmi = ps[gmi_col] if gmi_col in ps.colnames else ps[g_col] - ps[i_col]
    # warn (but do not mask) stars outside the fit's validity range
    in_range = (0.3 * units.mag < gmi) & (gmi < 1.5 * units.mag)
    if not all(in_range):
        warnings.warn("MCg1.U: not all .3 mag < (g-i)_ps < 1.5 mag")
    # cubic color-term polynomial, evaluated term by term
    u_cfht = (
        ps[g_col]
        + 0.823 * units.mag
        + (-1.360) * gmi
        + (4.18 / units.mag) * gmi ** 2
        + (-1.64 / units.mag ** 2) * gmi ** 3
    )
    return u_cfht
# -------------------------------------------------------------------------
@quantity_io()
def G_MP9402(ps: Table, **kw) -> units.mag:
    """G_MP9402: CFHT MegaCam g-band (filter MP9402) from Pan-STARRS.

    gmi = (gPS-iPS)
    gCFHT - gPS = .014 - .059 gmi - .00313 gmi^2 - .00178 gmi^3
    limited to -1 mag < gmi < 4 mag

    filter transformations from
    http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/en/megapipe/docs/filt.html
    Pan-STARRS to MegaCam plots. 3rd row, 2nd plot

    Parameters
    ----------
    ps: Astropy Table
        need: g col
        either: i, g-i col

    Other Parameters
    ----------------
    g: str  (default 'g')
        g column name
    i: str  (default 'i')
        i column name
    gmi: str  (default 'g-i')
        g-i column name

    Returns
    -------
    g_cfht: array_like
        CFHT g-band (MP9402) magnitudes
    """
    g, i = kw.get("g", "g"), kw.get("i", "i")
    gmi = kw.get("gmi", "g-i")
    # use a precomputed g-i column when present, else compute it
    if gmi in ps.colnames:
        gmi = ps[gmi]
    else:
        gmi = ps[g] - ps[i]
    # warn (but do not mask) stars outside the fit's validity range
    ind = (-1.0 * units.mag < gmi) & (gmi < 4 * units.mag)
    if not all(ind):
        warnings.warn("MCg1.G: not all -1 mag < (g-i)_ps < 4 mag")
    c0 = 0.014 * units.mag
    # BUGFIX: the linear coefficient is negative (-.059) in the documented
    # transformation above (CADC MegaPipe page); it was +0.059 here.
    c1 = -0.059
    c2 = -0.00313 / units.mag
    c3 = -0.00178 / units.mag ** 2
    g_ps = ps[g]
    g_cfht = g_ps + c0 + (c1 * gmi) + (c2 * gmi ** 2) + (c3 * gmi ** 3)
    return g_cfht
# -------------------------------------------------------------------------
@quantity_io()
def R_MP9602(ps: Table, **kw) -> units.mag:
    """R_MP9602: CFHT MegaCam r-band (filter MP9602) from Pan-STARRS.

    gmi = (gPS-iPS)
    rCFHT - rPS = .003 - .05 gmi - .0125 gmi^2 - .00699 gmi^3
    limited to -1 mag < gmi < 3 mag

    filter transformations from
    http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/en/megapipe/docs/filt.html
    Pan-STARRS to MegaCam plots. 3rd row, 3rd plot

    Parameters
    ----------
    ps: Astropy Table
        need: r col
        either: (g & i), g-i col

    Other Parameters
    ----------------
    g: str  (default 'g')
        g column name
    r: str  (default 'r')
        r column name
    i: str  (default 'i')
        i column name
    gmi: str  (default 'g-i')
        g-i column name

    Returns
    -------
    r_cfht: array_like
        CFHT r-band (MP9602) magnitudes
    """
    g, r, i = kw.get("g", "g"), kw.get("r", "r"), kw.get("i", "i")
    gmi = kw.get("gmi", "g-i")
    # use a precomputed g-i column when present, else compute it
    if gmi in ps.colnames:
        gmi = ps[gmi]
    else:
        gmi = ps[g] - ps[i]
    # warn (but do not mask) stars outside the fit's validity range
    ind = (-1.0 * units.mag < gmi) & (gmi < 3.0 * units.mag)
    if not all(ind):
        warnings.warn("MCg1.R: not all -1 mag < (g-i)_ps < 3 mag")
    c0 = 0.003 * units.mag
    c1 = -0.050
    # BUGFIX: the quadratic coefficient is negative (-.0125) in the
    # documented transformation above (CADC MegaPipe page); it was +0.0125.
    c2 = -0.0125 / units.mag
    c3 = -0.00699 / units.mag ** 2
    r_ps = ps[r]
    r_cfht = r_ps + c0 + (c1 * gmi) + (c2 * gmi ** 2) + (c3 * gmi ** 3)
    return r_cfht
# /def
# -------------------------------------------------------------------------
@quantity_io()
def I_MP9703(ps: Table, **kw) -> units.mag:
    """CFHT MegaCam i-band (filter MP9703) from Pan-STARRS photometry.

    Applies the cubic color term
        iCFHT - iPS = .006 - .024 gmi + .00627 gmi^2 - .00523 gmi^3,
    with gmi = (gPS - iPS), valid for -1 mag < gmi < 3.6 mag.

    Transformation from
    http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/en/megapipe/docs/filt.html
    (Pan-STARRS to MegaCam plots, 4th row, 1st plot).

    Parameters
    ----------
    ps : Astropy Table
        needs an i column, plus either a g or a g-i column

    Other Parameters
    ----------------
    g : str  (default 'g')
        g column name
    i : str  (default 'i')
        i column name
    gmi : str  (default 'g-i')
        g-i column name

    Returns
    -------
    i_cfht : array_like
        CFHT i-band (MP9703) magnitudes
    """
    g_col = kw.get("g", "g")
    i_col = kw.get("i", "i")
    gmi_col = kw.get("gmi", "g-i")
    # prefer a precomputed g-i column; otherwise build it from g and i
    gmi = ps[gmi_col] if gmi_col in ps.colnames else ps[g_col] - ps[i_col]
    # warn (but do not mask) stars outside the fit's validity range
    in_range = (-1.0 * units.mag < gmi) & (gmi < 3.6 * units.mag)
    if not all(in_range):
        warnings.warn("MCg1.I: not all -1 mag < (g-i)_ps < 3.6 mag")
    # cubic color-term polynomial, evaluated term by term
    i_cfht = (
        ps[i_col]
        + 0.006 * units.mag
        + (-0.024) * gmi
        + (0.00627 / units.mag) * gmi ** 2
        + (-0.00523 / units.mag ** 2) * gmi ** 3
    )
    return i_cfht
# -------------------------------------------------------------------------
@quantity_io()
def Z_MP9901(ps: Table, **kw) -> units.mag:
    """CFHT MegaCam z-band (filter MP9901) from Pan-STARRS photometry.

    Applies the cubic color term
        zCFHT - zPS = -.016 - .069 gmi + .0239 gmi^2 - .0056 gmi^3,
    with gmi = (gPS - iPS), valid for -1 mag < gmi < 4 mag.

    Transformation from
    http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/en/megapipe/docs/filt.html
    (Pan-STARRS to MegaCam plots, 4th row, 2nd plot).

    Parameters
    ----------
    ps : Astropy Table
        needs a z column, plus either (g & i) or a g-i column

    Other Parameters
    ----------------
    g : str  (default 'g')
        g column name
    i : str  (default 'i')
        i column name
    z : str  (default 'z')
        z column name
    gmi : str  (default 'g-i')
        g-i column name

    Returns
    -------
    z_cfht : array_like
        CFHT z-band (MP9901) magnitudes
    """
    g_col = kw.get("g", "g")
    i_col = kw.get("i", "i")
    z_col = kw.get("z", "z")
    gmi_col = kw.get("gmi", "g-i")
    # prefer a precomputed g-i column; otherwise build it from g and i
    gmi = ps[gmi_col] if gmi_col in ps.colnames else ps[g_col] - ps[i_col]
    # warn (but do not mask) stars outside the fit's validity range
    in_range = (-1.0 * units.mag < gmi) & (gmi < 4.0 * units.mag)
    if not all(in_range):
        warnings.warn("MCg1.Z: not all -1 mag < (g-i)_ps < 4 mag")
    # cubic color-term polynomial, evaluated term by term
    z_cfht = (
        ps[z_col]
        + (-0.016) * units.mag
        + (-0.069) * gmi
        + (0.0239 / units.mag) * gmi ** 2
        + (-0.0056 / units.mag ** 2) * gmi ** 3
    )
    return z_cfht
# -------------------------------------------------------------------------
@quantity_io()
def GRI_MP9605(ps: Table, **kw) -> units.mag:
    """CFHT MegaCam gri-band (filter MP9605) from Pan-STARRS photometry.

    Applies the cubic color term
        gCFHT - rPS = .005 + .244 gmi - .0692 gmi^2 - .0014 gmi^3,
    with gmi = (gPS - iPS), valid for -1 mag < gmi < 1.2 mag.

    Transformation from
    http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/en/megapipe/docs/filt.html
    (Pan-STARRS to MegaCam plots, 4th row, 3rd plot).

    Parameters
    ----------
    ps : Astropy Table
        needs an r column, plus either (g & i) or a g-i column

    Other Parameters
    ----------------
    g : str  (default 'g')
        g column name
    r : str  (default 'r')
        r column name
    i : str  (default 'i')
        i column name
    gmi : str  (default 'g-i')
        g-i column name

    Returns
    -------
    r_cfht : array_like
        CFHT gri-band (MP9605) magnitudes
    """
    g_col = kw.get("g", "g")
    r_col = kw.get("r", "r")
    i_col = kw.get("i", "i")
    gmi_col = kw.get("gmi", "g-i")
    # prefer a precomputed g-i column; otherwise build it from g and i
    gmi = ps[gmi_col] if gmi_col in ps.colnames else ps[g_col] - ps[i_col]
    # warn (but do not mask) stars outside the fit's validity range
    in_range = (-1.0 * units.mag < gmi) & (gmi < 1.2 * units.mag)
    if not all(in_range):
        warnings.warn("MCg1.G: not all -1 mag < (g-i)_ps < 1.2 mag")
    # cubic color-term polynomial, evaluated term by term
    r_cfht = (
        ps[r_col]
        + 0.005 * units.mag
        + 0.244 * gmi
        + (-0.0692 / units.mag) * gmi ** 2
        + (-0.0014 / units.mag ** 2) * gmi ** 3
    )
    return r_cfht
# -------------------------------------------------------------------------
@quantity_io()
def GmR(ps: Table, gfilter="9401", rfilter="9601", **kw) -> units.mag:
    """G - R color in CFHT MegaCam (gen 1) bands, converted from Pan-STARRS.

    Parameters
    ----------
    ps : Astropy Table
        Pan-STARRS photometry (see the individual band functions)
    gfilter : {"9401", "9402"}
        which MegaCam g filter to use
    rfilter : {"9601", "9602"}
        which MegaCam r filter to use
    kw
        passed through to the band-conversion functions

    Returns
    -------
    array_like
        g_cfht - r_cfht

    Raises
    ------
    ValueError
        if `gfilter` or `rfilter` is not one of the allowed ids
    """
    gopts = ("9401", "9402")
    if gfilter not in gopts:
        raise ValueError(f"{gfilter} wrong not in {gopts}")
    # look up the conversion function for the requested g filter
    G = globals().get("G_MP" + gfilter)
    ropts = ("9601", "9602")
    if rfilter not in ropts:
        raise ValueError(f"{rfilter} wrong not in {ropts}")
    # look up the conversion function for the requested r filter
    R = globals().get("R_MP" + rfilter)
    return G(ps, **kw) - R(ps, **kw)
| 17,792 | 20.966667 | 78 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/util/__init__.py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : util initialization
# AUTHOR : Nathaniel Starkman
# PROJECT : AST1501
#
# ----------------------------------------------------------------------------
# Docstring and Metadata
"""util.
Routine Listings
----------------
astrarray
"""
__author__ = "Nathaniel Starkman"
#############################################################################
# IMPORTS
# GENERAL
import numpy as np
from collections import OrderedDict
# CUSTOM
from astroPHD import LogFile, ObjDict
# PROJECT-SPECIFIC
from .pickle import dump as _dump, load as _load
#############################################################################
# CODE
#############################################################################
def astrarray(arr):
    """Combine a list of Quantities into one Quantity array.

    All elements are converted to the unit of the first element, and the
    result has the class of the first element.

    Parameters
    ----------
    arr : array_like
        sequence of quantities, e.g. ``[0*u.deg, 1*u.deg]``

    Returns
    -------
    Quantity array (same class and unit as ``arr[0]``)

    Examples
    --------
    >>> astrarray([0*u.deg, 1*u.deg])
    [0, 1] * u.deg
    """
    cls = type(arr[0])
    base_unit = arr[0].unit
    # strip units element-by-element, then re-attach them once
    values = np.asarray([item.to_value(base_unit) for item in arr])
    return cls(values * base_unit)
#############################################################################
# END
| 1,295 | 18.343284 | 78 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/util/paths/__init__.py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : paths initialization file
# AUTHOR : Nathaniel Starkman
# PROJECT : starkython
#
# ----------------------------------------------------------------------------
# Docstring and Metadata
"""initialization file for util/paths.
Routine Listings
----------------
current_file_directory
"""
__author__ = "Nathaniel Starkman"
__all__ = [
"current_file_directory"
]
##############################################################################
# CODE
##############################################################################
def current_file_directory(__file__):
    """Return the directory portion of a file path.

    Drops the final path component, leaving the containing folder.

    Parameters
    ----------
    __file__ : str
        path to a file (typically a module's ``__file__``); the parameter
        deliberately mirrors the builtin name.

    Returns
    -------
    cur_dir : str
        everything before the last ``/`` (empty string if there is none)
    """
    cur_dir, _, _ = __file__.rpartition("/")
    return cur_dir
##############################################################################
# END
| 1,125 | 18.754386 | 78 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/util/coordinates/CustomCoords.py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : Custom Coordinates
# AUTHOR : Nathaniel Starkman
# PROJECT : AST1501
#
# ----------------------------------------------------------------------------
# Docstring and Metadata
"""Custom Coordinates.
TODO
----
replace custom_to_radec with version from galpy.util.bovy_coords
and delete custom_to_radec, from mybovy_coords
"""
__author__ = "Nathaniel Starkman"
#############################################################################
# IMPORTS
# GENERAL
from numpy.linalg import norm
# astropy
from astropy import units as u, constants as consts
import astropy.coordinates as coord
from astropy.coordinates import SkyCoord, frame_transform_graph
from astropy.coordinates.representation import CartesianDifferential
from astropy.coordinates.matrix_utilities import (
rotation_matrix,
matrix_product,
matrix_transpose,
)
# galpy
from galpy.util.bovy_coords import radec_to_custom, custom_to_radec
# CUSTOM
from astroPHD import ObjDict, LogFile
# PROJECT-SPECIFIC
from ..stats import mean as average
#############################################################################
# PARAMETERS
_LOGFILE = LogFile(header=False) # LogPrint, which is compatible with LogFile
#############################################################################
# CODE
#############################################################################
#############################################################################
# Rotated Coordinate Frame Class
class StreamFrame(coord.BaseCoordinateFrame):
    """StreamFrame.

    A heliocentric spherical coordinate system defined to linearize
    a stream about a point, using the angular momentum at that point.
    Longitude/latitude components are exposed as ``phi1``/``phi2``
    (plus ``distance``), with proper motions ``pm_phi1_cosphi2`` /
    ``pm_phi2`` and ``radial_velocity``.

    Modeled on the astropy Sagittarius-frame example:
    http://docs.astropy.org/en/stable/generated/examples/coordinates/
    plot_sgr-coordinate-frame.html
    #sphx-glr-generated-examples-coordinates-plot-sgr-coordinate-frame-py
    """

    default_representation = coord.SphericalRepresentation
    default_differential = coord.SphericalCosLatDifferential

    # map astropy's generic component names onto stream-frame names
    frame_specific_representation_info = {
        coord.SphericalRepresentation: [
            coord.RepresentationMapping("lon", "phi1"),
            coord.RepresentationMapping("lat", "phi2"),
            coord.RepresentationMapping("distance", "distance"),
        ],
        coord.SphericalCosLatDifferential: [
            coord.RepresentationMapping("d_lon_coslat", "pm_phi1_cosphi2"),
            coord.RepresentationMapping("d_lat", "pm_phi2"),
            coord.RepresentationMapping("d_distance", "radial_velocity"),
        ],
        coord.SphericalDifferential: [
            coord.RepresentationMapping("d_lon", "pm_phi1"),
            coord.RepresentationMapping("d_lat", "pm_phi2"),
            coord.RepresentationMapping("d_distance", "radial_velocity"),
        ],
    }
    # the unit-spherical classes reuse the same component mappings
    frame_specific_representation_info[
        coord.UnitSphericalRepresentation
    ] = frame_specific_representation_info[coord.SphericalRepresentation]
    frame_specific_representation_info[
        coord.UnitSphericalCosLatDifferential
    ] = frame_specific_representation_info[coord.SphericalCosLatDifferential]
    frame_specific_representation_info[
        coord.UnitSphericalDifferential
    ] = frame_specific_representation_info[coord.SphericalDifferential]
##############################################################################
def register_stream_frame(R_icrs_to_cust, logger=_LOGFILE, verbose=None):
    """Register StreamFrame <-> ICRS transforms with astropy.

    Adds both directions of the static-matrix transformation to astropy's
    ``frame_transform_graph``.

    Parameters
    ----------
    R_icrs_to_cust : array
        rotation matrix taking Cartesian ICRS coordinates to Cartesian
        custom (stream-frame) coordinates
    logger : LogFile, optional
    verbose : optional
        verbosity forwarded to the logger
    """
    logger.report("registered StreamCoordFrame", verbose=verbose)

    @frame_transform_graph.transform(
        coord.StaticMatrixTransform, coord.ICRS, StreamFrame
    )
    def _icrs_to_stream():
        """ICRS -> rotated stream frame (static matrix)."""
        return R_icrs_to_cust
    # /def

    @frame_transform_graph.transform(
        coord.StaticMatrixTransform, StreamFrame, coord.ICRS
    )
    def _stream_to_icrs():
        """Rotated stream frame -> ICRS (transposed matrix)."""
        return matrix_transpose(R_icrs_to_cust)
    # /def

    return
# -----------------------------------------------------------------------------
def reference_to_skyoffset_matrix(skyoffset_frame):
    """Rotation matrix from a reference frame to a skyoffset frame.

    Adapted from astropy's internal ``reference_to_skyoffset``; useful for
    extracting the rotation matrix astropy builds for a skyoffset frame.

    Parameters
    ----------
    skyoffset_frame : SkyCoord frame
        the rotated frame, with ``origin`` and ``rotation`` attributes
        (SkyCoords are compatible)

    Returns
    -------
    R : numpy array
        3x3 rotation matrix
    """
    origin = skyoffset_frame.origin.spherical
    # roll about the line of sight, then rotate the origin down to (0, 0)
    roll = rotation_matrix(-skyoffset_frame.rotation, "x")
    pitch = rotation_matrix(-origin.lat, "y")
    yaw = rotation_matrix(origin.lon, "z")
    return matrix_product(roll, pitch, yaw)
# -----------------------------------------------------------------------------
def make_stream_frame_transform(
    orbit,
    point,
    method="path",
    dt=2 * u.Myr,
    is_nbody=False,
    max_separation=0.5 * u.kpc,
    logger=_LOGFILE,
    verbose=None,
):
    """Build the ICRS -> stream-frame rotation and related quantities.

    Determines a rotation matrix that aligns a custom sky frame with the
    local direction of motion along `orbit` at `point`.

    Parameters
    ----------
    orbit : SkyCoord
        the orbit (or N-body particle) coordinates
    point : SkyCoord
        starting point on / near the orbit, e.g. ``orbit.icrs[0]``
    method : str
        only 'path' is implemented here: use the path of the orbit
        (an 'angular momentum' option is validated by the caller but not
        handled in this function)
    dt : time Quantity
        how far to propagate `point` along its space motion; only used
        when `method` == 'path'
    is_nbody : bool
        if True, `point` and the propagated point are replaced by averages
        of orbit members within `max_separation`
    max_separation : length Quantity
        matching radius used when `is_nbody` is True
    logger : LogFile, optional
    verbose : optional
        verbosity forwarded to the logger

    Returns
    -------
    coordtransforms : ObjDict
        with keys ``R_cust_eq`` / ``R_icrs_to_cust`` (the same rotation
        matrix), ``ra_ngp``, ``dec_ngp``, ``theta_ngp`` (north-pole angles
        of the rotated frame), and ``point`` (possibly averaged).
    """
    logger.report("making make_stream_frame_transform", verbose=verbose)
    if method == "path":
        if is_nbody:  # redefine `point` as a local average of the members
            idx = orbit.separation_3d(point) < max_separation
            point = average(orbit[idx])
        # offset from point
        motion_coords = point.apply_space_motion(dt=dt)
        # snap to actual orbit
        if is_nbody:  # an average of nearby things
            idx = orbit.separation_3d(motion_coords) < max_separation
            offset_coords = average(orbit[idx])
        else:
            # nearest orbit point to the propagated position
            # (sep2d / dist3d are unused by-products of the match)
            idx, sep2d, dist3d = motion_coords.match_to_catalog_sky(orbit)
            offset_coords = orbit[idx]
        # get angle of rotation of rotated frame
        rotation = point.position_angle(offset_coords) - 90 * u.deg
        # the custom frame,
        # made by astropy and then used to extract the correct rotation matrix
        custframe = point.skyoffset_frame(rotation=rotation)
        # icrs to custom rotation matrix
        R_icrs_to_cust = reference_to_skyoffset_matrix(custframe)
    else:
        # only 'path' is implemented; make_stream_frame validates earlier
        raise ValueError()
    logger.report(R_icrs_to_cust)
    # NGP angles of the rotated frame
    theta_ngp, _ = (
        radec_to_custom(180.0, 90.0, T=R_icrs_to_cust, degree=True) * u.deg
    )
    ra_ngp, dec_ngp = (
        custom_to_radec(180.0, 90.0, T=R_icrs_to_cust, degree=True) * u.deg
    )
    coordtransforms = ObjDict(
        "coordinate transforms",
        # Rcust_gal=Rcust_gal,
        R_cust_eq=R_icrs_to_cust,
        R_icrs_to_cust=R_icrs_to_cust,
        # Lvec=Lvec,
        # nrpnt=nrpnt,
        ra_ngp=ra_ngp,
        dec_ngp=dec_ngp,
        theta_ngp=theta_ngp,
        point=point,
    )
    return coordtransforms
# -----------------------------------------------------------------------------
def make_stream_frame(
    orbit,
    point,
    method="path",
    dt=2 * u.Myr,
    is_nbody=False,
    max_separation=0.5 * u.kpc,
    return_transforms=False,
    logger=_LOGFILE,
    verbose=None,
):
    """Make and register the StreamFrame reference frame.

    Builds the ICRS -> stream rotation for `orbit` about `point`, then
    registers the transforms with astropy's ``frame_transform_graph``.

    Parameters
    ----------
    orbit : SkyCoord
        the orbit used to orient the rotated frame
    point : SkyCoord
        starting point, e.g. ``orbit['gal'].cartesian.xyz.T[0]``
    method : str
        'path' (use the path of the orbit) or 'angular momentum'
        (TODO: support to be re-added)
    dt, is_nbody, max_separation
        forwarded to `make_stream_frame_transform`; when `is_nbody` is
        True, points are snapped to nearby members and averaged
    return_transforms : bool
        if True, also return the transform quantities

    Returns
    -------
    StreamFrame : astropy coordinate frame class
    coordtransforms : ObjDict
        only when `return_transforms` is True

    Raises
    ------
    ValueError
        if `method` is not one of the allowed options
    """
    if method not in ("path", "angular momentum"):
        raise ValueError("method not supported. See documentation.")

    transforms = make_stream_frame_transform(
        orbit,
        point,
        method=method,
        dt=dt,
        is_nbody=is_nbody,
        max_separation=max_separation,
        logger=logger,
        verbose=verbose,
    )

    # make the frame reachable through astropy's frame_transform_graph
    register_stream_frame(
        transforms.R_icrs_to_cust, logger=logger, verbose=verbose
    )

    if return_transforms:
        return StreamFrame, transforms
    return StreamFrame
##############################################################################
# END
| 9,412 | 25.515493 | 79 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/util/coordinates/frameattrs.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# TITLE : frameattrs
# AUTHOR : Nathaniel Starkman
# PROJECT : AST1501
#
# -----------------------------------------------------------------------------
# Docstring and Metadata
"""Frame Attributes
"""
__author__ = "Nathaniel Starkman"
#############################################################################
# IMPORTS
# General
import warnings
import numpy as np
# astropy
from astropy import units as u
from astropy import coordinates as coord
from astropy.coordinates.baseframe import FrameMeta, _get_repr_cls
from astropy.coordinates.representation import (
MetaBaseRepresentation,
MetaBaseDifferential,
)
#############################################################################
# CODE
##############################################################################
def frameattrs(
    frame, representation=None, differential=None, notrecog2custom=False
):
    """Component names and units for a frame / representation / differential.

    Parameters
    ----------
    frame : str or frame instance
        one of 'icrs', 'galactic', 'galactocentric', 'custom' (str), or a
        frame object exposing ``.name``, ``.representation_type`` and
        ``.differential_type``.
    representation : str or astropy representation class, optional
        'cartesian', 'spherical', 'cylindrical' or 'radial'.
        None: taken from `frame` (which must then not be a str).
    differential : str or astropy differential class, optional
        'sphericalcoslat', 'spherical', 'cartesian', 'cylindrical', 'radial'.
        None: taken from `frame`.
    notrecog2custom : bool, optional
        if True, treat a non-string frame with an unrecognized name as
        'custom' instead of raising.

    Returns
    -------
    attrs : list of str
        component names: position components first, then velocity.
    units : dict
        component name -> astropy unit.
        NOTE(review): units are hard-coded (kpc / deg / mas/yr / km/s);
        TODO derive them from the frame itself.

    Raises
    ------
    ValueError
        for an unrecognized frame / representation / differential.
    NotImplementedError
        for a recognized but unsupported combination (e.g. 'radial',
        or 'cartesian' with the 'custom' frame).
    """
    # class -> canonical-name lookups
    reps = {
        coord.CartesianRepresentation: "cartesian",
        coord.SphericalRepresentation: "spherical",
        coord.CylindricalRepresentation: "cylindrical",
        coord.RadialRepresentation: "radial",
    }
    diffs = {
        coord.SphericalCosLatDifferential: "sphericalcoslat",
        coord.SphericalDifferential: "spherical",
        coord.CartesianDifferential: "cartesian",
        coord.CylindricalDifferential: "cylindrical",
        coord.RadialDifferential: "radial",
    }
    recogframe = ("icrs", "galactic", "galactocentric")  # known frames

    # +--------------- Frame ---------------+
    if isinstance(frame, str):
        framename = frame
        if framename not in (*recogframe, "custom"):
            raise ValueError(f"{framename} is not a recognized frame")
    else:
        framename = frame.name
        if framename not in recogframe:
            if not notrecog2custom:
                raise ValueError(f"{framename} is not a recognized frame")
            framename = "custom"
    framename = framename.lower()

    # +--------------- Representation ---------------+
    if representation is None:
        # `frame` must be an object here; a str carries no representation
        if frame.representation_type not in reps:
            raise ValueError(
                f"{frame.representation_type} not representation of "
                f"{reps.values()}"
            )
        representation = reps[frame.representation_type]
    elif isinstance(representation, str):
        if representation not in reps.values():
            raise ValueError(f"representation not in {reps.values()}")
    elif isinstance(representation, MetaBaseRepresentation):
        if representation not in reps:
            raise ValueError(
                f"{representation} not representation of {reps.values()}"
            )
        representation = reps[representation]

    # +--------------- Differential ---------------+
    if differential is None:
        if frame.differential_type not in diffs:
            raise ValueError(
                f"{frame.differential_type} not representation of "
                f"{diffs.values()}"
            )
        differential = diffs[frame.differential_type]
    elif isinstance(differential, str):  # was `if`; `elif` matches above
        if differential not in diffs.values():
            raise ValueError(f"differential not in {diffs.values()}")
    elif isinstance(differential, MetaBaseDifferential):
        if differential not in diffs:
            raise ValueError(
                f"{differential} not representation of {diffs.values()}"
            )
        differential = diffs[differential]

    # +--------------- Attributes & Units ---------------+
    _kpc, _deg = u.kpc, u.deg
    _masyr, _kms = u.mas / u.yr, u.km / u.s

    # position components & units per (frame, representation)
    pos_table = {
        ("icrs", "cartesian"): {"x": _kpc, "y": _kpc, "z": _kpc},
        ("icrs", "spherical"): {"ra": _deg, "dec": _deg, "distance": _kpc},
        ("icrs", "cylindrical"): {"rho": _kpc, "phi": _deg, "z": _kpc},
        ("galactic", "cartesian"): {"u": _kpc, "v": _kpc, "w": _kpc},
        ("galactic", "spherical"): {"l": _deg, "b": _deg, "distance": _kpc},
        ("galactic", "cylindrical"): {"rho": _kpc, "phi": _deg, "z": _kpc},
        ("galactocentric", "cartesian"): {"x": _kpc, "y": _kpc, "z": _kpc},
        ("galactocentric", "spherical"): {
            "lon": _deg, "lat": _deg, "distance": _kpc
        },
        ("galactocentric", "cylindrical"): {
            "rho": _kpc, "phi": _deg, "z": _kpc
        },
        ("custom", "spherical"): {
            "phi1": _deg, "phi2": _deg, "distance": _kpc
        },
    }
    # velocity components & units per (frame, differential)
    vel_table = {
        ("icrs", "cartesian"): {"v_x": _kms, "v_y": _kms, "v_z": _kms},
        ("icrs", "spherical"): {
            "pm_ra": _masyr, "pm_dec": _masyr, "radial_velocity": _kms
        },
        ("icrs", "sphericalcoslat"): {
            "pm_ra_cosdec": _masyr, "pm_dec": _masyr, "radial_velocity": _kms
        },
        ("icrs", "cylindrical"): {
            "d_rho": _kms, "d_phi": _masyr, "d_z": _kms
        },
        ("galactic", "cartesian"): {"U": _kms, "V": _kms, "W": _kms},
        ("galactic", "spherical"): {
            "pm_l": _masyr, "pm_b": _masyr, "radial_velocity": _kms
        },
        ("galactic", "sphericalcoslat"): {
            "pm_l_cosb": _masyr, "pm_b": _masyr, "radial_velocity": _kms
        },
        ("galactic", "cylindrical"): {
            "d_rho": _kms, "d_phi": _masyr, "d_z": _kms
        },
        ("galactocentric", "cartesian"): {
            "v_x": _kms, "v_y": _kms, "v_z": _kms
        },
        ("galactocentric", "spherical"): {
            "pm_lon": _masyr, "pm_lat": _masyr, "radial_velocity": _kms
        },
        ("galactocentric", "sphericalcoslat"): {
            "pm_lon_coslat": _masyr,
            "pm_lat": _masyr,
            "radial_velocity": _kms,
        },
        ("galactocentric", "cylindrical"): {
            "d_rho": _kms, "d_phi": _masyr, "d_z": _kms
        },
        ("custom", "spherical"): {
            "pm_phi1": _masyr, "pm_phi2": _masyr, "radial_velocity": _kms
        },
        ("custom", "sphericalcoslat"): {
            "pm_phi1_cosphi2": _masyr,
            "pm_phi2": _masyr,
            "radial_velocity": _kms,
        },
    }

    # BUGFIX: unsupported combinations used to `raise Exception` (or, for an
    # unvalidated representation object, silently return partial results);
    # both now raise NotImplementedError (a subclass of Exception).
    try:
        pos = pos_table[(framename, representation)]
    except KeyError:
        raise NotImplementedError(
            f"representation {representation!r} is not supported for "
            f"frame {framename!r}"
        ) from None
    try:
        vel = vel_table[(framename, differential)]
    except KeyError:
        raise NotImplementedError(
            f"differential {differential!r} is not supported for "
            f"frame {framename!r}"
        ) from None

    attrs = [*pos, *vel]  # position names first, then velocity names
    units = {**pos, **vel}
    return attrs, units
# /def
# ----------------------------------------------------------------------------
def transform_sc(
    sc,
    frame=None,
    representation=None,
    representation_type=None,
    differential=None,
    differential_type=None,
    wrap_angle=None,
    **wrap_angles,
):
    """Transform a SkyCoord's frame, representation and differential type.

    Parameters
    ----------
    sc : SkyCoord
    frame : str, frame, or None, optional
        the target frame; None keeps the current frame.
        A str must be 'icrs', 'galactic', 'galactocentric', or the name of
        ``sc``'s current frame.
    representation : str or representation class, optional
        'spherical', 'cartesian', 'cylindrical', 'radial'.
        None: inherit from `frame` (kept as-is if `frame` is a str).
    representation_type : optional
        alias for `representation`; takes precedence if both are given.
    differential : str or differential class, optional
        'sphericalcoslat', 'spherical', 'cartesian', 'cylindrical', 'radial'.
        None: inherit from `frame` (kept as-is if `frame` is a str).
    differential_type : optional
        alias for `differential`; takes precedence if both are given.
    wrap_angle : tuple or list of tuples, optional
        ``(component_name, angle)`` pair(s); superseded by `wrap_angles`.
        __deprecated__
    **wrap_angles
        ``component_name=angle`` wrap angles, applied last.

    Returns
    -------
    SkyCoord
        the (possibly) transformed coordinate.

    Raises
    ------
    ValueError
        for an unrecognized frame / representation / differential string.
    TypeError
        for a representation / differential of unsupported type.
    """
    recogframe = ("icrs", "galactic", "galactocentric")  # known frames

    # +--------------- Frame ---------------+
    _transform = True  # whether a frame transform is needed
    if frame is None:  # default: stay in the current frame
        frame = sc.frame
        _transform = False
    # validate the frame
    if isinstance(frame, str):
        if frame == sc.frame.name:  # already in this frame
            _transform = False
        elif frame not in recogframe:
            # BUGFIX: this f-string previously had no placeholder
            raise ValueError(f"frame {frame!r} is not recognized")
    elif isinstance(frame, FrameMeta):
        pass
    else:  # TODO how to detect custom frames? accepted as-is for now
        pass

    # +--------------- Representation ---------------+
    if representation_type is not None:  # alias takes precedence
        representation = representation_type

    if representation is None:
        if isinstance(frame, str):  # a str frame carries no representation
            warnings.warn("no representation_type specified, keeping current")
            representation_type = None
        else:
            try:
                _get_repr_cls(frame.representation_type)
            except ValueError:  # frame is a base frame class; keep original
                representation_type = sc.representation_type
            else:
                representation_type = frame.representation_type
    elif isinstance(representation, str):
        _rep_classes = {
            "spherical": coord.SphericalRepresentation,
            "cartesian": coord.CartesianRepresentation,
            "cylindrical": coord.CylindricalRepresentation,
            "radial": coord.RadialRepresentation,
        }
        try:
            representation_type = _rep_classes[representation]
        except KeyError:
            raise ValueError(
                f"representation {representation!r} not in "
                f"{tuple(_rep_classes)}"
            ) from None
    elif isinstance(representation, MetaBaseRepresentation):
        representation_type = representation
    else:
        raise TypeError("representation must be None, str, or a class")

    # +--------------- Differential ---------------+
    if differential_type is not None:  # alias takes precedence
        differential = differential_type

    if differential is None:  # no differential given
        if isinstance(frame, str):  # keep the original differential
            warnings.warn("no differential_type specified, keeping current")
            differential_type = None
        else:  # transform to the frame's differential
            # NOTE(review): `_get_repr_cls` rejects differential classes, so
            # this likely always falls back to ``sc.differential_type`` --
            # confirm; `_get_diff_cls` may have been intended.
            try:
                _get_repr_cls(frame.differential_type)
            except ValueError:  # frame is a base frame class; keep original
                differential_type = sc.differential_type
            else:
                differential_type = frame.differential_type
    elif isinstance(differential, str):  # override the frame differential
        _dif_classes = {
            "sphericalcoslat": coord.SphericalCosLatDifferential,
            "spherical": coord.SphericalDifferential,
            "cartesian": coord.CartesianDifferential,
            "cylindrical": coord.CylindricalDifferential,
            "radial": coord.RadialDifferential,
        }
        try:
            differential_type = _dif_classes[differential]
        except KeyError:
            raise ValueError(
                f"differential {differential!r} not in {tuple(_dif_classes)}"
            ) from None
    elif isinstance(differential, MetaBaseDifferential):
        differential_type = differential
    else:
        raise TypeError("differential must be None, str, or a class")

    # +--------------- Converting ---------------+
    if _transform:
        sc = sc.transform_to(frame)
    if representation_type is not None:
        sc.representation_type = representation_type
    if differential_type is not None:
        sc.differential_type = differential_type

    # TODO deprecate `wrap_angle` in favor of **wrap_angles
    if wrap_angle is not None:
        if np.isscalar(wrap_angle[0]):  # a single (name, wrapangle) pair
            setattr(getattr(sc, wrap_angle[0]), "wrap_angle", wrap_angle[1])
        else:  # a list [(name, wrapangle), ...]
            for name, angle in wrap_angle:
                setattr(getattr(sc, name), "wrap_angle", angle)
    for name, angle in wrap_angles.items():
        setattr(getattr(sc, name), "wrap_angle", angle)

    return sc
# /def
# ----------------------------------------------------------------------------
def convert_frame_and_repr(
    sc, frame=None, representation=None, differential=None
):
    """Change a SkyCoord's frame and representation; see ``transform_sc``."""
    kwargs = dict(
        frame=frame,
        representation=representation,
        differential=differential,
    )
    return transform_sc(sc, **kwargs)


convert_frame_and_repr.__doc__ = transform_sc.__doc__
# /def
# ----------------------------------------------------------------------------
def convert_repr(sc, representation=None, differential=None):
    """Change only the representation/differential of `sc`; frame is kept."""
    kwargs = dict(representation=representation, differential=differential)
    return transform_sc(sc, frame=None, **kwargs)
# /def
##############################################################################
# END
| 17,748 | 32.551985 | 81 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/util/coordinates/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# TITLE : coordinates initialization file
# AUTHOR : Nathaniel Starkman
# PROJECT : AST1501
#
# -----------------------------------------------------------------------------
# Docstring and Metadata
"""coordinates
"""
__author__ = "Nathaniel Starkman"
#############################################################################
# IMPORTS
# PROJECT-SPECIFIC
from .CustomCoords import make_stream_frame
#############################################################################
# END
| 625 | 22.185185 | 79 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/progenitors/load_progenitor.py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : load_progenitor
# AUTHOR : Nathaniel Starkman
# PROJECT : AST1501
#
# ----------------------------------------------------------------------------
# Docstring and Metadata
"""Load Progenitors."""
__author__ = "Nathaniel Starkman"
##############################################################################
# IMPORTS
# GENERAL
import os
import json
import numpy as np
import importlib
# Astropy
from astropy import units as u
from astropy.coordinates import SkyCoord
# CUSTOM
from astroPHD import LogFile, ObjDict
# PROJECT-SPECIFIC
from ..util import astrarray
##############################################################################
# PARAMETERS
_LOGFILE = LogFile(header=False) # LogPrint, which is compatible with LogFile
##############################################################################
# Seeing Progenitors
def available_progenitors(_print=True):
    """Return the available progenitors, grouped by lookup-file entry.

    Parameters
    ----------
    _print : bool, optional
        if True (default), also print the mapping

    Returns
    -------
    dict
        module path -> list of progenitor names that map to it
    """
    inverted = {}
    for progname, path in get_lookupdir().items():
        if path not in inverted:
            inverted[path] = []
        inverted[path].append(progname)
    if _print:
        print(inverted)
    return inverted
# /def
# ----------------------------------------------------------------------------
def get_lookupdir():
    """Read and return the progenitor lookup table (``lookup.json``).

    Returns
    -------
    dict
        progenitor name -> module path relative to this package
    """
    here = os.path.dirname(os.path.realpath(__file__))
    with open(here + "/lookup.json", "r") as fobj:
        return json.load(fobj)
# /def
# ----------------------------------------------------------------------------
def available_coords_in_progenitor(name):
    """Return the coordinate keys defined for progenitor `name`.

    Returns
    -------
    dict_keys
        the keys of the progenitor module's ``coords`` dict
    """
    table = get_lookupdir()
    # relative module name of the progenitor's data file
    modname = f".{table[name]}"
    # turn this package's directory into a dotted package path,
    # stripping any leading relative-path residue
    package = os.path.dirname(__file__).replace("/", ".")
    package = package.replace("...", "").replace("..", "")
    module = importlib.import_module(modname, package)
    return module.progenitor.coords.keys()
# /def
##############################################################################
### Load Progenitors
def loadProgenitor(
    name=None, pmnsigma=7, coord="main", logger=_LOGFILE, verbose=None
):
    r"""Load a progenitor data module by name.

    Parameters
    ----------
    name : str, optional
        name of the progenitor; must be a key of the lookup table
        (see ``available_progenitors``)
    pmnsigma : float, optional
        half-width of the constructed proper-motion ranges, in multiples of
        the catalog proper-motion error
    coord : str, optional  (default 'main')
        which entry of the module's ``coords`` dict to promote to
        ``prog.coord``; ignored if the module already provides ``coord`` or
        if ``coords`` is itself a SkyCoord.
        ex: for coord='name'
            'coord': {'name': SkyCoord(
                ra=229.018 * u.deg, dec=-0.124 * u.deg, distance=23.2 * u.kpc,
                pm_ra_cosdec=-2.296 * u.mas/u.yr, pm_dec=-2.257 * u.mas/u.yr,
                radial_velocity=-58.7 * u.km/u.s)}
    logger : LogFile, optional
    verbose : optional
        verbosity level passed to `logger`

    Returns
    -------
    prog : ObjDict
        the progenitor object, augmented with ``coord`` (SkyCoord),
        ``prlx`` (parallax from the distance), and the
        ``pm_ra_cosdec_range`` / ``pm_dec_range`` +/- `pmnsigma`-sigma
        proper-motion windows.
    """
    logger.report(f"Loaded Progenitor {name} Info:", verbose=verbose)

    # +---------------------------------------------------+
    logger.report(
        "loaded progenitor file", verbose=verbose, start_at=2, start="\t"
    )
    # resolve the progenitor's module file through the lookup table
    lookup = get_lookupdir()
    pkg = f"{os.path.dirname(__file__)}/{lookup[name]}.py"

    # import the data module directly from its file path
    # NOTE(review): relies on `importlib.util` being reachable from the bare
    # `import importlib` at module level -- confirm
    spec = importlib.util.spec_from_file_location(lookup[name], pkg)
    file = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(file)
    prog = file.progenitor

    # +---------------------------------------------------+
    logger.report(
        "loaded coordinates", verbose=verbose, start_at=2, start="\t"
    )
    # promote a single SkyCoord to `prog.coord` if not already present
    if "coord" not in prog:  # fall back to the 'coords' collection
        crd = prog["coords"]
        prog.coord = crd if isinstance(crd, SkyCoord) else crd[coord]

    # parallax derived from the coordinate distance
    prog.prlx = prog.coord.distance.to(u.mas, equivalencies=u.parallax())

    # +---------------------------------------------------+
    # Proper-motion ranges: catalog value +/- pmnsigma * catalog error
    logger.report(
        "made proper motions", verbose=verbose, start_at=2, start="\t"
    )
    # RA
    errange = np.array([-1, 1]) * prog.info["pmra_err"] * pmnsigma
    prog.pm_ra_cosdec_range = prog.info["pmra"] + errange
    # Dec
    errange = np.array([-1, 1]) * prog.info["pmdec_err"] * pmnsigma
    prog.pm_dec_range = prog.info["pmdec"] + errange

    return prog
# /def
##############################################################################
# Specific Progenitors
def loadPal5(pmnsigma=7, coord="main", logger=_LOGFILE, verbose=None):
    """Load the Palomar 5 progenitor.

    Cluster center at: `15 16 05.30 -00 06 41.0`
    (https://arxiv.org/abs/astro-ph/0511128)
    Approximate angular size (data flag of D in A-E): `13.18 11.48 arcmin`
    (SIMBAD)
    """
    options = dict(
        pmnsigma=pmnsigma, coord=coord, logger=logger, verbose=verbose
    )
    return loadProgenitor("Palomar5", **options)
# /def
##############################################################################
# END
| 5,287 | 23.943396 | 89 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/progenitors/__init__.py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : progenitors initialization
# AUTHOR : Nathaniel Starkman
# PROJECT : AST1501
#
# ----------------------------------------------------------------------------
# Docstring and Metadata
"""progenitors: initialization."""
__author__ = "Nathaniel Starkman"
#############################################################################
# IMPORTS
# PROJECT-SPECIFIC
from .load_progenitor import (
loadProgenitor,
available_progenitors,
available_coords_in_progenitor,
)
from .load_progenitor import loadPal5
#############################################################################
# END
| 724 | 21.65625 | 78 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/progenitors/Palomar5.py | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# TITLE : Palomar5
# AUTHOR : Nathaniel Starkman
# PROJECT : AST1501
#
# -----------------------------------------------------------------------------
# Docstring and Metadata
"""Palomar 5 Information."""
__author__ = "Nathaniel Starkman"
##############################################################################
# IMPORTS
# Astropy
from astropy import units as u
from astropy.table import QTable
from astropy.coordinates import SkyCoord
# PROJECT-SPECIFIC
from astroPHD import ObjDict
##############################################################################
# PARAMETERS
_mas_yr = u.mas / u.yr  # shorthand: proper-motion unit (milliarcsec / year)
##############################################################################
# CODE
##############################################################################
# Palomar 5 globular-cluster progenitor: two phase-space solutions, a table
# of structural / kinematic parameters, and the references they come from
# (see the `references` list below for each value's source).
progenitor = ObjDict(
    "Palomar 5",
    coords={
        # solution from the Bovy progenitor orbit (per `references`)
        "main": SkyCoord(
            ra=229.018 * u.deg,
            dec=-0.124 * u.deg,
            distance=23.2 * u.kpc,
            pm_ra_cosdec=-2.296 * _mas_yr,
            pm_dec=-2.257 * _mas_yr,
            radial_velocity=-58.7 * u.km / u.s,
        ),
        # alternate solution; presumably from Vasiliev's GC catalog --
        # TODO confirm the citation
        "vasiliev": SkyCoord(
            ra=229.022 * u.deg,
            dec=-0.223 * u.deg,
            distance=23.2 * u.kpc,
            pm_ra_cosdec=-2.736 * _mas_yr,
            pm_dec=-2.646 * _mas_yr,
            radial_velocity=-58.6 * u.km / u.s,
        ),
    },
    # one-row table of cluster parameters (sources in `references`)
    info=QTable(
        {
            # on-sky extent (SIMBAD)
            "angular size ra": [13.18] * u.arcmin,
            "angular size dec": [11.48] * u.arcmin,
            # tidal radius (Ibata 2017)
            "tidal radius": [0.145] * u.kpc,
            "tidal radius error": [0.009] * u.kpc,
            "tidal radius angular": [21.2] * u.arcmin,
            "tidal radius angular error": [None],  # not available
            # half-mass radius (Harris catalog)
            "half-mass radius": [2.73] * u.arcmin,
            # proper motion & errors (Bovy orbit)
            "pmra": [-2.296] * _mas_yr,
            "pmra_err": [0.3] * _mas_yr,
            "pmdec": [-2.257] * _mas_yr,
            "pmdec_err": [0.3] * _mas_yr,
        }
    ),
    references=[
        "main coord from Bovy prog Orbit",
        "angular size from SIMBAD",
        "tidal radius from Ibata 2017",
        "half-mass radius from http://physwww.mcmaster.ca/~harris/mwgc.dat",
        "proper motion from bovy orbit",
    ],
)
# time specifications: presumably 0.1 Gyr forward ('t+') and backward ('t-')
# integration spans with 10000 steps -- TODO confirm against the orbit code
progenitor["t+"] = {"end": 0.1 * u.Gyr, "num": 10000}
progenitor["t-"] = {"end": -0.1 * u.Gyr, "num": 10000}
| 2,418 | 28.144578 | 79 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/orbit/exceptions.py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : exceptions
# AUTHOR : Nathaniel Starkman
# PROJECT : AST1501
#
# ----------------------------------------------------------------------------
# Docstring and Metadata
"""Exceptions."""
__author__ = "Nathaniel Starkman"
#############################################################################
# IMPORTS
# GENERAL
import warnings
from warnings import warn
#############################################################################
# CODE
#############################################################################
class integrationWarning(UserWarning):
"""integrationWarning"""
pass
warnings.simplefilter("always", integrationWarning)
#############################################################################
# END
| 867 | 20.7 | 78 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/orbit/_indexdict.py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : indexdict
# AUTHOR : Nathaniel Starkman
# PROJECT : AST1501
#
# ----------------------------------------------------------------------------
# Docstring and Metadata
"""IndexDict."""
__author__ = "Nathaniel Starkman"
#############################################################################
# IMPORTS
# GENERAL
import numpy as np
from collections import OrderedDict
###############################################################################
# CODE
###############################################################################
class IndexDict(OrderedDict):
    """An integer-keyed ordered dictionary.

    Both the keys and their ordering carry information. For example, the
    integer keys can encode the order in which values were added, while the
    key *order* encodes e.g. the time-ordering of the values (keys may be
    re-ordered with ``move_to_start`` / ``move_to_end`` without renumbering).

    Fixed relative to the original implementation:
    - ``indmin`` referenced a non-existent ``self._bounds`` attribute
    - ``append`` / ``prepend`` crashed on an empty IndexDict
      (``max()`` raises ValueError, which was not caught)
    - ``__eq__`` now also tolerates scalar values mixed with arrays
    """

    # +------------------------------+
    # Get / Set:

    def __getitem__(self, keys):
        """Get item(s).

        Parameters
        ----------
        keys : int, None, Ellipsis, slice(None), tuple, or iterable of keys
            **No slices (except slice(None))**
            int : single value for that (integer) key
            Ellipsis, None, slice(None) : self
            tuple : multidimensional slice,
                equivalent to ``self[keys[0]][keys[1], keys[2], ...]``;
                the first element may itself be an iterable of keys, e.g.
                ``[(key00, key01, ...), key1, key2, ...]``
            other iterable : new IndexDict restricted to those keys

        Returns
        -------
        the stored value for a single key, self, or an IndexDict
        """
        if np.isscalar(keys):  # a single integer key
            return super().__getitem__(keys)
        elif keys in (Ellipsis, None, slice(None)):  # the whole dict
            return self
        # multidimensional
        elif isinstance(keys, tuple):
            if np.isscalar(keys[0]):  # single IndexDict key
                # self[key0][key1, key2, ...]
                return super().__getitem__(keys[0])[keys[1:]]
            elif keys[0] in (Ellipsis, None, slice(None)):  # all keys
                keys0 = self.keys()
            else:  # many IndexDict keys
                # [(key00, key01, ..), key1, key2, ...]
                keys0 = keys[0]
            # pass a scalar, not a 1-tuple, when only one inner index remains
            if len(keys) == 2:
                return IndexDict([(k, self[k][keys[1]]) for k in keys0])
            else:
                return IndexDict([(k, self[k][keys[1:]]) for k in keys0])
        # not multidimensional
        else:  # iterable of keys -> sub-IndexDict
            return IndexDict([(k, self[k]) for k in keys])
    # /def

    def __setitem__(self, key, value):
        """Set ``self[key] = value``.

        Parameters
        ----------
        key : int
            only integer keys are accepted
        value : anything

        Raises
        ------
        ValueError
            if `key` is not an int
        """
        if not isinstance(key, int):
            raise ValueError("key is not int")
        super().__setitem__(key, value)
    # /def

    def set(self, key, value):
        """Alias for ``self[key] = value`` (integer keys only)."""
        self.__setitem__(key, value)
    # /def

    # +------------------------------+
    # Move / Add @ positions

    def move_to_start(self, key):
        """Move an existing key to the start (complement of move_to_end)."""
        super().move_to_end(key, last=False)
    # /def

    def add_to_start(self, key, value):
        """Insert `value` under `key` at the start of the ordering."""
        self[key] = value  # adding
        self.move_to_start(key)  # moving
    # /def

    # move_to_end (inherited from OrderedDict)
    # add_to_end (plain assignment appends)

    # ----------- Serialize -----------

    def __getstate__(self):
        """Return state for pickling; same as ``self.items()``.

        NOTE(review): returns the live ``items()`` view; a list copy may be
        safer for real serialization -- confirm before relying on pickle.
        """
        return self.items()
    # /def

    def __setstate__(self, state):
        """Restore from a sequence of ``(key, value)`` pairs."""
        for i, v in state:
            self.set(i, v)
    # /def

    def copy(self):
        """Return a shallow copy of self (uses ``self.__class__``)."""
        instance = super().__new__(self.__class__)
        instance.__init__(self.items())
        return instance
    # /def

    # ----------- List Methods -----------

    def keyslist(self, ind=None):
        """Keys as a list, optionally indexed (instead of an odict view)."""
        if ind in (None, Ellipsis):
            ind = slice(None)
        return list(self.keys())[ind]
    # /def

    def valueslist(self, ind=None):
        """Values as a list, optionally indexed (instead of an odict view)."""
        if ind in (None, Ellipsis):
            ind = slice(None)
        return list(self.values())[ind]
    # /def

    def itemslist(self, ind=None):
        """Items as a list, optionally indexed (instead of an odict view)."""
        if ind in (None, Ellipsis):
            ind = slice(None)
        return list(self.items())[ind]
    # /def

    # +---------- indmax/min ----------+

    @property
    def curkey(self):
        """Maximum key -- generally the most recently added."""
        return max(self.keyslist())
    # /def

    @property
    def maxkey(self):
        """Maximum key -- generally the most recently added."""
        return max(self.keyslist())
    # /def

    @property
    def minkey(self):
        """Minimum key -- generally 0."""
        return min(self.keyslist())
    # /def

    @property
    def indmax(self):
        """Last key in the current ordering."""
        return self.keyslist(-1)
    # /def

    @property
    def indmin(self):
        """First key in the current ordering.

        BUGFIX: previously referenced the non-existent ``self._bounds``.
        """
        return self.keyslist(0)
    # /def

    # +---------- pre/append ----------+

    def _aprependkey(self, _zeroind):
        """Next key for append/prepend: ``maxkey + 1``, or `_zeroind` if empty.

        BUGFIX: ``max()`` on an empty dict raises ValueError (not
        IndexError), so catch both; append/prepend now work when empty.
        """
        try:  # add after maxkey
            key = self.maxkey + 1
        except (ValueError, IndexError):  # empty: start numbering
            key = _zeroind
        return key
    # /def

    def prepend(self, value, _zeroind=0):
        """Add `value` at the start of the ordering (key is still maxkey+1)."""
        key = self._aprependkey(_zeroind)
        self.add_to_start(key, value)
    # /def

    def append(self, value, _zeroind=0):
        """Add `value` at the end of the ordering."""
        key = self._aprependkey(_zeroind)
        self[key] = value
    # /def

    def __eq__(self, other):
        """Equality test.

        Returns a single bool when all values compare unambiguously;
        otherwise (e.g. numpy-array values) an element-wise list of results.
        """
        try:  # single result
            return super().__eq__(other)
        except ValueError:  # some values are arrays -> compare element-wise
            tests = []
            for k1, k2 in zip(self.values(), other.values()):
                try:
                    tests.append(all(k1 == k2))
                except (TypeError, ValueError):  # not an iterable comparison
                    tests.append(k1 == k2)
            return tests
    # /def
##############################################################################
# END
| 7,639 | 21.339181 | 79 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/orbit/OrbitKeeper.py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : OrbitKeeper
# AUTHOR : Nathaniel Starkman
# PROJECT : AST1501
#
# ----------------------------------------------------------------------------
# Docstring and Metadata
"""OrbitKeeper."""
__author__ = "Nathaniel Starkman"
#############################################################################
# IMPORTS
# GENERAL
import numpy as np
# PROJECT-SPECIFIC
from .exceptions import warn, integrationWarning
from ._indexdict import IndexDict
#############################################################################
# CODE
#############################################################################
class OrbitKeeper(object):
"""OrbitKeeper.
store orbit and time information for SequentialOrbits
indices are integers with corresponding orbit in _pworbits
The location is the time organization
"""
# def __new__(cls, *args):
# """OrbitKeeper __new__ method
# used for copying OrbitKeeper
# """
# print(args)
# if len(args) == 1: # orbit first argument
# print('inargs')
# orbit = args[0] # orbitkeeper
#
# # for copying
# if isinstance(orbit, OrbitKeeper):
# # instance = cls.__new__(cls)
# # instance.__init__(orbit._orbits[0], orbit.t0)
# return orbit[:] # using copying
# else:
# raise Exception('must be OrbitKeeper')
# else:
# print('outargs')
# self = super().__new__(cls)
# return self
# # /def
def __init__(self, orbit, t0):
"""Instantiate OrbitKeeper.
sets:
_ind: current orbit index
_direction: 'forward'
t0: start time for starting orbit
tref: start time of current orbit
_times: IndexDict of orbit evaluation times
_bounds: IndexDict of orbit time bounds
_orbits: IndexDict of orbit instances
"""
self._ind = 0 # current orbit index
self._direction = "forward" # direction
# The Time
self.t0 = t0 # start time of 1st orbit
self.tref = t0 # start time of current orbit
self._times = IndexDict([(0, None)]) # set(0, None)
self._bounds = IndexDict([(0, None)]) # set(0, None)
# The Orbits
if isinstance(orbit, IndexDict):
self._orbits = orbit
else:
self._orbits = IndexDict([(0, orbit)]) # set(0, orbit)
# self._orbits.direction = self.direction
return
# /def
# @classmethod
# def loadOrbitKeeper(orbits): # TODO
# self._ind = orbits._ind
# self.direction
# self.t0 = orbits[orbits.minkey].time()[0]
# self._times =
def __getitem__(self, ind):
"""Get a slice of OrbitKeeper.
for slice objects, can only accept slice(None)
TODO: view vs copy?
"""
if isinstance(ind, slice) and not ind == slice(None):
raise ValueError
ok = OrbitKeeper(None, self.t0)
ok._ind = self._ind
ok._direction = self._direction
ok.tref = self.tref
ok._times = self._times[ind]
ok._orbits = self._orbits[ind]
# ensure never subindex _bounds
if np.isscalar(ind) or ind in (Ellipsis, None, slice(None)):
ok._bounds = self._bounds[ind]
elif not isinstance(
ind, tuple
): # not multidimensional # TODO improve
ok._bounds = self._bounds[ind]
else:
ok._bounds = self._bounds[ind[0]]
return ok
def __eq__(self, other):
"""Test for complete equality."""
tests = [
(getattr(self, n) == getattr(other, n))
for n in ("_ind", "_direction", "t0", "tref")
]
tests.append(self._bounds == other._bounds)
tests.append(self._orbits == other._orbits)
tests.append(all(self._times == other._times))
if all(tests):
return True
return False
def copy(self):
"""Copy method."""
ok = OrbitKeeper(None, self.t0)
ok._ind = self._ind
ok._direction = self._direction
ok.tref = self.tref
ok._times = self._times
ok._orbits = self._orbits
ok._bounds = self._bounds
return ok
# +---------- time ----------+
@property
def time(self):
"""Current time."""
return self._times[self._ind]
# /def
@time.setter
def time(self, value):
"""Set current time."""
self._times[self._ind] = value
# /def
@property
def alltimes(self):
"""All times."""
return self._times
# /def
# +---------- bounds ----------+
    @property
    def bounds(self):
        """[start, end] time bounds of the currently-active segment."""
        return self._bounds[self._ind]
    # /def
    @bounds.setter
    def bounds(self, value):
        """Set the [start, end] time bounds of the current segment."""
        self._bounds[self._ind] = value
    # /def
    @property
    def allbounds(self):
        """Container of [start, end] bounds for every orbit segment."""
        return self._bounds
    # /def
# +---------- tmax/min ----------+
    @property
    def tmax(self):
        """Latest integrated time over all segments."""
        # last of the time-ordered bounds, take its forward (end) edge
        return self._bounds.valueslist(-1)[-1]
    # /def
    @property
    def tmin(self):
        """Earliest integrated time over all segments."""
        # first of the time-ordered bounds, take its backward (start) edge
        return self._bounds.valueslist(0)[0]
    # /def
# +---------- indmax/min ----------+
    @property
    def _indmax(self):
        """Segment key of the last (latest) time-ordered segment."""
        # key of the last time-ordered bounds entry
        return self._bounds.keyslist(-1)
    # /def
    @property
    def _indmin(self):
        """Segment key of the first (earliest) time-ordered segment."""
        # key of the first time-ordered bounds entry
        return self._bounds.keyslist(0)
    # /def
# +---------- orbit ----------+
    @property
    def orbit(self):
        """The currently-active orbit segment."""
        return self._orbits[self._ind]
    # /def
    @property
    def allorbits(self):
        """Container of all orbit segments."""
        return self._orbits
    # /def
# +---------- direction ----------+
    @property
    def direction(self):
        """Integration direction: 'forward' or 'backward'."""
        return self._direction
    # /def
    @direction.setter
    def direction(self, value):
        """Set the integration direction ('forward' or 'backward')."""
        self._direction = value
        # self._orbits.direction = value
    # /def
# +---------- keys / keyslist ----------+
    def keys(self):
        """Return the segment keys (view from the times container)."""
        return self._times.keys()
    # /def
    def keyslist(self):
        """Return the segment keys as a list."""
        return self._times.keyslist()
    # /def
# +---------- remove times, bounds, orbits ----------+
def remove(self, key):
"""remove."""
print(f"removing orbit #{key}")
del self._times[key]
del self._bounds[key]
del self._orbits[key]
# /def
# +---------- pre/append ----------+
    def prepend(self, tstart, orbit):
        """Prepend a new (earlier-in-time) orbit segment.

        Parameters
        ----------
        tstart :
            start time of the new segment; becomes the new reference time
        orbit : Orbit
            the orbit segment to prepend

        Raises
        ------
        IndexError
            if the active index and the container keys are out of sync
        """
        # sanity check: active index must match the newest key everywhere
        if not (
            self._ind
            == self._times.maxkey
            == self._bounds.maxkey
            == self._orbits.maxkey
        ):
            raise IndexError("some index went wrong")
        self._ind += 1  # increment orbit index
        # new (empty) time and bounds slots, plus the orbit itself
        self._times.prepend(None)
        self._bounds.prepend(None)
        self._orbits.prepend(orbit)
        # cleanup
        self.tref = tstart  # ref time -> current time
    # /def
    def append(self, tstart, orbit):
        """Append a new (later-in-time) orbit segment.

        Parameters
        ----------
        tstart :
            start time of the new segment; becomes the new reference time
        orbit : Orbit
            the orbit segment to append

        Raises
        ------
        IndexError
            if the active index and the container keys are out of sync
        """
        # sanity check: active index must match the newest key everywhere
        if not (
            self._ind
            == self._times.maxkey
            == self._bounds.maxkey
            == self._orbits.maxkey
        ):
            raise IndexError("some index went wrong")
        self._ind += 1  # increment orbit index
        # new (empty) time and bounds slots, plus the orbit itself
        self._times.append(None)
        self._bounds.append(None)
        self._orbits.append(orbit)
        # cleanup
        self.tref = tstart  # ref time -> current time
    # /def
# +---------- time iteration ----------+
def iterator(self, time):
"""Make time iterator.
Parameters
----------
time: str, list, int
the times at which to evaluate the orbits
options:
- str: 'full'
- list: list of times
- integer: the orbit index
Returns
-------
t: list of lists
the times
"""
# time is integer index of an orbit
if isinstance(time, (int, np.integer)):
i = time # time is index
return ((i, self._times[i]),)
# the time
t = []
if isinstance(time, str):
if time not in ("full", "start", "end"):
ValueError("f{time} not 'full', 'start', 'end'")
# time shortcuts
elif time == "full":
tind = slice(None) # time array at evaluation
elif time == "start":
tind = 0
elif time == "ending":
tind = -1
# break up the time into the correct ranges for the orbit segments
bnds = self._bounds.itemslist()
for i, bnd in bnds:
if bnd is None: # bound is not integrated
warn("some segment is not integrated", integrationWarning)
continue # skip to next bounds
t.append((i, self._times[i][tind]))
return t
# break up the time into the correct ranges for the orbit segments
bnds = self._bounds.itemslist()
# bnds = [b for b in bnds if b is not None] # skip not integrated
# iterating over orbit segments
# i is not in numerical order
for i, bnd in bnds:
if bnd is None: # bound is not integrated
warn("some segment is not integrated", integrationWarning)
continue # skip to next bounds
ind = (bnd[0] <= time) & (time < bnd[1]) # w/in bounds
# single bool (b/c any(True) raises an error)
if isinstance(ind, (bool, np.bool_)):
if ind: # True
t.append((i, time))
# bool array
elif any(ind): # w/in bounds ?
t.append((i, time[ind]))
# figuring out if evaluating outside of any bounds
indbnd = np.array(
[(b[0] <= time) & (time < b[1]) for i, b in bnds]
).sum(
axis=0, dtype=bool
) # time in any bins
splitat = np.where(np.diff(indbnd) != 0)[0] + 1 # split at bin edges
hasvals = np.array(
list(map(any, np.split(indbnd, splitat)))
) # in /out of bin
indsplit = np.split(np.arange(len(time)), splitat) # bin edges indices
for ind, used in zip(indsplit, hasvals): # iterating through bins
if not used: # time outside orbit integrations
warn(
"Not including between {}:{}".format(
time[ind[0]], time[ind[-1]]
),
integrationWarning,
)
return t
# /def
| 11,370 | 25.692488 | 79 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/orbit/__init__.py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : orbit initialization
# AUTHOR : Nathaniel Starkman
# PROJECT : AST1501
#
# ----------------------------------------------------------------------------
# Docstring and Metadata
"""orbit initialization file."""
__author__ = "Nathaniel Starkman"
#############################################################################
# IMPORTS
# PROJECT-SPECIFIC
from ._sequentialorbits import SequentialOrbits
#############################################################################
# END
| 609 | 22.461538 | 78 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/src/orbit/_sequentialorbits.py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : Sequential Orbits
# AUTHOR : Nathaniel Starkman
# PROJECT : AST1501
#
# ----------------------------------------------------------------------------
# Docstring and Metadata
"""SequentialOrbits."""
__author__ = "Nathaniel Starkman"
#############################################################################
# IMPORTS
# GENERAL
import numpy as np
from typing import Optional, Union
# Galpy
from galpy.potential import MWPotential2014, Potential
from galpy.orbit import Orbit
# Astropy
from astropy.coordinates import concatenate, SkyCoord
from astropy import units as u
# CUSTOM
from astroPHD.community import starkplot as plt
from astroPHD.community.starkplot import mpl_decorator
# PROJECT-SPECIFIC
from .OrbitKeeper import OrbitKeeper
from .exceptions import warn, integrationWarning
from ..util import astrarray
from ..util.pickle import dump as _dump, load as _load
###############################################################################
# Code
###############################################################################
class SequentialOrbits(object):
"""SequentialOrbit.
A class which holds many segments of one orbit.
"""
    def __new__(cls, *args, **kw):
        """Make new SequentialOrbits.

        Supports copy-construction: when `vxvv` (first positional argument
        or keyword) is itself a SequentialOrbits, a new instance is built
        and initialized from its internal state; otherwise a blank instance
        is returned for __init__ to fill in.
        """
        # assume first argument is a real argument
        if len(args) > 0:  # vxvv first argument
            vxvv = args[0]
        else:  # vxvv kwarg
            vxvv = kw.get("vxvv", None)
        # # for making a blank instance  # TODO obviate
        # if vxvv is None:  # needed for pickling, but calls __new__ twice
        #     return super().__new__(cls)  # empty new instance
        # for copying
        if isinstance(vxvv, SequentialOrbits):  # copies by __new__() again :(
            instance = cls.__new__(cls)
            instance.__init__(
                vxvv=vxvv._data,  # using internal OrbitKeeper
                pot=vxvv._pot,
                ro=vxvv._ro,
                vo=vxvv._vo,
                zo=vxvv._zo,
                solarmotion=vxvv._solarmotion,
            )
            return instance
        else:
            self = super().__new__(cls)  # blank instance to start __init__
            return self
    # /def
def __init__(
self,
vxvv=None,
lb: bool = False,
radec: bool = False,
ro: Optional[u.Quantity] = None,
vo: Optional[u.Quantity] = None,
zo: Optional[u.Quantity] = None,
solarmotion: Union[str, list] = "stream",
tstart: Optional[u.Quantity] = None,
pot: Optional[Potential] = None,
):
"""Instantiate SequentialOrbits.
Parameters
----------
vxvv: Orbits or any Orbits-compatible input
the initial conditions
lb:
radec:
ro: Quantity (default = 8 kpc)
distance from vantage point to GC (kpc)
vo: Quantity (default = 220 km/s)
circular velocity at ro (km/s)
zo: Quantity (default = 25 pc)
offset toward the NGP of the Sun wrt the plane (kpc)
solarmotion: str or array
str: 'stream', 'local', 'hogg' or 'dehnen', or 'schoenrich',
array: value in [-U,V,W]. can be Quantity
tstart: Quantity (default = 0 gyr)
start time of first orbit segment
pot: Galpy potential
potential
TODO
------
allow instantiation from IndexDict
"""
# +---------- Galactic Properties ----------+
self._ro = (
ro if ro is not None else 8.0 * u.kpc
) # TODO set by default?
self._vo = vo if vo is not None else 220.0 * u.km / u.s
self._zo = zo if zo is not None else 25 * u.pc
# solarmotion
if isinstance(solarmotion, str): # TODO support all galpy defaults
if solarmotion.lower() == "stream":
self._solarmotion = [-11.1, 24.0, 7.25] * u.km / u.s
elif solarmotion.lower() == "local":
self._solarmotion = [-11.1, 12.24, 7.25] * u.km / u.s
else:
self._solarmotion = solarmotion
# +---------- Orbit Properties ----------+
# starting time
t0: u.Quantity = 0 * u.Gyr if tstart is None else tstart
# making orbits
if isinstance(vxvv, OrbitKeeper): # internal use only
self._data = vxvv
# elif isinstance(vxvv, IndexDict): # TODO
# self._data = OrbitKeeper(None, vxvv)
# self._data._ind = vxvv._ind
# self._data.direction = vxvv.direction
# self._data.t0 = vxvv[vxvv.minkey].time()[0] # overriding t0
# self._data.tref = vxvv[vxvv.maxkey].time()[0] # overriding t0
# # self._data._times =
# # self._data._bounds =
elif isinstance(vxvv, Orbit):
self._data = OrbitKeeper(vxvv, t0)
else: # accepts: lists, SkyCoord
orbit = Orbit(
vxvv=vxvv,
ro=self._ro,
vo=self._vo,
zo=self._zo,
solarmotion=self._solarmotion,
radec=radec,
uvw=False,
lb=lb,
)
self._data = OrbitKeeper(orbit, t0)
# potential
# defaults to MWPotential2014
self._pot = pot if pot is not None else MWPotential2014
return
# /def
# +---------- time ----------+
    @property
    def times(self):
        """Container of time arrays for all orbit segments."""
        return self._data._times
    @property
    def time(self):
        """Time array of the current orbit segment."""
        return self._data.time
    @time.setter
    def time(self, value):
        """Set the time array of the current orbit segment."""
        self._data.time = value
    @property
    def t(self):
        """__deprecated__ alias for .time (current time array)."""
        return self._data.time
    def set_time(self, time):
        """Set current time array (method form of the `time` setter)."""
        self.time = time
# +---------- bounds ----------+
# bounds are kept mostly hidden
    @property
    def _allbounds(self):
        """All segment bounds (kept mostly internal)."""
        return self._data.allbounds
    @property
    def _bounds(self):
        """[start, end] time bounds of the current segment."""
        return self._data.bounds
    @_bounds.setter
    def _bounds(self, value):
        """Set the [start, end] time bounds of the current segment."""
        self._data.bounds = value
    def _set_bounds(self, bounds):
        """Set current segment bounds (method form of the `_bounds` setter)."""
        self._data.bounds = bounds
# +---- Orbits ----+
    @property
    def _orbits(self):
        """Container of all orbit segments."""
        return self._data._orbits
    @property
    def orbit(self):
        """The current orbit segment."""
        return self._data.orbit
    @property
    def o(self):
        """Shorthand alias for .orbit (current orbit segment)."""
        return self._data.orbit
    @property
    def _vxvv(self):
        """Initial phase-space parameters of the current orbit segment."""
        return self._data.orbit.vxvv
    @property
    def _init_vxvv(self):
        """Initial phase-space parameters of the first (key 0) segment."""
        return self._data._orbits[0].vxvv
    @property
    def segmentlist(self):
        """List of orbit-segment keys."""
        return self._data.keyslist()
    @property
    def numsegs(self):
        """Number of orbit segments."""
        # can't use self._data._ind b/c deleted orbits
        return len(self._data.keyslist())
    @property
    def numorbs(self):
        """Number of orbits per segment (length of the current segment)."""
        return len(self._data.orbit)
# +---------- potential ----------+
    @property
    def potential(self):
        """The galpy potential used for integration."""
        return self._pot
    @potential.setter
    def potential(self, value):
        """Set the galpy potential used for integration."""
        self._pot = value
##############################
    def __getitem__(self, ind):
        """Index into the orbit segments (and orbits within them).

        slices operate on self.segmentlist!
        Accepts up to three indices: (segment, orbit-within-segment, time).

        TODO support reindex option so have 0, ...
        """
        def _helper(ind0):
            # resolve the segment-axis index into an OrbitKeeper
            if ind0 in (Ellipsis, None):
                return self._data
            elif isinstance(ind0, slice):
                # NOTE(review): leftover debug print? confirm intended
                print("slice", self.segmentlist[ind0])
                return self._data[self.segmentlist[ind0]]
            else:  # many indices return SequentialOrbits
                return self._data[ind0]
        # +----- single dimensional -----+
        if (
            np.isscalar(ind)
            or isinstance(ind, slice)
            or ind in (Ellipsis, None)
        ):
            if np.isscalar(ind):  # single index return Orbits
                return self._orbits[ind]  # list b/c IndexDict __getitem__
            else:
                vxvv = _helper(ind)
                orb = SequentialOrbits(
                    vxvv=vxvv,
                    ro=self._ro,
                    vo=self._vo,
                    zo=self._zo,
                    solarmotion=self._solarmotion,
                    pot=self.potential,
                )
                return orb
        # +----- multi-dimensional -----+
        elif len(ind) <= 3:
            # +----- single 0th index -----+
            if np.isscalar(ind[0]):  # single index return Orbits
                if len(ind) == 2:  # get Orbits subset
                    return self._orbits[ind]  # (tuple)
                elif len(ind) == 3:  # get Orbits subset at specific times
                    return self._orbits[ind[:2]].SkyCoord(ind[2])
            # +----- slice/array 0th index -----+
            else:
                vxvv = _helper(ind[0])
                if len(ind) == 2:  # TODO SequentialOrbits
                    vxvv = vxvv[:, ind[1]]  # (tuple)
                elif len(ind) == 3:
                    return vxvv[:, ind[1]].SkyCoord(ind[2])
                orb = SequentialOrbits(
                    vxvv=vxvv,
                    ro=self._ro,
                    vo=self._vo,
                    zo=self._zo,
                    solarmotion=self._solarmotion,
                    pot=self.potential,
                )
                return orb
        else:
            raise ValueError()
    # /def
    def __getattr__(self, name: str):
        """Get or evaluate an attribute across all orbit segments.

        Called only when `name` is not already defined on the class.
        Three cases:
        - plotting attributes (name contains 'plot'): return a wrapper
          that overplots every segment on one set of axes
        - callable Orbit attributes: return a function that evaluates the
          attribute on each segment at that segment's times
        - plain attributes: return the list of per-segment values

        Parameters
        ----------
        name: str
            name of the attribute

        Returns
        -------
        attr
            if the attribute is callable, a function to evaluate the attribute
            for each Orbit; otherwise a list of attributes
        """
        # Catch all plotting functions
        if "plot" in name:
            @mpl_decorator(fig=None)
            def _plot(*args, **kw):
                if len(args) == 0:
                    if "d1" not in kw:
                        kw["d1"] = "t"
                elif isinstance(args[0], str):
                    kw["d1"] = args[0]
                    args = args[1:]
                ind = self._data.keyslist()[0]
                # Hack to get the plot properties correct
                # since overplot doesn't make them
                fig, ax = plt.gcf(), plt.gca()
                plt.figure()
                getattr(self._orbits[ind], name)(*args, **kw)
                xl, yl = plt.gca().get_xlabel(), plt.gca().get_ylabel()
                plt.close()
                plt.figure(fig.number)
                plt.sca(ax)
                # actually plotting: overplot every segment
                [
                    getattr(self._orbits[i], name)(*args, overplot=True, **kw)
                    for i in self._data.keys()
                ]
                # axisLabels(ax=None, x=xl, y=yl, units=False, **kw)
                ax.set_xlabel(xl)
                ax.set_ylabel(yl)
                return None
            # Assign documentation from the underlying Orbit method
            _plot.__doc__ = self._orbits[0][0].__getattribute__(name).__doc__
            return _plot
        # Do rest of functions
        # TODO don't make a new orbit every time
        attribute = getattr(Orbit(), name)
        if callable(attribute):
            def _func(t=None, *args, **kwargs):
                # evaluate per segment, each at its own times
                res = [
                    getattr(self._orbits[i], name)(t=time, *args, **kwargs)
                    for i, time in self.iterator(t)
                ]
                if len(res) == 1:
                    return res[0]
                else:
                    try:
                        return astrarray(res)
                    except AttributeError:
                        return res
            _func.__doc__ = attribute.__doc__
            return _func
        else:
            return [getattr(orbit, name) for orbit in self._orbits]
    # /def
# +------------------------------+
# Equality:
    def __eq__(self, other):
        """Test for equality.

        Compares ro, vo, zo, solarmotion, and the orbit data.
        NOTE: the potential comparison is currently commented out.
        """
        if not isinstance(other, SequentialOrbits):
            return False
        # else:
        tests = [
            getattr(self, n) == getattr(other, n)
            for n in ("_ro", "_vo", "_zo", "_data")
        ]
        tests.append(all(self._solarmotion == other._solarmotion))
        # tests.append(self._pot == other._pot)
        if all(tests):
            return True
        return False
    # /def
# +------------------------------+
# Serialize:
    def __getstate__(self):
        """Pickle support: the full instance __dict__ is the state."""
        return self.__dict__
    # /def
    def __setstate__(self, state):
        """Pickle support: restore by replacing __dict__ wholesale."""
        self.__dict__ = state
    # /def
# def __getstate__(self):
# return (self._data,
# self._ro, self._vo, self._zo, self._solarmotion,
# self._pot)
# # /def
#
# def __setstate__(self, state):
# print(state)
# _data, ro, vo, zo, solarmotion, pot = state
# self._data = _data
# self._ro = ro
# self._vo = vo
# self._zo = zo
# self._solarmotion = solarmotion
# self._pot = pot
# # /def
    def dump(self, fname, protocol=None, *, fopt="b", fix_imports=True):
        """Pickle this SequentialOrbits to `fname` (src.util.pickle.dump)."""
        _dump(
            self, fname, protocol=protocol, fopt=fopt, fix_imports=fix_imports
        )
    # /def
    def save(self, fname, protocol=None, *, fopt="b", fix_imports=True):
        """Alias for .dump (pickle to file)."""
        self.dump(fname, protocol=protocol, fopt=fopt, fix_imports=fix_imports)
    # /def
@staticmethod
def load(
fname, *, fopt="b", fix_imports=True, encoding="ASCII", errors="strict"
):
"""load."""
self = _load(
fname,
fopt=fopt,
fix_imports=fix_imports,
encoding=encoding,
errors=errors,
)
return self
# /def
    def copy(self):
        """Return a copy of this SequentialOrbits.

        The new instance is constructed from the same internal OrbitKeeper
        (__init__ stores it directly), so the orbit data is shared.
        """
        instance = self.__class__(
            vxvv=self._data,
            pot=self._pot,
            solarmotion=self._solarmotion,
            ro=self._ro,
            vo=self._vo,
            zo=self._zo,
        )
        return instance
    # /def
# +---------- time iteration ----------+
def iterator(self, time=None):
"""iterator."""
if time is None:
timeiter = ((0, self._data.t0),)
else:
timeiter = self._data.iterator(time)
return timeiter
# /def
# +------------------------------+
# Add Orbits
def _check_curr_orbit_integrated(self):
"""_check_curr_orbit_integrated.
TODO check for None in _times instead
"""
# Checking integrated previous orbit (has non-zero time domain)
try:
len(self.time)
except TypeError: # time is scalar, not a range
warn(
"Never integrated previous Orbit. Keeping current.",
category=integrationWarning,
)
return False
return True
# /def
def add_orbit(self, drct="previous", pot=None, vxvv=None, **kw):
"""Add orbit.
Parameters
----------
drct: str
'previous': add orbit from previous orbit
'forward': add orbit going forward in time
'backward': add orbit going backward in time
pot: Potential
vxvv: list
Returns
-------
self
"""
if drct in ("prev", "previous"):
self.add_orbit_from_prev(pot=pot, vxvv=vxvv, **kw)
elif drct in ("for", "forward", "forwards"):
self.add_forward_orbit(pot=pot, vxvv=vxvv, **kw)
elif drct in ("back", "backward", "backwards"):
self.add_backward_orbit(pot=pot, vxvv=vxvv, **kw)
else:
raise ValueError(drct)
return self
# /def
def add_orbit_from_prev(self, pot=None, vxvv=None, **kw):
"""Add orbit from prev conditions.
Parameters
----------
pot: Potential
vxvv: list
Returns
-------
self
"""
# Checking integrated previous orbit (has non-zero time domain)
if not self._check_curr_orbit_integrated():
return
# Forward or Backwards?
drct = self._data.direction
if drct == "forward":
oldt = self._bounds[1] # getting current time
if vxvv is None: # use most recent orbit
orbit = Orbit(vxvv=self.o.SkyCoord(oldt))
elif isinstance(vxvv, Orbit):
orbit = vxvv
else:
orbit = Orbit(vxvv=vxvv, **kw)
self._data.append(oldt, orbit)
else:
oldt = self._bounds[0] # getting current time
if vxvv is None: # use most recent orbit
orbit = Orbit(vxvv=self.o.SkyCoord(oldt))
elif isinstance(vxvv, Orbit):
orbit = vxvv
else:
orbit = Orbit(vxvv=vxvv, **kw)
self._data.prepend(oldt, orbit)
# potential
if pot is not None:
self._pot = pot
return self
# /def
    def add_forward_orbit(self, pot=None, vxvv=None, **kw):
        """Add an orbit segment going forward in time.

        The new segment continues from the latest integrated time of the
        forward-most segment and sets the direction to 'forward'.

        Parameters
        ----------
        pot: Potential, optional
            new default potential; otherwise taken from the forward-most
            segment
        vxvv: Orbit or Orbit-compatible initial conditions, optional
            default: the forward-most orbit evaluated at its end time
        **kw
            passed to Orbit() when `vxvv` is not already an Orbit

        Returns
        -------
        self, or None if the current orbit was never integrated
        """
        # Checking integrated previous orbit (has non-zero time domain)
        if not self._check_curr_orbit_integrated():
            return
        ind = self._data._indmax  # forward orbit key
        oldt = self._data.tmax
        # adding orbit
        if vxvv is None:  # use forwardest orbit
            orbit = Orbit(vxvv=self._orbits[ind].SkyCoord(oldt))
        elif isinstance(vxvv, Orbit):
            orbit = vxvv
        else:
            orbit = Orbit(vxvv=vxvv, **kw)
        self._data.append(oldt, orbit)
        self._data.direction = "forward"
        # potential
        # need to check to switch potential b/c can be switching location
        if pot is not None:  # set new default potential
            self._pot = pot
        else:  # get potential from forwardest orbit
            self._pot = self._orbits[ind]._pot
        return self
    # /def
    def add_backward_orbit(self, pot=None, vxvv=None, **kw):
        """Start a backward orbit.

        The new segment continues from the earliest integrated time of the
        backward-most segment and sets the direction to 'backward'.

        NOTE(review): unlike add_forward_orbit, this does not call
        _check_curr_orbit_integrated -- confirm that is intentional.

        Parameters
        ----------
        pot: Potential, optional
            change the potential; otherwise taken from the backward-most
            segment
        vxvv: array, optional
            change initial conditions for this orbit

        Returns
        -------
        self
        """
        ind = self._data._indmin  # backward orbit key
        oldt = self._data.tmin
        # adding orbit
        if vxvv is None:  # use backwardest orbit
            orbit = Orbit(vxvv=self._orbits[ind].SkyCoord(t=oldt))
        elif isinstance(vxvv, Orbit):  # add orbit as is
            orbit = vxvv
        else:
            orbit = Orbit(vxvv=vxvv, **kw)  # standard add orbit
        self._data.prepend(oldt, orbit)
        self._data.direction = "backward"
        # potential
        # need to check to switch potential b/c can be switching location
        if pot is not None:  # set new default potential
            self._pot = pot
        else:  # get potential from backwardest orbit
            self._pot = self._orbits[ind]._pot
        return self
    # /def
# +------------------------------+
# Integrating
def integrate(
self,
t,
pot=None,
method="symplec4_c",
dt=None,
fromprevt=True,
_print=False,
):
"""Galpy Orbit integrate wrapper.
potential always should be the same
In Galpy, the starting time value does not matter
sets the time based off integration time, so don't just do (0,...)
actually set the correct time
Parameters
----------
fromprevt:
adjust time so that integrating from the end of the last orbit
Returns
-------
self
TODO
----
assume that anything without times is in Gyr
"""
# Adjust time
if not issubclass(t.__class__, u.Quantity):
t = t * u.Gyr
if self._data.direction == "backward":
if t[1] > t[0]:
t = t[::-1]
if fromprevt:
t = self._data.tref + (t - t[0])
# Potential
if pot is not None: # keep potential current
self._pot = pot # store potential
else: # potential already set
pot = self._pot # set potential
# Integrating
if _print:
print(f"integrating: {t[0]:.2f} : {t[-1]:.2f} : ({len(t)})")
self.o.integrate(t, pot, method=method, dt=dt)
if _print:
print(f"integrated") # TODO make write on previous line
# Cleanup
drct = self._data.direction
if drct == "forward":
self.time = t
self._bounds = [t[0], t[-1]]
else:
self.time = t[::-1]
self._bounds = [t[-1], t[0]]
return self
# /def
# +---------- SKYCOORD ----------+
    def SkyCoord(
        self,
        t=None,
        ind=None,
        *args,
        frame: Optional[SkyCoord]=None,
        return_t: bool=False,
        T: bool=False,
        **kw,
    ):
        """Evaluate the orbit segments as a single astropy SkyCoord.

        Parameters
        ----------
        t: array_like
            time
        ind: int
            the index of orbits within each segment to take skycoord
        frame: optional
            output frame; defaults to the current orbit's frame
        return_t: bool
            also return the evaluation times
        T: bool
            return the transposed SkyCoord

        .. note::
            args and kw do nothing right now

        Returns
        -------
        SkyCoord
        ts
            if return_t is True
        """
        # output frame
        if frame is None:  # inherit current frame
            frame = self.o.SkyCoord()
        # astropy bug. not the same galcen_v_sun
        if hasattr(frame, "galcen_v_sun"):
            frame_galcen_v_sun = frame.galcen_v_sun
        else:
            frame_galcen_v_sun = None  # will update later
        # taking care of index options, slices & lists are naturally supported
        if ind in (None, Ellipsis):
            ind = slice(None)
        scs, ts = [], []  # initializing SkyCoord, time arrays
        # iterating through orbits
        for i, time in self.iterator(t):  # orbit segment, time w/in segment
            if not isinstance(i, int):
                raise ValueError("i not int")
            try:  # getting at times
                sc = self._orbits[i][ind].SkyCoord(time).transform_to(frame)
            except ValueError:  # not integrated, get initial conditions
                sc = self._orbits[i][ind].SkyCoord().transform_to(frame)
            except IndexError:
                # segment doesn't support sub-indexing; evaluate whole
                sc = self._orbits[i].SkyCoord(time).transform_to(frame)
            # astropy bug. not the same, even when should be
            if hasattr(sc, "galcen_v_sun"):
                if frame_galcen_v_sun is None:  # b/c no provided frame
                    frame_galcen_v_sun = sc.galcen_v_sun
                sc.galcen_v_sun = frame_galcen_v_sun
            scs.append(sc.T)
            ts.append(time)
        # /for
        try:
            scs = concatenate(scs)
        except ValueError:  # only 1 point
            scs = scs[0]
        if not T:
            scs = scs.T
        if return_t:
            return scs, astrarray(ts).flatten()
        else:
            return scs
    # /def
def getOrbit(self):
"""Get Orbit.
concatenate along time axis
"""
orbs = [self._orbits[i].getOrbit() for i in range(self.numsegs)]
return np.concatenate(orbs, axis=1)
# /def
###########################################################################
# Plotting
def _selfinstr(self, s):
if not isinstance(s, str):
raise TypeError(f"{s} is not <str>")
if s.startswith("self."):
return s
else:
return "self." + s
# /def
    def _attrprep(self, x, frame=None):
        """Resolve `x`: a string is eval'd as a self-attribute expression.

        NOTE(review): uses eval on the given string -- only ever pass
        trusted, code-controlled attribute names.
        `frame` is currently unused.
        """
        if isinstance(x, str):
            return eval(f"{self._selfinstr(x)}")
        else:
            return x
    # /def
    @mpl_decorator(funcdoc=plt.hist)
    def hist(self, attr, bins=10, **kw):
        """Histogram of an attribute (or of given data).

        Parameters
        ----------
        attr: str or array
            attribute name (resolved via _attrprep) or data
        bins:
            the bins for plt.hist
        **kw
            further keyword arguments for plt.hist
        """
        if "label" in kw:
            kw["label"] = str(kw["label"])
        # print(self._attrprep(attr))
        try:
            res = plt.hist(self._attrprep(attr), bins=bins, **kw)
        except AttributeError:
            # retry on the attribute's underlying data container
            res = plt.hist(self._attrprep(attr + "._data"), bins=bins, **kw)
        return res
    # /def
@mpl_decorator(funcdoc=plt.scatter)
def plot_orbit(
self,
d1="ra",
d2="dec",
t="full",
ind=None,
label=None,
frame=None,
**kw,
):
"""Plot the orbits.
# TODO support changing representation_type
"""
label = None if label in (None, False) else f"orbit {self.orbind}"
# origin = self.SkyCoord(t=0*u.s, frame=frame)
orbit = self.SkyCoord(
t=t, ind=ind, T=True, frame=frame
) # TODO not SkyCoord method?
xlabel = kw.pop("xlabel", d1)
ylabel = kw.pop("ylabel", d2)
# plt.scatter(getattr(origin, d1), getattr(origin, d2), label=label,
# s=20, c='r')
res = plt.plot(
getattr(orbit, d1),
getattr(orbit, d2),
label=label,
xlabel=xlabel,
ylabel=ylabel,
**kw,
)
return res
# /def
    def plotOrbit(
        self,
        d1="ra",
        d2="dec",
        t="full",
        ind=None,
        label=None,
        frame=None,
        **kw,
    ):
        """Plot the orbits (camelCase alias for .plot_orbit)."""
        return self.plot_orbit(
            d1=d1, d2=d2, t=t, ind=ind, label=label, frame=frame, **kw
        )
    # /def
    @mpl_decorator()
    def plot(self, x, y, pltype="plot", **kw):
        """General plot function.

        Parameters
        ----------
        x: str or array
            attribute name (resolved via _attrprep) or data
        y: str or array
            attribute name (resolved via _attrprep) or data
        pltype: str
            'scatter', 'errorbar', or any pltype for plt.plot

        TODO
        ----
        support changing frame, representation_type
        """
        # x & y may be attribute-name strings
        x = self._attrprep(x)
        y = self._attrprep(y)
        if pltype == "scatter":
            line = plt.scatter(x, y, **kw)
        elif pltype == "errorbar":
            # error values may also be attribute-name strings
            xerr = self._attrprep(kw.pop("xerr", None))
            yerr = self._attrprep(kw.pop("yerr", None))
            line = plt.errorbar(x, y, xerr=xerr, yerr=yerr, **kw)
        else:
            line = plt.plot(x, y, pltype=pltype, **kw)
        return line
    # /def
    @mpl_decorator(funcdoc=plt.scatter)
    def scatter(self, x, y, **kw):
        """Plot with pltype='scatter' (see .plot)."""
        return self.plot(x, y, pltype="scatter", **kw)
    # /def
    @mpl_decorator(funcdoc=plt.errorbar)
    def errorbar(self, x, y, xerr=None, yerr=None, **kw):
        """Plot with pltype='errorbar' (see .plot).

        Parameters
        ----------
        x: str or array
        y: str or array
        xerr: str or array, optional
        yerr: str or array, optional
        **kw
            further keyword arguments for plt.errorbar
        """
        return self.plot(x, y, pltype="errorbar", xerr=xerr, yerr=yerr, **kw)
    # /def
#############################################
# Plot Helper Methods
@classmethod
def plotoptions(cls):
"""Print plotting functions & their signatures.
Info
----
General
.plot(x, y, pltype, )
.scatter(x, y, )
.errorbar(x, y, xerr, yerr, )
.hist(attr, bins, )
Specific
.plotOrbit(label, s):
plot an orbit
"""
print(cls.plothelp.__doc__)
# /def
    @classmethod
    def printcmaps(cls, **kw):
        """Colormap options.
        Perceptually Uniform Sequential:
            'viridis', 'plasma', 'inferno', 'magma'
        Sequential:
            'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',
            'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',
            'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn'
        Sequential:
            'binary', 'gist_yarg', 'gist_gray', 'gray', 'bone', 'pink',
            'spring', 'summer', 'autumn', 'winter', 'cool', 'Wistia',
            'hot', 'afmhot', 'gist_heat', 'copper'
        Diverging:
            'PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu',
            'RdYlBu', 'RdYlGn', 'Spectral', 'coolwarm', 'bwr', 'seismic'
        Qualitative:
            'Pastel1', 'Pastel2', 'Paired', 'Accent',
            'Dark2', 'Set1', 'Set2', 'Set3',
            'tab10', 'tab20', 'tab20b', 'tab20c'
        Miscellaneous:
            'flag', 'prism', 'ocean', 'gist_earth', 'terrain', 'gist_stern',
            'gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix', 'brg', 'hsv',
            'gist_rainbow', 'rainbow', 'jet', 'nipy_spectral', 'gist_ncar'
        """
        # prints this method's own docstring (kw forwarded to print);
        # the docstring text IS the runtime output, so leave it unchanged
        print(cls.printcmaps.__doc__, **kw)
    # /def
| 30,641 | 26.31016 | 79 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/scripts/imports.py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : Standard Import File
# AUTHOR : Nathaniel Starkman
# PROJECT : Pal-5-in-Gaia-DR2
#
# ----------------------------------------------------------------------------
# Docstring and MetaData
"""Standard Imports File.
Returns
-------
scipy.stats.binned_statistic, poisson
src:
makefilepaths, load_Pal5_orbit_and_stream, progenitors, select
.plot.plot_sky_window, plot_proper_motions, plot_data_along_orbit,
plot_data_along_orbit_in_window, plot_data_orbit_projection
projection.split_arms, get_data_along_orbit, get_data_along_orbit,
select_stars_in_an_arm, digitize_star_along_stream
select.inRange
util.pickle, quadrature
.isochrone.CMD3p1TableRead
.coordinates.make_stream_frame
.CustomCoords.register_stream_frame
"""
__author__ = "Nathaniel Starkman"
##############################################################################
# HELPER FUNCTIONS
from astroPHD.config import __config__
from astroPHD.decorators.docstring import (
_set_docstring_import_file_helper,
_import_file_docstring_helper
)
##############################################################################
# IMPORTS
# GENERAL
import copy
import sys; sys.path.insert(0, '../'); sys.path.insert(0, '../../')
# numpy
from scipy.stats import binned_statistic, poisson
# CUSTOM
from astroPHD.plot import starkplot as plt
# PROJECT-SPECIFIC
from src import (
makefilepaths,
load_Pal5_orbit_and_stream,
progenitors,
select)
from src.orbit import SequentialOrbits
from src.select import inRange
from src.util.isochrone import CMD3p1TableRead
from src.util.coordinates import make_stream_frame
from src.util import pickle
from src.util.coordinates.CustomCoords import register_stream_frame
# from src.projection import split_arms, get_data_along_orbit
from src.projection import get_data_along_orbit
from src.projection import select_stars_in_an_arm, digitize_star_along_stream
from src.plot import (
plot_sky_window, plot_proper_motions, plot_data_along_orbit,
plot_data_along_orbit_in_window, plot_data_orbit_projection
)
##############################################################################
# Printing Information
@_set_docstring_import_file_helper('base', __doc__)  # doc from __doc__
def base_imports_help():
    """Help for Matplotlib base imports."""
    # NOTE(review): the decorator appears to replace __doc__ with the
    # module docstring -- confirm; the resulting doc is printed formatted
    _import_file_docstring_helper(base_imports_help.__doc__)  # formatting
# /def
# print the import summary when verbose-imports is enabled in the config
if __config__.getboolean('verbosity', 'verbose-imports'):
    base_imports_help()
##############################################################################
# END
| 2,738 | 26.118812 | 78 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/scripts/make_ibata16_modified/make_ibata16_modified.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : make Ibata 2016 CFHT table
# AUTHOR : Nathaniel Starkman
# PROJECT : Palomar 5 in Gaia DR2
#
# ----------------------------------------------------------------------------
### Docstring and Metadata
"""script: Make Modified Ibata 2016 Table
"""
__author__ = "Nathaniel Starkman"
##############################################################################
### Imports
## General
import os, sys, time, pdb, copy
# import numpy as np
## Astropy
from astropy.table import Table, QTable
## Gaia-tools
from gaia_tools.util import table_utils as tutil
## Custom
sys.path.insert(0, '../../')
from src.util.logging import LogFile
## Project-Specific
##############################################################################
### Parameters & Setup
# Script body: read the Ibata 2016 CFHT Pal 5 table, rename columns to the
# project conventions, add a g-r color column, and write the modified table.
# General
_PLOT = True  # Plot the output
# Data
_IN_FILE = '../../data/ibata2016cfht/pal5.fit'
_IN_FILE_FORMAT = 'fits'
_OUT_FILE = '../../data/ibata2016cfht/pal5_modified.fit'
_OUT_FILE_FORMAT = 'fits'
# Log file
# NOTE(review): leftover debug print? confirm whether intentional.
print(__name__)
_LOGFILE = LogFile.open('./log.txt', verbose=2, mode='w',
                        header='Make Ibata 2016 Modified')
# ----------------------------------------------------------------------------
### Setup
_LOGFILE.write('opening ' + _IN_FILE)
dfo = Table.read(_IN_FILE, format=_IN_FILE_FORMAT);
_LOGFILE.write(f'opened\ntable columns: {dfo.colnames}', endsection=True)
##############################################################################
### Running the Script
# Renaming columns
_LOGFILE.write('renaming columns to...')
# Map the VizieR-style column names to short project names.
tutil.rename_columns(
    dfo,
    ['E_B-V_', 'E(B-V)'],
    _RAJ2000='ra', _DEJ2000='dec',
    g0mag='g', e_g0mag='g_err', r0mag="r", e_r0mag='r_err',
    _Glon='L', _Glat='B'
)
_LOGFILE.write(dfo.colnames)
# Adding color column
_LOGFILE.write('adding g-r color column', start='\n')
tutil.add_color_col(dfo, 'g', 'r', color='g-r')
# New Table
_LOGFILE.write('Making new QTable w/ all columns except RAJ2000, DEJ2000',
               start='\n')
# Drop the original epoch columns; keep everything else as a QTable.
names = tutil.drop_colnames(dfo.colnames, 'RAJ2000', 'DEJ2000')
df = QTable(dfo[names])
_LOGFILE.write(f'new table columns: {df.colnames}', start='\n')
# Writing
_LOGFILE.write('saving to ' + _OUT_FILE, start='\n')
df.write(_OUT_FILE, format=_OUT_FILE_FORMAT, overwrite=True)
##############################################################################
### Closing
_LOGFILE.close()
| 2,535 | 24.877551 | 78 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/scripts/get_gaia_window/make_gaia_query.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : make gaia query
# AUTHOR : Nathaniel Starkman
# PROJECT : Palomar 5 in Gaia DR2
#
# ----------------------------------------------------------------------------
### Docstring and Metadata
"""script: Make Gaia Query
"""
__author__ = "Nathaniel Starkman"
##############################################################################
### IMPORTS
## General
import numpy as np
from scipy.linalg import norm
import copy
# astropy
from astropy import units as u
from astropy.table import Table, QTable, vstack
from astropy.coordinates import SkyCoord
# galpy & gaia-tools & mwdust
from galpy.potential import MWPotential2014
from gaia_tools.query import cache, make_query
from gaia_tools.util import table_utils, json
from gaia_tools.xmatch import xmatch
## Custom
import starkplot as plt
import sys; sys.path.insert(0, '../../')
from src import LogFile, ObjDict
## Project-Specific
from src import makefilepaths, load_Pal5_orbit_and_stream
from src.orbit import SequentialOrbits
# util
from src.util import MegaCamGen1_from_PS1 as MCG1_PS1
from src.util.mwdust_util import load_dust_gri
from src.util.pickle import dump
# coordinates
from src.util.coordinates.CustomCoords import make_stream_frame, register_stream_frame
##############################################################################
### PARAMETERS & SETUP
# Log file
_LOGFILE = LogFile(header=False) # LogPrint, which is compatible
##############################################################################
### FUNCTION
def query_at_point(orbit, point, philims,
                   dt=2 * u.Myr, is_nbody=False,
                   max_separation=.5 * u.kpc, return_transforms=False,
                   as_qtable=True,
                   # for make_query
                   local=False,
                   _random_index=None,
                   # logging
                   plot=False, pprint=False,
                   save_window=False, save_frame=False,
                   logger=_LOGFILE, verbose=None):
    """Query Gaia around one point of an orbit, in a rotated stream frame.

    Parameters
    ----------
    orbit : SkyCoord
        icrs orbit
    point : SkyCoord
        point on `orbit` about which the rotated (phi1, phi2) frame is built
    philims : list
        [phi1min, phi1max, phi2min, phi2max] angular Quantities
    dt : Quantity
        physical_motion timestep for the frame construction
    is_nbody : bool
        whether `orbit` is an n-body stream rather than an integrated orbit
    max_separation : Quantity
        maximum separation used by make_stream_frame
    return_transforms : bool
        if True, also return the custom Frame and coordinate transforms
    as_qtable : bool
        if False, downcast the query result from QTable to Table
    local : bool
        passed to make_query: run against a local database
    _random_index : int or None
        Gaia random_index cut, for testing on a subsample
    plot : bool
        if True, make and save the diagnostic ra/dec plot
    pprint : bool
        pretty-print the generated ADQL query
    save_window : bool or str
        False: skip / True: 'output/window.fit' / str: save at that path
    save_frame : bool or str
        False: skip / True: 'output/MasterFrame.pkl' / str: save at that path
    logger : LogFile
    verbose : int or None

    Returns
    -------
    df : QTable or Table
    Frame, coordtransforms
        only when `return_transforms` is True
    """
    # -------------------------------
    # Rotated frame about `point`
    # BUG FIX: `logger=logfile` referenced an undefined name; use the
    # `logger` argument.
    Frame, coordtransforms = make_stream_frame(
        orbit, point, method='path', dt=dt,
        is_nbody=is_nbody, max_separation=max_separation,
        return_transforms=True,
        logger=logger, verbose=verbose)
    Frame = copy.copy(Frame)  # TODO copying
    coordtransforms = copy.copy(coordtransforms)  # TODO copying

    rotm = coordtransforms['R_icrs_to_cust']
    ra_ngp, dec_ngp = coordtransforms['ra_ngp', 'dec_ngp']

    # -------------------------------
    # the query
    (phi1min, phi1max, phi2min, phi2max) = philims

    # ADQL substitution values: rotation-matrix elements, NGP terms and the
    # window limits in radians (as the query templates expect).
    l3userasdict = {
        'K00': rotm[0, 0], 'K01': rotm[0, 1], 'K02': rotm[0, 2],
        'K10': rotm[1, 0], 'K11': rotm[1, 1], 'K12': rotm[1, 2],
        'K20': rotm[2, 0], 'K21': rotm[2, 1], 'K22': rotm[2, 2],
        'sindecngp': np.sin(dec_ngp),
        'cosdecngp': np.cos(dec_ngp),
        'mcosdecngp': -np.cos(dec_ngp),
        'mrangp': -ra_ngp.to_value('deg'),
        'phi1min': phi1min.to_value('rad'), 'phi1max': phi1max.to_value('rad'),
        'phi2min': phi2min.to_value('rad'), 'phi2max': phi2max.to_value('rad'),
    }

    with open('skydataselection.json', 'r') as file:
        querystr = json.strjoinall(json.load(file))

    # Four nested ADQL queries: the innermost selects the raw columns; the
    # outer layers add derived columns and the final selection / ordering.
    df = make_query(
        panstarrs1=True, defaults='full',
        user_cols=querystr['l3cols'], use_AS=True, user_ASdict=l3userasdict,
        WHERE=querystr['l3sel'],
        ORDERBY='gaia.source_id',
        pprint=pprint,
        # doing query
        do_query=True, units=True, local=local,
        # Inner Query
        FROM=make_query(
            user_cols=querystr['l2cols'], defaults='full',
            # Second Query
            FROM=make_query(
                user_cols=querystr['l1cols'], defaults='full',
                # Innermost Query
                FROM=make_query(
                    defaults='full', inmostquery=True,
                    user_cols=querystr['l0cols'],
                    # random index for testing
                    random_index=_random_index,
                ))))

    if not as_qtable:
        df = Table(df)

    # -------------------------------
    # Saving
    if save_window:
        fpath = save_window if isinstance(save_window, str) else 'output/window.fit'
        dump(df, fpath)

    if save_frame:
        fpath = save_frame if isinstance(save_frame, str) else 'output/MasterFrame.pkl'
        dump([Frame, coordtransforms], fpath)

    # -------------------------------
    # Plotting
    # BUG FIX: plotting previously ran unconditionally, ignoring `plot`.
    if plot:
        plt.scatter(df['ra'], df['dec'], s=.1, c='b')
        plt.plot(orbit.icrs.ra, orbit.icrs.dec, c='k')
        plt.scatter(point.icrs.ra, point.icrs.dec, s=100, c='r')
        # BUG FIX: savefig string was missing its f-prefix, and the axis
        # labels used '\a' (the BEL escape) -- use raw mathtext strings.
        plt.set(title=f'Query around point {point.icrs.ra:.2},{point.icrs.dec:.2}',
                xlabel=r'$\alpha$ [deg]', ylabel=r'$\delta$ [deg]',
                savefig=f'plots/query_around_point{point.icrs.ra:.2}-{point.icrs.dec:.2}.png')

    # -------------------------------
    # Returning
    if return_transforms:
        return df, Frame, coordtransforms
    return df
# /def
##############################################################################
### query_along_orbit
def query_along_orbit(orbit, frame_point, frame_philims,
                      # points
                      points_inds=(), points_philims=None,
                      # options
                      frame_dt=1.9 * u.Myr, points_dt=None,
                      max_separation=.5 * u.kpc, return_transforms=True,
                      # for make_query
                      local=False,
                      _random_index=None,
                      #
                      distance=23.2 * u.kpc,
                      duststr='output/ps1dust_{}.dat',
                      # saving and logging
                      pprint=False,
                      save_window=False, save_frame=False,
                      logger=_LOGFILE, verbose=None):
    """Query Gaia in windows along an orbit and merge into one catalog.

    A master rotated frame is built about `frame_point`; additional queries
    are run about `orbit[i]` for each i in `points_inds`, cross-matched
    against the running result, and only unmatched rows are appended. The
    merged table is then augmented with stream-frame coordinates, colors,
    |pm|, dust-corrected magnitudes and MegaCam conversions.

    Parameters
    ----------
    orbit : SkyCoord
        icrs orbit
    frame_point : SkyCoord
        point about which the master rotated frame is constructed
    frame_philims : list
        [phi1min, phi1max, phi2min, phi2max] for the master window
    points_inds : sequence of int
        indices into `orbit` for the extra query windows
        (immutable default on purpose -- never mutated here)
    points_philims : list of lists or None
        per-point phi-limit lists (len 4); None repeats `frame_philims`
    frame_dt : Quantity
        frame-construction timestep for the master frame
    points_dt : sequence or None
        per-point timesteps; None repeats `frame_dt`
    max_separation : Quantity
    return_transforms : bool
    local : bool
        passed to make_query: run against a local database
    _random_index : int or None
        Gaia random_index cut, for testing
    distance : Quantity
        distance used for the dust corrections
    duststr : str
        format string for the per-band dust files
    pprint : bool
        pretty-print the generated ADQL
    save_window : bool or str
        False: skip / True: 'output/window.fits' / str: save at that path
    save_frame : bool or str
        save the master Frame + transforms (see query_at_point)
    logger : LogFile
    verbose : int or None

    Returns
    -------
    df : QTable
    MasterFrame, crd_xfm

    Raises
    ------
    ValueError
        if points_philims / points_dt lengths do not match points_inds
    """
    # -------------------------------------------------------------------------
    # Preparing
    # checks
    assert isinstance(save_window, (bool, str))
    assert isinstance(save_frame, (bool, str))

    # points_philims
    if points_philims is None:
        points_philims = (frame_philims, ) * len(points_inds)
    elif len(points_philims) != len(points_inds):
        raise ValueError('points_philims do not match points_inds in length')

    # dt
    if points_dt is None:
        points_dt = (frame_dt, ) * len(points_inds)
    elif len(points_dt) != len(points_inds):
        # BUG FIX: was `return ValueError(...)`, which silently handed the
        # exception object to the caller instead of raising it.
        raise ValueError('points_dt do not match points_inds in length')

    # -------------------------------------------------------------------------
    # preplotting: orbit and the chosen query points
    plt.plot(orbit.icrs.ra, orbit.icrs.dec, c='k')
    plt.scatter(frame_point.icrs.ra, frame_point.icrs.dec, s=100, c='r')
    for i in points_inds:
        plt.scatter(orbit.icrs.ra[i], orbit.icrs.dec[i], s=50, c='k')
    # BUG FIX: labels used '\a' (the BEL escape); use raw mathtext strings.
    plt.set(title='Query Along Orbit',
            xlabel=r'$\alpha$ [deg]', ylabel=r'$\delta$ [deg]',
            savefig='plots/query_along_point.png')

    # -------------------------------------------------------------------------
    # the master custom frame
    # BUG FIX (here and below): `logfile` was an undefined name; use `logger`.
    logger.newsection(title='Starting Query:', div='=')
    df, MasterFrame, crd_xfm = query_at_point(
        orbit, frame_point, frame_philims, dt=frame_dt, is_nbody=False,
        max_separation=max_separation, return_transforms=True,
        as_qtable=False,
        local=local, _random_index=_random_index,
        pprint=pprint,
        save_window=False, save_frame=save_frame,
        logger=logger, verbose=verbose
    )

    # -------------------------------------------------------------------------
    # Querying at points
    for i, (ind, pdt, philims) in enumerate(zip(points_inds, points_dt,
                                                points_philims)):
        point = orbit[ind]
        logger.newsection(title=f'Query @ Point {i+1}:', div='=')
        pointdf = query_at_point(
            orbit, point, philims, dt=pdt, is_nbody=False,
            max_separation=max_separation, return_transforms=False,
            as_qtable=False,
            local=local, _random_index=_random_index,
            pprint=pprint,
            logger=logger, verbose=verbose)

        # xmatch the catalogs to identify duplicates
        _, idx2, _ = xmatch(
            df, pointdf,
            colRA1='ra', colDec1='dec', epoch1=2015.5,
            colRA2='ra', colDec2='dec', epoch2=2015.5)

        # keep only the rows of pointdf that matched nothing in df
        inds = np.arange(len(pointdf))  # full index array for comparison
        nm = list(set(inds).difference(idx2))  # not matched values
        df = vstack([df, pointdf[nm]])

    # -------------------------------------------------------------------------
    # modifying table
    register_stream_frame(crd_xfm.R_icrs_to_cust)

    # replace the custom coordinates by values from MasterFrame
    sc = SkyCoord(ra=df['ra'], dec=df['dec'],
                  pm_ra_cosdec=df['pmra'], pm_dec=df['pmdec'],
                  frame='icrs', representation_type='spherical'
                  ).transform_to(MasterFrame)
    sc.phi1.wrap_angle = '180d'

    # BUG FIX: `.to(deg)` referenced an undefined name; use `u.deg`.
    df['phi1'] = sc.phi1.to(u.deg)
    df['phi2'] = sc.phi2.to(u.deg)
    df['pmphi1'] = sc.pm_phi1_cosphi2
    df['pmphi2'] = sc.pm_phi2

    df = QTable(df)
    df['prlx'][df['prlx'] < 0] = np.NaN  # negative parallaxes -> NaN

    table_utils.add_color_col(df, 'g', 'r')
    table_utils.add_color_col(df, 'g', 'i')
    table_utils.add_abs_pm_col(df, 'pmra', 'pmdec')
    logger.write("adding color columns", "did g-r", "did g-i", "did |pm|",
                 sep='\n\t', end='\n')

    # -------------------------------------------------------------------------
    # Dust
    ps1dust = load_dust_gri(duststr, df=df, distance=distance)

    # Making color corrections
    logger.write("Dextincting",
                 "g -> g dx", "r -> r dx", "i -> i dx",
                 "g-r -> g-r dx", "g-i -> g-i dx", "r-i -> r-i dx",
                 sep='\n\t', end='\n')
    df['g dx'] = df['g'] - ps1dust['g']
    df['r dx'] = df['r'] - ps1dust['r']
    df['i dx'] = df['i'] - ps1dust['i']
    df['g-r dx'] = df['g dx'] - df['r dx']
    df['g-i dx'] = df['g dx'] - df['i dx']
    df['r-i dx'] = df['r dx'] - df['i dx']
    for c in ('g', 'r', 'i', 'g-r', 'g-i', 'r-i'):
        setattr(df[c + ' dx'].info, 'description', 'de-extincted w/ mwdust')

    # Adding in Conversion to MegaCam
    df['g MC'] = MCG1_PS1.G_MP9401(df, g='g dx', r='r dx', gmi='g-r dx')
    df['r MC'] = MCG1_PS1.R_MP9601(df, g='g dx', r='r dx', gmi='g-r dx')
    df['g-r MC'] = df['g MC'] - df['r MC']
    for c in ('g MC', 'r MC', 'g-r MC'):
        setattr(df[c].info, 'description', 'de-extincted & converted to MegaCam')

    # -------------------------------------------------------------------------
    # Saving
    df = QTable(df)
    if save_window:  # True or str
        fpath = save_window if isinstance(save_window, str) else 'output/window.fits'
        logger.write(f'saving to {fpath}')
        df.write(fpath, format='fits', overwrite=True)

    # -------------------------------------------------------------------------
    # postplotting: merged catalog over the orbit
    plt.scatter(df['ra'].to_value(u.deg), df['dec'].to_value(u.deg), s=.1, c='k')
    plt.plot(orbit.icrs.ra, orbit.icrs.dec, c='k')
    plt.scatter(frame_point.icrs.ra, frame_point.icrs.dec, s=100, c='r')
    for i in points_inds:
        plt.scatter(orbit.icrs.ra[i], orbit.icrs.dec[i], s=50, c='k')
    plt.set(title='Query Along Orbit',
            xlabel=r'$\alpha$ [deg]', ylabel=r'$\delta$ [deg]',
            savefig='plots/query_along_point.png')

    # -------------------------------------------------------------------------
    # returning
    return df, MasterFrame, crd_xfm
# /def
# ##############################################################################
# ### DONE
| 13,824 | 31.151163 | 89 | py |
Pal-5-in-Gaia-DR2 | Pal-5-in-Gaia-DR2-master/scripts/get_gaia_window/2019-6-11/do_query_script.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
#
# TITLE : make gaia query
# AUTHOR : Nathaniel Starkman
# PROJECT : Palomar 5 in Gaia DR2
#
# ----------------------------------------------------------------------------
### Docstring and Metadata
"""script: Make Gaia Query
"""
__author__ = "Nathaniel Starkman"
##############################################################################
### IMPORTS
# ## General
import numpy as np
# from scipy.linalg import norm
# # astropy
from astropy import units as u
# from astropy.table import Table, QTable, vstack
from astropy.coordinates import SkyCoord
# # galpy & gaia-tools & mwdust
from galpy.potential import MWPotential2014
# from gaia_tools.query import cache, make_query
# from gaia_tools.util import table_utils, json
# from gaia_tools.xmatch import xmatch
# ## Custom
# from matplotlib import pyplot as plt
import sys; sys.path.insert(0, '../../../')
from src import LogFile, ObjDict
# ## Project-Specific
from src import makefilepaths # , load_Pal5_orbit_and_stream
from src.orbit import SequentialOrbits
from scripts.get_gaia_window.make_gaia_query import query_along_orbit
# # util
# from src.util import MegaCamGen1_from_PS1 as MCG1_PS1
# from src.util.mwdust_util import load_dust_gri
# # coordinates
# from src.util.coordinates.CustomCoords import make_stream_frame
##############################################################################
### PARAMETERS & SETUP
# TODO have this in a __name__ == '__main__':
# Log file
_VERBOSE = 2
_LOGFILE = LogFile.open('./log.txt', verbose=_VERBOSE,
                        header='Make Gaia Query')
## General
_PLOT = True  # Plot the output
_LOCAL = True
_DUSTSTR = 'output/ps1dust_{}.dat'
## Orbit Parameters: hand-fit initial conditions for the leading and
## trailing arms, integrated forward/backward over t_arr.
vxvv_lead = SkyCoord(
    ra=229.022 * u.deg, dec=-.223 * u.deg, distance=22.5 * u.kpc,
    pm_ra=-2.128 * u.mas / u.yr, pm_dec=-2.18 * u.mas / u.yr,
    radial_velocity=-50 * u.km / u.s,
    differential_type='spherical'
)
vxvv_trail = SkyCoord(
    ra=229.022 * u.deg, dec=-.223 * u.deg, distance=22.5 * u.kpc,
    pm_ra=-2.0 * u.mas / u.yr, pm_dec=-2.18 * u.mas / u.yr,
    radial_velocity=-50 * u.km / u.s,
    differential_type='spherical'
)
t_arr = np.linspace(0, 90, num=10000) * u.Myr
## Query Parameters
_RANDOM_INDEX = None
_PHI1MIN, _PHI1MAX = -10 * u.deg, 10 * u.deg
_PHI2MIN, _PHI2MAX = -5 * u.deg, 5 * u.deg
_PHILIMS = [_PHI1MIN, _PHI1MAX, _PHI2MIN, _PHI2MAX]
_POINTS_INDS = [0, 3000, 6000, 13500, 17500, -1]
# Logging
_LOGFILE.write('Parameters:')
_LOGFILE.newsection(title='General:', div='.')
# BUG FIX: 'local: {_LOCAL}' was missing its f-prefix and logged the
# literal braces instead of the value.
_LOGFILE.write(f'plot: {_PLOT}', f'local: {_LOCAL}', sep='\n', end='\n\n')
_LOGFILE.newsection(title='Orbit Parameters', div='.')
_LOGFILE.write('vxvv_lead:', vxvv_lead)
_LOGFILE.write('vxvv_trail:', vxvv_trail)
_LOGFILE.write(f'integrating over {t_arr[0]}:{t_arr[-1]}:{len(t_arr)}')
_LOGFILE.newsection(title='Query Parameters', div='.')
_LOGFILE.write('Phi window dimensions:', f'phi1: {_PHI1MIN} : {_PHI1MAX}',
               f'phi2: {_PHI2MIN} : {_PHI2MAX}',
               f'random_index : {_RANDOM_INDEX}',
               sep='\n\t', end='\n\n')
# ----------------------------------------------------------------------------
## Setup
_LOGFILE.newsection(title='Setup:')
opts = makefilepaths(
    datapath='../../../',
    nbody='data/nbody/pal5.dat', nbodyformat='ascii.ecsv',
    skydata='scripts/get_gaia_window/base/output/window.fits',
    duststr=_DUSTSTR,
    logger=_LOGFILE, verbose=None)
_LOGFILE.newsection(title='Stream Hand Fit Orbit:', div='.')
# Integrate the hand-fit orbit forward (lead) and backward (trail).
o = SequentialOrbits(vxvv_lead).integrate(t_arr, MWPotential2014)
o.add_backward_orbit(vxvv_trail).integrate(t_arr, MWPotential2014)
stream_hand_fit = ObjDict(
    'Stream Hand Fit Orbit',
    o=o,
    icrs=o.SkyCoord(t='full', frame='icrs')
)
##############################################################################
### CODE
# Run the windowed Gaia query along the hand-fit orbit and save the merged
# catalog + master frame.
df, MasterFrame, crd_xfm = query_along_orbit(
    stream_hand_fit.icrs, stream_hand_fit.o[0].SkyCoord(),
    _PHILIMS,
    points_inds=_POINTS_INDS,
    _random_index=_RANDOM_INDEX,
    duststr='output/ps1dust_{}.dat',
    local=_LOCAL,
    pprint=True,
    save_window=True, save_frame=True,
    logger=_LOGFILE
)
##############################################################################
### CLOSING
stream_hand_fit.save('output/stream_hand_fit.pkl')
_LOGFILE.close()
##############################################################################
### DONE
| 5,170 | 28.890173 | 78 | py |
irbl | irbl-master/src/main.py | import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
from sklearn import datasets
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import os
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
import torch
from sklearn.isotonic import IsotonicRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression as SKLearnLogisticRegression
from sklearn.base import clone
from scipy import stats
import math
import Orange
from skorch import NeuralNetClassifier,NeuralNetBinaryClassifier
import itertools
import random
class LogisticRegressionSoftmax(torch.nn.Module):
    """Multinomial logistic regression: one linear layer plus a softmax
    over the class dimension."""

    def __init__(self, input_size, output_size):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.fc = torch.nn.Linear(input_size, output_size)

    def forward(self, x):
        logits = self.fc(x)
        return torch.nn.functional.softmax(logits, dim=1)
class BinaryLogisticRegression(torch.nn.Module):
    """Binary logistic regression head: a single linear unit returning a
    raw (pre-sigmoid) score per sample."""

    def __init__(self, input_size):
        super().__init__()
        self.input_size = input_size
        self.fc = torch.nn.Linear(input_size, 1)

    def forward(self, x):
        score = self.fc(x)
        return score
class LogisticRegression(torch.nn.Module):
    """Linear model returning raw (unnormalized) class logits."""

    def __init__(self, input_size, output_size):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.fc = torch.nn.Linear(input_size, output_size)

    def forward(self, x):
        logits = self.fc(x)
        return logits
class MLP(torch.nn.Module):
    """One-hidden-layer perceptron: Linear -> ReLU -> Linear.

    Attribute names (fc1 / ReLU / fc2) are kept for state-dict
    compatibility with existing checkpoints.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.fc1 = torch.nn.Linear(input_size, hidden_size)
        self.ReLU = torch.nn.ReLU()
        self.fc2 = torch.nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # fc1 -> ReLU -> fc2, chained instead of named temporaries
        return self.fc2(self.ReLU(self.fc1(x)))
def friedman_test(*args, reverse=False):
    """Iman-Davenport corrected Friedman test for k related samples.

    Parameters
    ----------
    *args : sequences
        one sequence of measurements per method/level; all must share the
        same length n (one entry per dataset/case).
    reverse : bool
        if True, larger values get the better (lower) rank.

    Returns
    -------
    iman_davenport : float
        Iman-Davenport F statistic.
    p_value : float
        p-value from the F(k-1, (k-1)(n-1)) distribution.
    rankings_avg : list of float
        average rank of each method (ties get the average of their ranks).
    rankings_cmp : list of float
        average ranks scaled by sqrt(k(k+1)/(6n)) for post-hoc comparison.

    Raises
    ------
    ValueError
        if fewer than two levels, or the sample sizes differ.
    """
    k = len(args)
    if k < 2:
        raise ValueError('Less than 2 levels')
    n = len(args[0])
    if len(set(len(v) for v in args)) != 1:
        raise ValueError('Unequal number of samples')

    # Per-case ranks; tied values receive the average of their ranks.
    rankings = []
    for i in range(n):
        row = [col[i] for col in args]
        row_sort = sorted(row, reverse=reverse)
        rankings.append(
            [row_sort.index(v) + 1 + (row_sort.count(v) - 1) / 2. for v in row])

    rankings_avg = [np.mean([case[j] for case in rankings]) for j in range(k)]
    rankings_cmp = [r / np.sqrt(k * (k + 1) / (6. * n)) for r in rankings_avg]

    # BUG FIX: np.sum(<generator>) is deprecated and unreliable across NumPy
    # versions; use the builtin sum.
    chi2 = ((12 * n) / float(k * (k + 1))) * (
        sum(r ** 2 for r in rankings_avg) - (k * (k + 1) ** 2) / float(4))
    iman_davenport = ((n - 1) * chi2) / float(n * (k - 1) - chi2)
    p_value = 1 - stats.f.cdf(iman_davenport, k - 1, (k - 1) * (n - 1))

    return iman_davenport, p_value, rankings_avg, rankings_cmp
def wilcoxon_test(score_A, score_B):
    """Wilcoxon signed-rank z test between two paired score lists.

    Ranks are the positions of the absolute differences B - A after
    sorting; the signed-rank sum is normalized to a z statistic and the
    null hypothesis is rejected at the 5% level (|z| > 1.96).

    Returns
    -------
    (z, null_hypothesis_rejected) : (float, bool)
    """
    # signed differences B - A
    deltas = [b - a for a, b in zip(score_A, score_B)]
    n_r = float(len(deltas))

    # order by absolute difference; keep the sign of each delta
    ordered = sorted(((abs(d), np.sign(d)) for d in deltas),
                     key=lambda pair: pair[0])

    # signed-rank sum: rank = 1-based position in the sorted order
    w = sum(rank * sign for rank, (_, sign) in enumerate(ordered, start=1))

    z = w / math.sqrt(n_r * (n_r + 1.0) * (2.0 * n_r + 1.0) / 6.0)
    return z, bool(z < -1.96 or z > 1.96)
def noisy_completly_at_random(y, ratio):
    """Corrupt labels completely at random.

    Each label is independently selected with probability `ratio`; selected
    labels are replaced by a fair coin flip (0/1). Returns a corrupted copy;
    `y` is left untouched. RNG draw order matches the original
    implementation (selection mask first, then replacement values).
    """
    shape = y.shape
    flip_mask = np.random.binomial(1, ratio, shape)
    selected = flip_mask == 1
    replacements = np.random.binomial(1, 0.5, len(y[selected]))
    corrupted = np.copy(y)
    corrupted[selected] = replacements
    return corrupted
def noisy_not_at_random(proba, y, ratio):
    """Corrupt labels with a probability that depends on `proba`.

    The per-sample flip probability is 1 - (1-ratio)*|1-2p|^(1/(1-ratio))
    (all ones when ratio == 1), so confidently-classified samples (p near
    0 or 1) are flipped less often. Flipped labels become fair coin flips.
    Returns a corrupted copy; `y` is untouched.
    """
    shape = y.shape
    if ratio == 1:
        flip_prob = np.full_like(proba, 1)
    else:
        flip_prob = 1 - (1 - ratio) * np.power(np.abs(1 - 2 * proba),
                                               1 / (1 - ratio))
    selected = np.random.binomial(1, flip_prob, shape) == 1
    replacements = np.random.binomial(1, 0.5, len(y[selected]))
    corrupted = np.copy(y)
    corrupted[selected] = replacements
    return corrupted
def split_dataset(dataset, split):
    """Stratified split of `dataset` into (train, val) XYDatasets.

    `split` is the validation fraction; stratification uses the labels.
    """
    all_indices = list(range(len(dataset)))
    idx_train, idx_val = train_test_split(all_indices, test_size=split,
                                          stratify=dataset[:][1])
    train_part = torch.utils.data.Subset(dataset, idx_train)
    val_part = torch.utils.data.Subset(dataset, idx_val)
    return (XYDataset(train_part[:][0], train_part[:][1]),
            XYDataset(val_part[:][0], val_part[:][1]))
def split_scale_dataset(dataset, split):
    """Stratified split into (train, val), standard-scaled by train stats.

    The StandardScaler is fit on the training portion only. An empty
    validation split is returned unscaled (as a Subset) to avoid
    transforming zero rows.
    """
    all_indices = list(range(len(dataset)))
    idx_train, idx_val = train_test_split(all_indices, test_size=split,
                                          stratify=dataset[:][1])
    train_part = torch.utils.data.Subset(dataset, idx_train)
    val_part = torch.utils.data.Subset(dataset, idx_val)

    scaler = StandardScaler().fit(train_part[:][0])
    scaled_train = XYDataset(scaler.transform(train_part[:][0]),
                             train_part[:][1])
    if val_part[:][0].shape[0] == 0:
        scaled_val = val_part
    else:
        scaled_val = XYDataset(scaler.transform(val_part[:][0]),
                               val_part[:][1])
    return (scaled_train, scaled_val)
def corrupt_dataset(dataset, corrupt_fn, cr):
    # Return a copy of `dataset` whose labels are corrupted by
    # `corrupt_fn(labels, cr)`; features are passed through untouched.
    return XYDataset(dataset[:][0], corrupt_fn(dataset[:][1], cr))
def split_corrupt_dataset(dataset, corrupt_fn, split, cr):
    """Stratified split into (trusted, untrusted) XYDatasets; the untrusted
    part's labels are corrupted by `corrupt_fn` at rate `cr`."""
    idx_trusted, idx_untrusted = train_test_split(
        list(range(len(dataset))), test_size=split, stratify=dataset[:][1])
    trusted_part = torch.utils.data.Subset(dataset, idx_trusted)
    untrusted_part = torch.utils.data.Subset(dataset, idx_untrusted)
    noisy_labels = corrupt_fn(untrusted_part[:][1], cr)
    return (XYDataset(trusted_part[:][0], trusted_part[:][1]),
            XYDataset(untrusted_part[:][0], noisy_labels))
class UnhingedLoss(torch.nn.Module):
    """Unhinged (linear) loss: 1 - t * s, with t = 2y - 1 in {-1, +1}
    and s the positive-class score X[:, 1]."""

    def __init__(self):
        super().__init__()

    def forward(self, X, y):
        target_pm1 = 2 * y - 1
        scores = X[:, 1]
        return 1 - target_pm1 * scores
class ad(torch.utils.data.Dataset):
    """Internet-ads dataset from data/ad/train.csv; positive label is 'ad.'."""

    def __init__(self):
        # Read the CSV once (it was previously parsed twice).
        frame = pd.read_csv("data/ad/train.csv")
        X = frame.iloc[:, :-1].to_numpy()
        y = (frame.iloc[:, -1].to_numpy() == 'ad.').astype(int)
        self.X = np.squeeze(np.asarray(X)).astype(np.float32)
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class web(torch.utils.data.Dataset):
    """Binary dataset loaded from the svmlight file data/web/train."""

    def __init__(self):
        # Parse the svmlight file once (it was previously parsed twice).
        X_sparse, labels = datasets.load_svmlight_file("data/web/train")
        self.X = np.squeeze(np.asarray(X_sparse.todense())).astype(np.float32)
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.y = LabelEncoder().fit_transform(labels).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class fourclass(torch.utils.data.Dataset):
    """Binary dataset loaded from the svmlight file data/fourclass/train."""

    def __init__(self):
        # Parse the svmlight file once (it was previously parsed twice).
        X_sparse, labels = datasets.load_svmlight_file("data/fourclass/train")
        self.X = np.squeeze(np.asarray(X_sparse.todense())).astype(np.float32)
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.y = LabelEncoder().fit_transform(labels).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class svmguide3(torch.utils.data.Dataset):
    """Binary dataset loaded from the svmlight file data/svmguide3/train."""

    def __init__(self):
        # Parse the svmlight file once (it was previously parsed twice).
        X_sparse, labels = datasets.load_svmlight_file("data/svmguide3/train")
        self.X = np.squeeze(np.asarray(X_sparse.todense())).astype(np.float32)
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.y = LabelEncoder().fit_transform(labels).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class svmguide1(torch.utils.data.Dataset):
    """Binary dataset loaded from the svmlight file data/svmguide1/train."""

    def __init__(self):
        # Parse the svmlight file once (it was previously parsed twice).
        X_sparse, labels = datasets.load_svmlight_file("data/svmguide1/train")
        self.X = np.squeeze(np.asarray(X_sparse.todense())).astype(np.float32)
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.y = LabelEncoder().fit_transform(labels).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class ionosphere(torch.utils.data.Dataset):
    """Ionosphere dataset from data/ionosphere/train.csv; positive label 'b'."""

    def __init__(self):
        # Read the CSV once (it was previously parsed twice).
        frame = pd.read_csv("data/ionosphere/train.csv")
        X = frame.iloc[:, :-1].to_numpy()
        y = (frame.iloc[:, -1].to_numpy() == "b").astype(int)
        self.X = np.squeeze(np.asarray(X)).astype(np.float32)
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class banknote(torch.utils.data.Dataset):
    """Banknote-authentication dataset from data/banknote/train.csv
    (headerless; last column is the integer label)."""

    def __init__(self):
        # Read the CSV once (it was previously parsed twice).
        frame = pd.read_csv("data/banknote/train.csv", header=None)
        X = frame.iloc[:, :-1].to_numpy()
        y = frame.iloc[:, -1].to_numpy().astype(int)
        self.X = np.squeeze(np.asarray(X)).astype(np.float32)
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class musk(torch.utils.data.Dataset):
    """Musk dataset from data/musk/train.csv (headerless); the first two
    columns (molecule/conformation names) are dropped, last column is the
    label."""

    def __init__(self):
        # Read the CSV once (it was previously parsed twice).
        frame = pd.read_csv("data/musk/train.csv", header=None)
        X = frame.iloc[:, 2:-1].to_numpy()
        y = frame.iloc[:, -1].to_numpy().astype(int)
        self.X = np.squeeze(np.asarray(X)).astype(np.float32)
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class ijcnn1(torch.utils.data.Dataset):
    """Binary dataset loaded from the svmlight file data/ijcnn1/train."""

    def __init__(self):
        # Parse the svmlight file once (it was previously parsed twice).
        X_sparse, labels = datasets.load_svmlight_file("data/ijcnn1/train")
        self.X = np.squeeze(np.asarray(X_sparse.todense())).astype(np.float32)
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.y = LabelEncoder().fit_transform(labels).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class eeg(torch.utils.data.Dataset):
    """EEG eye-state dataset from data/eeg/train.csv (headerless; last
    column is the label)."""

    def __init__(self):
        # Read the CSV once (it was previously parsed twice).
        frame = pd.read_csv("data/eeg/train.csv", header=None)
        X = frame.iloc[:, :-1].to_numpy()
        y = frame.iloc[:, -1].to_numpy()
        self.X = np.squeeze(np.asarray(X)).astype(np.float32)
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class hiva(torch.utils.data.Dataset):
    """HIVA dataset: space-separated features in data/hiva/train.csv with
    labels in data/hiva/label.csv."""

    def __init__(self):
        # NOTE(review): iloc[:, :-1] drops the last feature column --
        # presumably a trailing-space artifact of the separator; confirm.
        X = pd.read_csv("data/hiva/train.csv", sep=" ",
                        header=None).iloc[:, :-1].to_numpy()
        y = pd.read_csv("data/hiva/label.csv", sep=" ",
                        header=None).iloc[:, 0].to_numpy()
        self.X = np.squeeze(np.asarray(X)).astype(np.float32)
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class ibn_sina(torch.utils.data.Dataset):
    """Ibn-Sina dataset: space-separated features in data/ibn-sina/train.csv
    with labels in data/ibn-sina/label.csv."""

    def __init__(self):
        # NOTE(review): iloc[:, :-1] drops the last feature column --
        # presumably a trailing-space artifact of the separator; confirm.
        X = pd.read_csv("data/ibn-sina/train.csv", sep=" ",
                        header=None).iloc[:, :-1].to_numpy()
        y = pd.read_csv("data/ibn-sina/label.csv", sep=" ",
                        header=None).iloc[:, 0].to_numpy()
        self.X = np.squeeze(np.asarray(X)).astype(np.float32)
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class zebra(torch.utils.data.Dataset):
    """Zebra dataset: space-separated features in data/zebra/train.csv with
    labels in data/zebra/label.csv; NaN/inf features are zeroed."""

    def __init__(self):
        # NOTE(review): iloc[:, :-1] drops the last feature column --
        # presumably a trailing-space artifact of the separator; confirm.
        X = pd.read_csv("data/zebra/train.csv", sep=" ",
                        header=None).iloc[:, :-1].to_numpy()
        y = pd.read_csv("data/zebra/label.csv", sep=" ",
                        header=None).iloc[:, 0].to_numpy()
        # Replace NaN / +-inf features with 0 before training.
        self.X = np.nan_to_num(np.squeeze(np.asarray(X)).astype(np.float32),
                               nan=0.0, posinf=0.0, neginf=0.0)
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class sylva(torch.utils.data.Dataset):
    """Sylva dataset: space-separated features in data/sylva/train.csv with
    labels in data/sylva/label.csv; NaN/inf features are zeroed."""

    def __init__(self):
        # NOTE(review): iloc[:, :-1] drops the last feature column --
        # presumably a trailing-space artifact of the separator; confirm.
        X = pd.read_csv("data/sylva/train.csv", sep=" ",
                        header=None).iloc[:, :-1].to_numpy()
        y = pd.read_csv("data/sylva/label.csv", sep=" ",
                        header=None).iloc[:, 0].to_numpy()
        # Replace NaN / +-inf features with 0 before training.
        self.X = np.nan_to_num(np.squeeze(np.asarray(X)).astype(np.float32),
                               nan=0.0, posinf=0.0, neginf=0.0)
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class australian(torch.utils.data.Dataset):
    """Binary dataset loaded from the svmlight file data/australian/train."""

    def __init__(self):
        # Parse the svmlight file once (it was previously parsed twice).
        X_sparse, labels = datasets.load_svmlight_file("data/australian/train")
        self.X = np.squeeze(np.asarray(X_sparse.todense())).astype(np.float32)
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.y = LabelEncoder().fit_transform(labels).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class diabetes(torch.utils.data.Dataset):
    """Binary dataset loaded from the svmlight file data/diabetes/train."""

    def __init__(self):
        # Parse the svmlight file once (it was previously parsed twice).
        X_sparse, labels = datasets.load_svmlight_file("data/diabetes/train")
        self.X = np.squeeze(np.asarray(X_sparse.todense())).astype(np.float32)
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.y = LabelEncoder().fit_transform(labels).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class breast(torch.utils.data.Dataset):
    """Binary dataset loaded from the svmlight file data/breast-cancer/train."""

    def __init__(self):
        # Parse the svmlight file once (it was previously parsed twice).
        X_sparse, labels = datasets.load_svmlight_file("data/breast-cancer/train")
        self.X = np.squeeze(np.asarray(X_sparse.todense())).astype(np.float32)
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.y = LabelEncoder().fit_transform(labels).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class adult(torch.utils.data.Dataset):
    """Adult (census income) dataset from the svmlight file data/adult/train,
    padded to the canonical 123 features."""

    def __init__(self):
        # Parse the svmlight file once (it was previously parsed twice).
        X_sparse, labels = datasets.load_svmlight_file("data/adult/train",
                                                       n_features=123)
        self.X = np.squeeze(np.asarray(X_sparse.todense())).astype(np.float32)
        # np.long was removed in NumPy 1.24; np.int64 is the equivalent dtype.
        self.y = LabelEncoder().fit_transform(labels).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class german(torch.utils.data.Dataset):
    """German credit dataset loaded from a svmlight file on disk.

    Features are float32; labels are label-encoded int64, the dtype
    expected by ``torch.nn.CrossEntropyLoss``.
    """

    def __init__(self):
        # Load once instead of parsing the svmlight file twice.
        X, y = datasets.load_svmlight_file("data/german/train")
        self.X = np.squeeze(np.asarray(X.todense())).astype(np.float32)
        # np.long was removed in NumPy 1.24; int64 is the equivalent dtype.
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class phishing(torch.utils.data.Dataset):
    """Phishing dataset loaded from a svmlight file on disk.

    Features are float32; labels are label-encoded int64, the dtype
    expected by ``torch.nn.CrossEntropyLoss``.
    """

    def __init__(self):
        # Load once instead of parsing the svmlight file twice.
        X, y = datasets.load_svmlight_file("data/phishing/train")
        self.X = np.squeeze(np.asarray(X.todense())).astype(np.float32)
        # np.long was removed in NumPy 1.24; int64 is the equivalent dtype.
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class spam(torch.utils.data.Dataset):
    """Spam dataset from a headerless CSV; last column is the label.

    Features are float32; labels are label-encoded int64, the dtype
    expected by ``torch.nn.CrossEntropyLoss``.
    """

    def __init__(self):
        # Read the CSV once; the original parsed it twice (features, labels).
        frame = pd.read_csv("data/spam/train.csv", header=None)
        X = frame.iloc[:, :-1].to_numpy()
        y = frame.iloc[:, -1].to_numpy()
        self.X = np.squeeze(np.asarray(X)).astype(np.float32)
        # np.long was removed in NumPy 1.24; int64 is the equivalent dtype.
        self.y = LabelEncoder().fit_transform(y).astype(np.int64)

    def __getitem__(self, index):
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class XYDataset(torch.utils.data.Dataset):
    """Minimal Dataset wrapper around pre-built feature/label containers."""

    def __init__(self, X, y):
        self.X = X
        self.y = y

    def __getitem__(self, index):
        # One (features, label) pair.
        return self.X[index], self.y[index]

    def __len__(self):
        return len(self.X)
class MergedDataset(torch.utils.data.Dataset):
    """Concatenation of a trusted and an untrusted dataset.

    Items come back as ``(x, (label, is_corrupted))`` where ``is_corrupted``
    is 0 for samples from the trusted part and 1 for the untrusted part.
    """

    def __init__(self, trusted, untrusted):
        self.trusted = trusted
        self.untrusted = untrusted

    def __getitem__(self, index):
        n_trusted = len(self.trusted)
        # Indices [0, n_trusted) address the trusted data, the rest the
        # untrusted data shifted by n_trusted.
        if index < n_trusted:
            sample = self.trusted[index]
            return sample[0], (sample[1], 0)
        sample = self.untrusted[index - n_trusted]
        return sample[0], (sample[1], 1)

    def __len__(self):
        return len(self.trusted) + len(self.untrusted)
class WeightedDataset(torch.utils.data.Dataset):
    """Pairs every sample of a dataset with a per-sample weight.

    Items come back as ``((x, weight), y)``, matching the unpacking used by
    the weighted training loops.
    """

    def __init__(self, dataset, weights):
        self.dataset = dataset
        self.weights = weights

    def __getitem__(self, index):
        # Fetch the underlying sample once and attach its weight.
        sample = self.dataset[index]
        return (sample[0], self.weights[index]), sample[1]

    def __len__(self):
        return len(self.dataset)
def normal(train, test, optimizer, batch_size, epochs, lr, weight_decay, hidden_size, loss="cross_entropy"):
    """Train a binary classifier on `train`, evaluating on `test` each epoch.

    Parameters
    ----------
    train, test : torch-style Datasets yielding (features, label) pairs.
    optimizer : str, "adam" or "sgd" (other values are assumed not to occur;
        they would leave `optimizer` as a string and fail later).
    batch_size, epochs, lr, weight_decay : optimisation hyper-parameters.
    hidden_size : int, 0 selects logistic regression, otherwise an MLP.
    loss : str, "cross_entropy" or "unhinged" training loss; validation
        always uses cross-entropy.

    Returns
    -------
    (model, DataFrame) -- the trained model and a frame of per-epoch mean
    train/valid losses and validation accuracy.
    """
    input_size = len(train[0][0])
    num_classes = 2
    # hidden_size == 0 is this file's convention for plain logistic regression.
    if hidden_size == 0:
        model = LogisticRegression(input_size, num_classes)
    else:
        model = MLP(input_size, hidden_size, num_classes)
    if optimizer == "adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    elif optimizer == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=lr, weight_decay=weight_decay)
    # Validation metric is always un-reduced cross-entropy, whatever the
    # training loss is.
    cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction="none")
    if loss == "cross_entropy":
        train_loss = torch.nn.CrossEntropyLoss(reduction="none")
    elif loss == "unhinged":
        train_loss = UnhingedLoss()
    train_loader = torch.utils.data.DataLoader(dataset=train,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=1,
                                               drop_last=False)
    valid_loader = torch.utils.data.DataLoader(dataset=test,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=1,
                                               drop_last=False)
    mean_train_losses = []
    mean_valid_losses = []
    accs = []
    for epoch in range(epochs):
        model.train()
        train_losses = []
        valid_losses = []
        valid_preds = []
        valid_labels = []
        for i, (data, labels) in enumerate(train_loader):
            optimizer.zero_grad()
            outputs = model(data)
            loss = train_loss(outputs, labels).mean()
            loss.backward()
            optimizer.step()
            train_losses.append(loss.item())
        model.eval()
        with torch.no_grad():
            for i, (data, labels) in enumerate(valid_loader):
                outputs = model(data)
                loss = cross_entropy_loss(outputs, labels).mean()
                valid_losses.append(loss.item())
                # Positive-class probability, thresholded at 0.5 for accuracy.
                valid_preds.append(torch.nn.functional.softmax(outputs, dim=1).numpy()[:, 1])
                valid_labels.append(labels.numpy())
        mean_train_losses.append(np.mean(train_losses))
        mean_valid_losses.append(np.mean(valid_losses))
        acc = accuracy_score(np.concatenate(valid_labels), np.concatenate(valid_preds) > 0.5)
        accs.append(acc)
        print('epoch : {}, train loss : {:.4f}, valid loss : {:.4f}, valid acc : {:.2f}'
              .format(epoch + 1, np.mean(train_losses), np.mean(valid_losses), acc))
    return model, pd.DataFrame(list(zip(mean_train_losses, mean_valid_losses, accs)),
                               columns=["mean_train_losses", "mean_valid_losses", "accs"])
def irbl(trusted, untrusted, test, ft, fu, optimizer, batch_size, epochs, lr, weight_decay, hidden_size):
    """Importance reweighting via the probability ratio ft/fu (IRBL).

    `ft` and `fu` are classifiers fitted on the trusted and untrusted data.
    Each untrusted sample is weighted by beta = ft(y|x) / fu(y|x) (0 where
    the ratio is undefined), trusted samples keep weight 1, and a fresh
    model is trained on the weighted union.

    Returns
    -------
    (model, DataFrame of per-epoch metrics, Series of all sample weights).
    """
    # BUG FIX: was `len(train[0][0])`; `train` is not defined in this scope
    # (NameError) -- the trusted set determines the input dimension.
    input_size = len(trusted[0][0])
    num_classes = 2
    if hidden_size == 0:
        model = LogisticRegression(input_size, num_classes)
    else:
        model = MLP(input_size, hidden_size, num_classes)
    if optimizer == "adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    elif optimizer == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=lr, weight_decay=weight_decay)
    cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction="none")
    valid_loader = torch.utils.data.DataLoader(dataset=test,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=1,
                                               drop_last=False)
    mean_train_losses = []
    mean_valid_losses = []
    accs = []
    # Predict the beta as you go in the training loop instead if your data
    # can't fit in memory.
    total_data = torch.from_numpy(untrusted[:][0])
    total_labels = torch.from_numpy(untrusted[:][1])
    # Probability each model assigns to the observed (possibly noisy) label.
    # sklearn-style models expose predict_proba; torch modules are called
    # directly and softmaxed.
    if hasattr(ft, "predict_proba") and hasattr(fu, "predict_proba"):
        ft_proba = np.take_along_axis(ft.predict_proba(total_data.numpy()),
                                      total_labels.numpy().reshape(-1, 1), axis=1).flatten()
        fu_proba = np.take_along_axis(fu.predict_proba(total_data.numpy()),
                                      total_labels.numpy().reshape(-1, 1), axis=1).flatten()
        # Zero weight where fu assigns zero probability (avoids div-by-zero).
        beta = np.divide(ft_proba,
                         fu_proba,
                         out=np.zeros_like((total_labels.numpy()), dtype=float),
                         where=fu_proba != 0)
        beta = torch.from_numpy(beta).float()
    else:
        ft_proba = torch.flatten(torch.gather(torch.nn.functional.softmax(
            ft(total_data), dim=1), 1, total_labels.view(-1, 1)))
        fu_proba = torch.flatten(torch.gather(torch.nn.functional.softmax(
            fu(total_data), dim=1), 1, total_labels.view(-1, 1)))
        beta = torch.div(ft_proba,
                         fu_proba)
        # NaN (0/0) and inf (x/0) ratios get weight 0.
        beta[torch.isnan(beta)] = 0.0
        beta[torch.isinf(beta)] = 0.0
    # Trusted samples keep weight 1; detach so weights are constants.
    total_beta = torch.cat([torch.ones(len(trusted)), beta]).detach()
    total_loader = torch.utils.data.DataLoader(dataset=WeightedDataset(MergedDataset(trusted, untrusted), total_beta),
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=1,
                                               drop_last=False)
    for epoch in range(epochs):
        model.train()
        train_losses = []
        valid_losses = []
        valid_preds = []
        valid_labels = []
        for i, ((data, weights), (labels, is_corrupteds)) in enumerate(total_loader):
            optimizer.zero_grad()
            outputs = model(data)
            # Per-sample cross-entropy scaled by the importance weights.
            loss = (cross_entropy_loss(outputs, labels) * weights).mean()
            loss.backward()
            optimizer.step()
            train_losses.append(loss.item())
        model.eval()
        with torch.no_grad():
            for i, (data, labels) in enumerate(valid_loader):
                outputs = model(data)
                loss = cross_entropy_loss(outputs, labels).mean()
                valid_losses.append(loss.item())
                valid_preds.append(torch.nn.functional.softmax(outputs, dim=1).float().numpy()[:, 1])
                valid_labels.append(labels.numpy())
        mean_train_losses.append(np.mean(train_losses))
        mean_valid_losses.append(np.mean(valid_losses))
        acc = accuracy_score(np.concatenate(valid_labels), np.concatenate(valid_preds) > 0.5)
        accs.append(acc)
        print('epoch : {}, train loss : {:.4f}, valid loss : {:.4f}, valid acc : {:.2f}'
              .format(epoch + 1, np.mean(train_losses), np.mean(valid_losses), acc))
    return model, pd.DataFrame(list(zip(mean_train_losses, mean_valid_losses, accs)),
                               columns=["mean_train_losses", "mean_valid_losses", "accs"]), pd.Series(total_beta.detach().numpy())
def normal_sklearn(train, test, estimator, calibration_method="isotonic", sample_weight=None):
    """Fit an sklearn-style estimator on `train` and report test accuracy.

    Parameters
    ----------
    train, test : Datasets supporting full slicing -- `ds[:]` returns (X, y).
    estimator : sklearn-compatible classifier (cloned before fitting).
    calibration_method : "nothing" fits a clone directly; "isotonic" wraps
        the estimator in CalibratedClassifierCV.
    sample_weight : optional per-sample weights, only used by the
        calibrated branch (unchanged from the original behaviour).

    Returns
    -------
    (model, DataFrame) -- the fitted model and a one-row frame with the
    test accuracy.
    """
    X_train = train[:][0]
    y_train = train[:][1]
    X_test = test[:][0]
    y_test = test[:][1]
    if calibration_method == "nothing":
        model = clone(estimator).fit(X_train, y_train)
    elif calibration_method == "isotonic":
        model = CalibratedClassifierCV(estimator, method=calibration_method).fit(
            X_train, y_train, sample_weight=sample_weight)
    else:
        # BUG FIX: any other value previously fell through and raised a
        # confusing NameError on `model`; fail fast with a clear message.
        raise ValueError("unknown calibration_method: {!r}".format(calibration_method))
    acc = accuracy_score(y_test, model.predict(X_test))
    print('valid acc : {:.2f}'
          .format(acc))
    return model, pd.DataFrame([[acc]],
                               columns=["acc"])
def kdr(trusted, untrusted, test, optimizer, batch_size, epochs, lr, weight_decay, hidden_size):
    """Class-conditional density-ratio reweighting (KDR) baseline.

    For each class, a binary classifier is trained to separate trusted from
    untrusted samples; its odds estimate the density ratio used as the
    weight `beta` for untrusted data (scaled by the class-prior ratio).
    A final model is then trained on the weighted union of both sets.

    Returns
    -------
    (model, DataFrame of per-epoch metrics, Series of all sample weights).
    """
    input_size = len(trusted[0][0])
    num_classes = 2
    if hidden_size == 0:
        model = LogisticRegression(input_size, num_classes)
    else:
        model = MLP(input_size, hidden_size, num_classes)
    if optimizer == "adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    elif optimizer == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=lr, weight_decay=weight_decay)
    cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction="none")
    valid_loader = torch.utils.data.DataLoader(dataset=test,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=1,
                                               drop_last=False)
    mean_train_losses = []
    mean_valid_losses = []
    accs = []
    # Predict the beta as you go in the training loop instead if your data
    # can't fit in memory.
    X_trusted = torch.from_numpy(trusted[:][0])
    y_trusted = torch.from_numpy(trusted[:][1])
    X_untrusted = torch.from_numpy(untrusted[:][0])
    y_untrusted = torch.from_numpy(untrusted[:][1])
    n_samples_trusted, _ = X_trusted.shape
    n_samples_untrusted, _ = X_untrusted.shape
    beta = torch.zeros(n_samples_untrusted)
    # Class-prior ratio P_trusted(y) / P_untrusted(y).
    prior = torch.true_divide(torch.true_divide(torch.bincount(y_trusted),n_samples_trusted),torch.true_divide(torch.bincount(y_untrusted),n_samples_untrusted))
    for i in range(num_classes):
        X_trusted_i = X_trusted[y_trusted == i]
        X_untrusted_i = X_untrusted[y_untrusted == i]
        n_trusted_i, _ = X_trusted_i.shape
        n_untrusted_i, _ = X_untrusted_i.shape
        # Binary targets: 1 = trusted, 0 = untrusted.
        s_trusted_i = torch.ones(n_trusted_i).float()
        s_untrusted_i = torch.zeros(n_untrusted_i).float()
        ratio = n_untrusted_i/n_trusted_i
        # BUG FIX: this classifier was previously bound to `lr`, shadowing
        # the learning-rate parameter, and was configured with the undefined
        # name `learning_rate` (NameError at runtime).
        density_model, _ = normal_sklearn(
            XYDataset(np.vstack((X_trusted_i,X_untrusted_i)),np.hstack((s_trusted_i,s_untrusted_i))), test, NeuralNetBinaryClassifier(
                module=BinaryLogisticRegression,
                module__input_size=input_size,
                max_epochs=epochs,
                train_split=None,
                lr=lr,
                batch_size=batch_size,
                optimizer__weight_decay=weight_decay,
                iterator_train__shuffle=True),calibration_method="nothing")
        # exp(logit) turns the classifier output into the odds
        # p(trusted|x)/p(untrusted|x), i.e. the density-ratio estimate.
        ratio = ratio * torch.exp(density_model.forward(X_untrusted_i))
        beta[y_untrusted == i] = ratio * prior[i]
    # Trusted samples keep weight 1; detach so weights are constants.
    total_beta = torch.cat([torch.ones(len(trusted)), beta]).detach()
    total_loader = torch.utils.data.DataLoader(dataset=WeightedDataset(MergedDataset(trusted, untrusted), total_beta),
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=1,
                                               drop_last=False)
    for epoch in range(epochs):
        model.train()
        train_losses = []
        valid_losses = []
        valid_preds = []
        valid_labels = []
        for i, ((data, weights), (labels, is_corrupteds)) in enumerate(total_loader):
            optimizer.zero_grad()
            outputs = model(data)
            # Per-sample cross-entropy scaled by the density-ratio weights.
            loss = (cross_entropy_loss(outputs, labels) * weights).mean()
            loss.backward()
            optimizer.step()
            train_losses.append(loss.item())
        model.eval()
        with torch.no_grad():
            for i, (data, labels) in enumerate(valid_loader):
                outputs = model(data)
                loss = cross_entropy_loss(outputs, labels).mean()
                valid_losses.append(loss.item())
                valid_preds.append(torch.nn.functional.softmax(outputs, dim=1).float().numpy()[:, 1])
                valid_labels.append(labels.numpy())
        mean_train_losses.append(np.mean(train_losses))
        mean_valid_losses.append(np.mean(valid_losses))
        acc = accuracy_score(np.concatenate(valid_labels), np.concatenate(valid_preds) > 0.5)
        accs.append(acc)
        print('epoch : {}, train loss : {:.4f}, valid loss : {:.4f}, valid acc : {:.2f}'
              .format(epoch + 1, np.mean(train_losses), np.mean(valid_losses), acc))
    return model, pd.DataFrame(list(zip(mean_train_losses, mean_valid_losses, accs)),
                               columns=["mean_train_losses", "mean_valid_losses", "accs"]), pd.Series(total_beta.detach().numpy())
def glc(trusted, untrusted, test, fu, optimizer, batch_size, epochs, lr, weight_decay, hidden_size):
    """Gold Loss Correction (GLC) baseline.

    Estimates a label-corruption matrix C from the untrusted-data model
    `fu` evaluated on the trusted (gold) data, then trains a model using
    plain cross-entropy on trusted samples and C-corrected NLL on
    untrusted samples.

    Returns
    -------
    (model, DataFrame of per-epoch metrics, DataFrame holding C).
    """
    input_size = len(trusted[0][0])
    num_classes = int(max(test[:][1]) + 1)
    cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction="none")
    nll_loss = torch.nn.NLLLoss(reduction="none")
    # BUG FIX: this loader previously used `beta_batch_size[0]`, a name not
    # defined in this function (NameError); use the `batch_size` parameter.
    trusted_loader = torch.utils.data.DataLoader(dataset=trusted,
                                                 batch_size=batch_size,
                                                 shuffle=True,
                                                 num_workers=1,
                                                 drop_last=False)
    total_loader = torch.utils.data.DataLoader(dataset=MergedDataset(trusted, untrusted),
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=1,
                                               drop_last=False)
    valid_loader = torch.utils.data.DataLoader(dataset=test,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=1,
                                               drop_last=False)
    if hidden_size == 0:
        model = LogisticRegression(input_size, num_classes)
    else:
        model = MLP(input_size, hidden_size, num_classes)
    if optimizer == "adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    elif optimizer == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=lr, weight_decay=weight_decay)
    mean_train_losses = []
    mean_valid_losses = []
    accs = []
    # Row k of C = mean of fu's predicted distribution over trusted samples
    # whose gold label is k (uniform row if the class is absent).
    C = torch.zeros((num_classes, num_classes))
    for k in range(num_classes):
        num_examples = 0
        for i, (data, labels) in enumerate(trusted_loader):
            data_k = data[labels.numpy() == k]
            num_examples += len(data_k)
            if hasattr(fu, "predict_proba"):
                if not len(data_k.numpy()) == 0:
                    C[k] += np.sum(fu.predict_proba(data_k.numpy()), axis=0)
            else:
                C[k] += torch.sum(torch.nn.functional.softmax(fu(data_k), dim=1), axis=0)
        if num_examples == 0:
            C[k] = torch.ones(num_classes) / num_classes
        else:
            C[k] = C[k] / num_examples
    C = C.detach()
    print(C)
    print(C.t())
    for epoch in range(epochs):
        model.train()
        train_losses = []
        valid_losses = []
        valid_preds = []
        valid_labels = []
        for i, (data, (labels, is_corrupteds)) in enumerate(total_loader):
            optimizer.zero_grad()
            outputs = model(data)
            # Trusted samples (is_corrupteds == 0): plain cross-entropy.
            loss_trusted = (cross_entropy_loss(outputs, labels) * (1 - is_corrupteds)).sum()
            # Untrusted samples: NLL after mapping predictions through C.
            loss_untrusted = (nll_loss(torch.log(torch.matmul(torch.nn.functional.softmax(outputs, dim=1), C)),
                                       labels) * is_corrupteds).sum()
            loss = (loss_trusted + loss_untrusted) / len(data)
            loss.backward()
            optimizer.step()
            train_losses.append(loss.item())
        model.eval()
        with torch.no_grad():
            for i, (data, labels) in enumerate(valid_loader):
                outputs = model(data)
                loss = cross_entropy_loss(outputs, labels).mean()
                valid_losses.append(loss.item())
                valid_preds.append(torch.nn.functional.softmax(outputs, dim=1).numpy()[:, 1])
                valid_labels.append(labels.numpy())
        mean_train_losses.append(np.mean(train_losses))
        mean_valid_losses.append(np.mean(valid_losses))
        acc = accuracy_score(np.concatenate(valid_labels), np.concatenate(valid_preds) > 0.5)
        accs.append(acc)
        print('c : epoch : {}, train loss : {:.4f}, valid loss : {:.4f}, valid acc : {:.2f}'
              .format(epoch + 1, np.mean(train_losses), np.mean(valid_losses), acc))
    return model, pd.DataFrame(list(zip(mean_train_losses, mean_valid_losses, accs)),
                               columns=["mean_train_losses", "mean_valid_losses", "accs"]), pd.DataFrame(C)
def loop(dir, trusted, untrusted, test, optimizer, beta_batch_size, batch_size, beta_epochs, epochs,
         beta_learning_rate, learning_rate, beta_weight_decay, weight_decay, beta_hidden_size, hidden_size, calibration_method):
    """Run every method once for one experimental setting and dump results.

    Trains the trusted/untrusted base models, then IRBL (raw and calibrated),
    GLC, the mixed baseline, the symmetric (unhinged) loss baseline and KDR,
    writing each method's per-epoch metrics (and learned weights, where
    applicable) as CSV files under `dir`.

    The `beta_*` hyper-parameters are 2-element sequences: index 0 is used
    for the trusted base model, index 1 for the untrusted one.
    NOTE(review): `dir` shadows the builtin and `calibration_method` is
    never used in this body -- presumably a leftover; confirm with callers.
    """
    # Base model on trusted data only.
    print("ft-torched")
    ft_torched, ft_torched_data = normal(
        trusted, test, optimizer, beta_batch_size[0], beta_epochs[0], beta_learning_rate[0], beta_weight_decay[0], beta_hidden_size[0])
    ft_torched_data.to_csv("{}/ft-torched-perfs.csv".format(dir), index=False)
    # Base model on untrusted data only.
    print("fu-torched")
    fu_torched, fu_torched_data = normal(
        untrusted, test, optimizer, beta_batch_size[1], beta_epochs[1], beta_learning_rate[1], beta_weight_decay[1], beta_hidden_size[1])
    fu_torched_data.to_csv("{}/fu-torched-perfs.csv".format(dir), index=False)
    # IRBL using the raw torch base models.
    print("full-torched")
    full_torched, full_torched_data, full_torched_beta = irbl(
        trusted, untrusted, test, ft_torched, fu_torched, optimizer, batch_size, epochs, learning_rate, weight_decay, hidden_size)
    full_torched_data.to_csv("{}/full-torched-perfs.csv".format(dir), index=False)
    full_torched_beta.to_csv("{}/full-torched-beta.csv".format(dir), index=False, header=False)
    # Calibrated (isotonic, via normal_sklearn's default) base models.
    print("ft-calibrated")
    ft_torched_calibrated, ft_torched_calibrated_data = normal_sklearn(
        trusted, test, NeuralNetClassifier(
            module=LogisticRegressionSoftmax,
            module__input_size=len(trusted[0][0]),
            module__output_size=2,
            max_epochs=beta_epochs[0],
            train_split=None,
            lr=beta_learning_rate[0],
            batch_size=beta_batch_size[0],
            optimizer__weight_decay=beta_weight_decay[0],
            iterator_train__shuffle=True,
            verbose=0))
    ft_torched_calibrated_data.to_csv("{}/ft-torched-calibrated-perfs.csv".format(dir), index=False)
    print("fu-calibrated")
    fu_torched_calibrated, fu_torched_calibrated_data = normal_sklearn(
        untrusted, test, NeuralNetClassifier(
            module=LogisticRegressionSoftmax,
            module__input_size=len(trusted[0][0]),
            module__output_size=2,
            max_epochs=beta_epochs[1],
            train_split=None,
            lr=beta_learning_rate[1],
            batch_size=beta_batch_size[1],
            optimizer__weight_decay=beta_weight_decay[1],
            iterator_train__shuffle=True,
            verbose=0)
    )
    fu_torched_calibrated_data.to_csv("{}/fu-torched-calibrated-perfs.csv".format(dir), index=False)
    # IRBL using the calibrated base models.
    print("full-calibrated")
    full_torched_calibrated, full_torched_data_calibrated, full_torched_beta_calibrated = irbl(
        trusted, untrusted, test, ft_torched_calibrated, fu_torched_calibrated, optimizer, batch_size, epochs, learning_rate, weight_decay, hidden_size)
    full_torched_data_calibrated.to_csv("{}/full-torched-calibrated-perfs.csv".format(dir), index=False)
    full_torched_beta_calibrated.to_csv("{}/full-torched-calibrated-beta.csv".format(dir), index=False, header=False)
    # Competitor: Gold Loss Correction.
    print("glc")
    _, glc_data, C = glc(
        trusted, untrusted, test, fu_torched, optimizer, batch_size, epochs, learning_rate, weight_decay, hidden_size)
    glc_data.to_csv("{}/glc-perfs.csv".format(dir), index=False)
    C.to_csv("{}/glc-beta.csv".format(dir), index=False, header=False)
    # Baseline: train on the unweighted union of both sets.
    print("mixed")
    _, mixed_data = normal(torch.utils.data.ConcatDataset([trusted, untrusted]), test, optimizer, batch_size, epochs,
                           learning_rate, weight_decay, hidden_size)
    mixed_data.to_csv("{}/mixed-perfs.csv".format(dir), index=False)
    # Competitor: symmetric (unhinged) loss on the union.
    print("symetric")
    _, symetric_data = normal(torch.utils.data.ConcatDataset([trusted, untrusted]), test, optimizer, batch_size, epochs,
                              learning_rate, weight_decay, hidden_size, loss="unhinged")
    symetric_data.to_csv("{}/symetric-perfs.csv".format(dir), index=False)
    # Competitor: class-conditional density-ratio reweighting.
    print("kdr")
    _, kdrnc_data, kdrnc_beta = kdr(
        trusted, untrusted, test, optimizer, batch_size, epochs, learning_rate, weight_decay, hidden_size)
    kdrnc_data.to_csv("{}/kdr-perfs.csv".format(dir), index=False)
    kdrnc_beta.to_csv("{}/kdr-beta.csv".format(dir), index=False, header=False)
    return
def learning_curve_plot(figdir, resdir, name, p, q, criteria):
    """Plot per-epoch error curves for one (dataset, p, q) experiment.

    Reads each method's per-epoch CSV from ``{resdir}/{name}-{p}-{q}`` and
    writes three PDFs (all methods / simple subset / competitor subset)
    under ``{figdir}/{name}-{p}-{q}``.

    `criteria` is a column stem ("mean_valid_losse" or an accuracy-like
    metric); accuracy-like metrics are converted to error as 1 - value.
    """
    total = pd.read_csv("{}/{}/total-perfs.csv".format(resdir, name))
    figures_directory = "{}/{}-{}-{}".format(figdir, name, p, q)
    if not os.path.exists(figures_directory):
        os.makedirs(figures_directory)
    results_directory = "{}/{}-{}-{}".format(resdir, name, p, q)
    ftt = pd.read_csv("{}/ft-torched-perfs.csv".format(results_directory))
    fut = pd.read_csv("{}/fu-torched-perfs.csv".format(results_directory))
    bt = pd.read_csv("{}/full-torched-perfs.csv".format(results_directory))
    btc = pd.read_csv("{}/full-torched-calibrated-perfs.csv".format(results_directory))
    mixed = pd.read_csv("{}/mixed-perfs.csv".format(results_directory))
    glc = pd.read_csv("{}/glc-perfs.csv".format(results_directory))
    symetric = pd.read_csv("{}/symetric-perfs.csv".format(results_directory))
    # CSV columns are the pluralised criteria name ("..._losses", "accs").
    if criteria == "mean_valid_losse":
        ftt_error = ftt[criteria + "s"]
        fut_error = fut[criteria + "s"]
        bt_error = bt[criteria + "s"]
        btc_error = btc[criteria + "s"]
        mixed_error = mixed[criteria + "s"]
        glc_error = glc[criteria + "s"]
        symetric_error = symetric[criteria + "s"]
        total_error = total[criteria + "s"]
    else:
        ftt_error = 1 - ftt[criteria + "s"]
        fut_error = 1 - fut[criteria + "s"]
        bt_error = 1 - bt[criteria + "s"]
        btc_error = 1 - btc[criteria + "s"]
        mixed_error = 1 - mixed[criteria + "s"]
        glc_error = 1 - glc[criteria + "s"]
        symetric_error = 1 - symetric[criteria + "s"]
        total_error = 1 - total[criteria + "s"]
    fig, ax = plt.subplots()
    ax.set_xlabel("epochs")
    ax.set_xticks(range(len(ftt_error)))
    ax.set_xticklabels(range(1, len(ftt_error) + 1))
    ax.set_ylabel("error")
    ax.plot(ftt_error, label='trusted')
    # BUG FIX: the legend label was misspelled 'untrtusted'.
    ax.plot(fut_error, label='untrusted')
    ax.plot(bt_error, label='irbl')
    ax.plot(btc_error, label='irblc')
    ax.plot(mixed_error, label='mixed')
    ax.plot(glc_error, label='glc')
    ax.plot(symetric_error, label='symmetric')
    ax.plot(total_error, label='total')
    ax.legend()
    # BUG FIX: the stray `bbox='tight'` kwarg (the valid one is
    # `bbox_inches`) is rejected by modern matplotlib savefig.
    fig.savefig("{}/learning-curve-{}.pdf".format(figures_directory, criteria), bbox_inches="tight", format="pdf")
    plt.close(fig)
    # Grayscale figure restricted to the methods of the "simple" comparison.
    fig, ax = plt.subplots(figsize=(8, 4))
    ax.set_xlabel("epochs")
    ax.set_xticks(range(len(ftt_error)))
    ax.set_xticklabels(range(1, len(ftt_error) + 1))
    ax.set_ylabel("loss")
    ax.plot(btc_error, label='irbl', color="black")
    ax.plot(total_error, label='total', color="black", linestyle="--")
    ax.plot(mixed_error, label='mixed', color="black", linestyle="-.")
    ax.plot(ftt_error, label='trusted', color="black", linestyle=":")
    ax.plot(fut_error, label='untrusted', color="black", linestyle="--", marker=".")
    ax.legend(loc='upper right')
    fig.savefig("{}/learning-curve-simple-{}.pdf".format(figures_directory, criteria), bbox_inches="tight", format="pdf")
    plt.close(fig)
    # Grayscale figure restricted to the competitor methods.
    fig, ax = plt.subplots(figsize=(8, 4))
    ax.set_xlabel("epochs")
    ax.set_xticks(range(len(ftt_error)))
    ax.set_xticklabels(range(1, len(ftt_error) + 1))
    ax.set_ylabel("loss")
    ax.plot(btc_error, label='irbl', color="black")
    ax.plot(total_error, label='total', color="black", linestyle="--")
    ax.plot(glc_error, label='glc', color="black", linestyle="-.")
    ax.plot(symetric_error, label='rll', color="black", linestyle=":")
    ax.legend(loc='upper right')
    fig.savefig("{}/learning-curve-competitors-{}.pdf".format(figures_directory, criteria), bbox_inches="tight", format="pdf")
    plt.close(fig)
def hist_plot(figdir, resdir, name, p, q):
    """Histogram the learned sample weights, split by corruption status.

    For each weighting method (IRBL, calibrated IRBL, KDR) a grayscale
    histogram of cleaned vs. corrupted sample weights is saved as a PDF
    under ``{figdir}/{name}-{p}-{q}``.
    """
    figures_directory = "{}/{}-{}-{}".format(figdir, name, p, q)
    if not os.path.exists(figures_directory):
        os.makedirs(figures_directory)
    results_directory = "{}/{}-{}-{}".format(resdir, name, p, q)
    # flipped marks, per untrusted sample, whether its label was corrupted.
    flipped = pd.read_csv("{}/flipped.csv".format(results_directory)).to_numpy().flatten()
    beta_irbl = pd.read_csv("{}/full-torched-beta.csv".format(results_directory)).to_numpy().flatten()
    beta_irblc = pd.read_csv("{}/full-torched-calibrated-beta.csv".format(results_directory)).to_numpy().flatten()
    beta_kdr = pd.read_csv("{}/kdr-beta.csv".format(results_directory)).to_numpy().flatten()
    # Same figure for each method, only the weights and file name change.
    targets = (
        (beta_irbl, "{}/full-torched-hist.pdf".format(figures_directory)),
        (beta_irblc, "{}/full-torched-calibrated-hist.pdf".format(figures_directory)),
        (beta_kdr, "{}/kdr-hist.pdf".format(figures_directory)),
    )
    for betas, outfile in targets:
        fig, ax = plt.subplots(figsize=(8, 4))
        ax.hist([betas[flipped == 0], betas[flipped == 1]],
                label=["cleaned", "corrupted"], bins=20, color=["lightgray", "dimgray", ])
        ax.legend(loc='upper right')
        fig.savefig(outfile, bbox='tight', bbox_inches="tight", format="pdf")
        plt.close(fig)
    return
def box_plot2(figdir, resdir, name, p, qs):
    """Box-plot sample weights across corruption levels q, per method.

    For each q in `qs`, loads the cleaned/corrupted weight vectors of IRBL,
    calibrated IRBL and KDR, then draws one grayscale box plot per method
    (cleaned in light gray, corrupted in dark gray) against sorted q and
    saves it under ``{figdir}/{name}-{p}``.
    """
    figures_directory = "{}/{}-{}".format(figdir, name, p)
    if not os.path.exists(figures_directory):
        os.makedirs(figures_directory)
    bt_list = []
    bt_f_list = []
    bt_t_list = []
    btc_list = []
    btc_f_list = []
    btc_t_list = []
    kdr_list = []
    kdr_f_list = []
    kdr_t_list = []
    for q_idx, q in enumerate(qs):
        results_directory = "{}/{}-{}-{}".format(resdir, name, p, q)
        # flipped codes: 0 = cleaned, 1 = corrupted, 2 = third category
        # (presumably trusted -- the *_t_list values are collected but
        # never plotted below; confirm against the writer of flipped.csv).
        flipped = pd.read_csv("{}/flipped.csv".format(results_directory)).to_numpy().flatten()
        bt = pd.read_csv("{}/full-torched-beta.csv".format(results_directory)).to_numpy().flatten()
        btc = pd.read_csv("{}/full-torched-calibrated-beta.csv".format(results_directory)).to_numpy().flatten()
        kdr = pd.read_csv("{}/kdr-beta.csv".format(results_directory)).to_numpy().flatten()
        bt_list.append(bt[flipped == 0])
        bt_f_list.append(bt[flipped == 1])
        bt_t_list.append(bt[flipped == 2])
        btc_list.append(btc[flipped == 0])
        btc_f_list.append(btc[flipped == 1])
        btc_t_list.append(btc[flipped == 2])
        kdr_list.append(kdr[flipped == 0])
        kdr_f_list.append(kdr[flipped == 1])
        kdr_t_list.append(kdr[flipped == 2])
    # Boxplot styling: light gray boxes for cleaned samples...
    c = 'lightgray'
    c0_dict = {
        'patch_artist': True,
        'boxprops': dict(facecolor=c,color="black"),
        'capprops': dict(color="black"),
        'flierprops': dict(color="black"),
        'medianprops': dict(color="black"),
        'whiskerprops': dict(color="black")}
    # ...and dark gray boxes for corrupted samples.
    c = 'dimgray'
    c1_dict = {
        'patch_artist': True,
        'boxprops': dict(facecolor=c,color="black"),
        'capprops': dict(color="black"),
        'flierprops': dict(color="black"),
        'medianprops': dict(color="black"),
        'whiskerprops': dict(color="black")}
    # Lists are reversed so the x axis shows q in ascending (sorted) order.
    fig, ax = plt.subplots(figsize=(8,4))
    ax.set_xlabel("q = 1-r")
    bp1 = ax.boxplot(bt_list[::-1], showfliers=False, labels=sorted(qs), **c0_dict)
    bp2 = ax.boxplot(bt_f_list[::-1], showfliers=False, labels=sorted(qs), **c1_dict)
    ax.legend([bp1["boxes"][0], bp2["boxes"][0]], ['cleaned', 'corrupted'], loc='upper right')
    # NOTE(review): `bbox` is not a valid savefig kwarg (`bbox_inches` is);
    # recent matplotlib may reject it -- confirm against the pinned version.
    fig.savefig("{}/full-torched-box-plot-2.pdf".format(figures_directory), bbox = 'tight', bbox_inches="tight", format="pdf")
    plt.close(fig)
    fig2, ax2 = plt.subplots(figsize=(8,4))
    ax2.set_xlabel("q = 1-r")
    bp1 = ax2.boxplot(btc_list[::-1], showfliers=False, labels=sorted(qs), **c0_dict)
    bp2 = ax2.boxplot(btc_f_list[::-1], showfliers=False, labels=sorted(qs), **c1_dict)
    ax2.legend([bp1["boxes"][0], bp2["boxes"][0]], ['cleaned', 'corrupted'], loc='upper right')
    fig2.savefig("{}/full-torched-calibrated-box-plot-2.pdf".format(figures_directory), bbox = 'tight', bbox_inches="tight", format="pdf")
    plt.close(fig2)
    fig3, ax3 = plt.subplots(figsize=(8,4))
    ax3.set_xlabel("q = 1-r")
    bp1 = ax3.boxplot(kdr_list[::-1], showfliers=False, labels=sorted(qs), **c0_dict)
    bp2 = ax3.boxplot(kdr_f_list[::-1], showfliers=False, labels=sorted(qs), **c1_dict)
    ax3.legend([bp1["boxes"][0], bp2["boxes"][0]], ['cleaned', 'corrupted'], loc='upper right')
    fig3.savefig("{}/kdr-box-plot-2.pdf".format(figures_directory), bbox = 'tight', bbox_inches="tight", format="pdf")
    plt.close(fig3)
    return
def mean_area_under_error_curve(resdir, criteria):
    """Average, per corruption level p, each method's area under its error curve.

    Reads ``{resdir}/results-{criteria}.csv``, computes the trapezoidal area
    under every method's error-vs-q curve for each (p, dataset) pair, and
    writes the per-p means to ``{resdir}/area-{criteria}.csv``.
    """
    results = pd.read_csv("{}/results-{}.csv".format(resdir, criteria))
    # One row per (p, dataset); each method column becomes the list of its
    # per-q values.
    agg = results.groupby(["p", "name"]).agg(list).reset_index()
    methods = ["trusted", "untrusted", "irbl", "irblc",
               "glc", "mixed", "symetric", "total"]
    for method in methods:
        # Bind `method` as a default to avoid late-binding in the lambda.
        agg["area_under_error_curve_" + method] = agg.apply(
            lambda row, m=method: np.trapz(row[m]), axis=1)
    final = agg.groupby("p").mean().reset_index()
    final.to_csv("{}/area-{}.csv".format(resdir, criteria), index=False)
def wilcoxon_area_under_error_curve(resdir, criteria):
    """Wilcoxon tests of irblc's area-under-error-curve vs. each competitor.

    Computes, per (p, dataset), the trapezoidal area under every method's
    error curve, then for each p runs a paired Wilcoxon test (via the
    external `wilcoxon_test` helper, which is assumed to return a
    (score, hypothesis) pair) comparing irblc against each other method,
    and writes the scores/hypotheses to
    ``{resdir}/wilcoxon-area-{criteria}.csv``.
    """
    results = pd.read_csv("{}/results-{}.csv".format(resdir,criteria))
    # One row per (p, dataset); method columns become per-q value lists.
    agg = results.groupby(["p", "name"]).agg(list).reset_index()
    agg["area_under_error_curve_trusted"] = agg.apply(lambda row: np.trapz(row["trusted"]), axis=1)
    agg["area_under_error_curve_untrusted"] = agg.apply(lambda row: np.trapz(row["untrusted"]), axis=1)
    agg["area_under_error_curve_irbl"] = agg.apply(lambda row: np.trapz(row["irbl"]), axis=1)
    agg["area_under_error_curve_irblc"] = agg.apply(lambda row: np.trapz(row["irblc"]), axis=1)
    agg["area_under_error_curve_glc"] = agg.apply(lambda row: np.trapz(row["glc"]), axis=1)
    agg["area_under_error_curve_mixed"] = agg.apply(lambda row: np.trapz(row["mixed"]), axis=1)
    agg["area_under_error_curve_symetric"] = agg.apply(lambda row: np.trapz(row["symetric"]), axis=1)
    agg["area_under_error_curve_total"] = agg.apply(lambda row: np.trapz(row["total"]), axis=1)
    agg = agg.drop(
        ["q", "trusted", "untrusted", "irbl", "irblc", "mixed", "glc", "symetric", "total"], axis=1)
    # Group the per-dataset areas into paired samples per corruption level p.
    final = agg.groupby("p").agg(list).reset_index()
    final[["area_under_error_curve_irblc_glc_score","area_under_error_curve_irblc_glc_hypothesis"]] = pd.DataFrame(
        final.apply(lambda row: wilcoxon_test(row["area_under_error_curve_irblc"], row["area_under_error_curve_glc"]), axis=1).values.tolist())
    final[["area_under_error_curve_irblc_trusted_score","area_under_error_curve_irblc_trusted_hypothesis"]] = pd.DataFrame(
        final.apply(lambda row: wilcoxon_test(row["area_under_error_curve_irblc"], row["area_under_error_curve_trusted"]), axis=1).values.tolist())
    final[["area_under_error_curve_irblc_untrusted_score","area_under_error_curve_irblc_untrusted_hypothesis"]] = pd.DataFrame(
        final.apply(lambda row: wilcoxon_test(row["area_under_error_curve_irblc"], row["area_under_error_curve_untrusted"]), axis=1).values.tolist())
    final[["area_under_error_curve_irblc_mixed_score","area_under_error_curve_irblc_mixed_hypothesis"]] = pd.DataFrame(
        final.apply(lambda row: wilcoxon_test(row["area_under_error_curve_irblc"], row["area_under_error_curve_mixed"]), axis=1).values.tolist())
    final[["area_under_error_curve_irblc_symetric_score","area_under_error_curve_irblc_symetric_hypothesis"]] = pd.DataFrame(
        final.apply(lambda row: wilcoxon_test(row["area_under_error_curve_irblc"], row["area_under_error_curve_symetric"]), axis=1).values.tolist())
    final[["area_under_error_curve_irblc_total_score","area_under_error_curve_irblc_total_hypothesis"]] = pd.DataFrame(
        final.apply(lambda row: wilcoxon_test(row["area_under_error_curve_irblc"], row["area_under_error_curve_total"]), axis=1).values.tolist())
    # Keep only p and the test results in the output CSV.
    final = final.drop(
        ["name", "area_under_error_curve_trusted", "area_under_error_curve_untrusted", "area_under_error_curve_irbl",
         "area_under_error_curve_irblc", "area_under_error_curve_glc", "area_under_error_curve_mixed", "area_under_error_curve_symetric",
         "area_under_error_curve_total"], axis=1)
    final.to_csv("{}/wilcoxon-area-{}.csv".format(resdir,criteria), index=False)
def error_curve_plot(figdir, resdir, name, p, qs, criteria):
    """Plot each method's error against the corruption level q.

    Reads the aggregated ``results-{criteria}.csv``, restricts it to one
    dataset `name` and trusted-fraction `p`, and writes three PDFs (all
    methods / simple subset / competitor subset) under
    ``{figdir}/{name}-{p}``.
    """
    figures_directory = "{}/{}-{}".format(figdir, name, p)
    if not os.path.exists(figures_directory):
        os.makedirs(figures_directory)
    res = pd.read_csv("{}/results-{}.csv".format(resdir,criteria))
    res = res[(res["name"] == name) & (res["p"]==p) & (res["q"].isin(qs))]
    fig, ax = plt.subplots()
    ax.set_xlabel("q = 1-r")
    ax.set_xticks(range(len(qs)))
    ax.set_xticklabels(qs)
    ax.set_ylabel("error")
    ax.plot(res["trusted"], label='trusted')
    ax.plot(res["untrusted"], label='untrusted')
    ax.plot(res["irbl"], label='irbl')
    ax.plot(res["irblc"], label='irblc')
    ax.plot(res["mixed"], label='mixed')
    ax.plot(res["glc"], label='glc')
    ax.plot(res["symetric"], label='symmetric')
    ax.plot(res["total"], label='total')
    ax.legend()
    # NOTE(review): `bbox` is not a valid savefig kwarg (`bbox_inches` is);
    # recent matplotlib may reject it -- confirm against the pinned version.
    fig.savefig("{}/error-curve-{}.pdf".format(figures_directory, criteria), bbox = 'tight', bbox_inches="tight", format="pdf")
    plt.close(fig)
    # Grayscale figure restricted to the "simple" comparison; values are
    # reversed so q increases along the (sorted) x axis.
    fig, ax = plt.subplots(figsize=(8,4))
    ax.set_xlabel("q = 1-r")
    ax.set_xticks(range(len(qs)))
    ax.set_xticklabels(sorted(qs))
    ax.set_ylabel("error")
    ax.plot(res["irblc"].values[::-1], label='irbl', color="black")
    ax.plot(res["total"].values[::-1], label='total', color="black", linestyle="--")
    ax.plot(res["mixed"].values[::-1], label='mixed', color="black", linestyle="-.")
    ax.plot(res["trusted"].values[::-1], label='trusted', color="black", linestyle=":")
    ax.plot(res["untrusted"].values[::-1], label='untrusted', color="black", linestyle="--",marker=".")
    ax.legend(loc = 'upper right')
    fig.savefig("{}/error-curve-simple-{}.pdf".format(figures_directory, criteria), bbox = 'tight', bbox_inches="tight", format="pdf")
    plt.close(fig)
    # Grayscale figure restricted to the competitor methods.
    fig, ax = plt.subplots(figsize=(8,4))
    ax.set_xlabel("q = 1-r")
    ax.set_xticks(range(len(qs)))
    ax.set_xticklabels(sorted(qs))
    ax.set_ylabel("error")
    ax.plot(res["irblc"].values[::-1], label='irbl', color="black")
    ax.plot(res["total"].values[::-1], label='total', color="black", linestyle="--")
    ax.plot(res["glc"].values[::-1], label='glc', color="black", linestyle="-.")
    ax.plot(res["symetric"].values[::-1], label='rll', color="black", linestyle=":")
    ax.legend(loc = 'upper right')
    fig.savefig("{}/error-curve-competitors-{}.pdf".format(figures_directory, criteria), bbox = 'tight', bbox_inches="tight", format="pdf")
    plt.close(fig)
def generate_results(resdir, names, ps, qs, criteria):
    """Collect the best (minimum) error of every method for each
    (dataset, p, q) run and write them to ``<resdir>/results-<criteria>.csv``.

    Bug fix: the previous version computed the error lists for every method
    but only wrote the ``kdr`` column, while downstream code
    (``error_curve_plot``, ``generate_wilcoxon``, the final aggregation)
    reads ``trusted``/``untrusted``/``irbl``/``irblc``/``mixed``/``glc``/
    ``symetric``/``total`` from the same file.

    :param resdir: root directory holding the per-run result folders
    :param names: dataset names
    :param ps: trusted-fraction values
    :param qs: clean-label fractions (q = 1 - r)
    :param criteria: "mean_valid_losse" (minimised directly) or a score such
        as "acc" (converted to an error via 1 - score before minimising)
    """
    # Method name -> per-run performance file, in the output column order.
    methods = {
        "trusted": "ft-torched-perfs.csv",
        "untrusted": "fu-torched-perfs.csv",
        "irbl": "full-torched-perfs.csv",
        "irblc": "full-torched-calibrated-perfs.csv",
        "mixed": "mixed-perfs.csv",
        "glc": "glc-perfs.csv",
        "symetric": "symetric-perfs.csv",
        "kdr": "kdr-perfs.csv",
    }
    records = []
    for name in names:
        for p in ps:
            for q in qs:
                complete_resdir = "{}/{}-{}-{}".format(resdir, name, p, q)
                row = {"name": name, "p": p, "q": q}
                for method, filename in methods.items():
                    perfs = pd.read_csv("{}/{}".format(complete_resdir, filename))
                    row[method] = _best_error(perfs, criteria)
                # The "total" baseline is shared across all (p, q) settings.
                total = pd.read_csv("{}/{}/total-perfs.csv".format(resdir, name))
                row["total"] = _best_error(total, criteria)
                records.append(row)
    columns = ["name", "p", "q", "trusted", "untrusted", "irbl", "irblc",
               "mixed", "glc", "symetric", "total", "kdr"]
    res = pd.DataFrame(records, columns=columns)
    res.to_csv("{}/results-{}.csv".format(resdir, criteria), index=False)


def _best_error(perfs, criteria):
    # Validation losses are minimised directly; scores (e.g. accuracy) are
    # converted to errors (1 - score) before taking the minimum.
    if criteria == "mean_valid_losse":
        return np.min(perfs[criteria + "s"])
    return np.min(1 - perfs[criteria + "s"])
def generate_wilcoxon(resdir, criteria):
    """Run pairwise Wilcoxon tests per (p, q) cell, persist the scores and
    return a (wins, losses) tuple for irblc vs glc at the 1.96 threshold."""
    results = pd.read_csv("{}/results-{}.csv".format(resdir, criteria))
    agg = results.groupby(["p", "q"]).agg(list).reset_index()
    # Every comparison pair, in the exact column order of the output file.
    pairs = [("irblc", "glc"), ("irblc", "mixed"), ("irblc", "trusted"),
             ("irblc", "untrusted"), ("irblc", "symetric"), ("irblc", "total"),
             ("mixed", "total"), ("irblc", "kdr")]
    for left, right in pairs:
        new_cols = ["{}_{}_score".format(left, right),
                    "{}_{}_hypothesis".format(left, right)]
        outcome = agg.apply(lambda row, a=left, b=right: wilcoxon_test(row[a], row[b]),
                            axis=1)
        agg[new_cols] = pd.DataFrame(outcome.values.tolist())
    # Order by strongest irblc-vs-glc signal and drop the raw per-run lists.
    agg = agg.reindex(agg.irblc_glc_score.abs().sort_values(ascending=False).index).drop(
        ["name", "trusted", "untrusted", "irbl", "irblc", "mixed", "glc", "symetric", "total", "kdr"], axis=1)
    agg.to_csv("{}/wilcoxon-{}.csv".format(resdir, criteria), index=False)
    return ((agg["irblc_glc_score"] > 1.96).sum(), (agg["irblc_glc_score"] < -1.96).sum())
def wilcoxon_plot(figdir, resdir, criteria, comp1, comp2):
    """Scatter-plot Wilcoxon wins/ties/losses of comp1 vs comp2 over (p, q)."""
    agg = pd.read_csv("{}/wilcoxon-{}.csv".format(resdir, criteria))
    # Display positions for the p values on the y axis.
    p_positions = {0.02: 0, 0.05: 0.5, 0.1: 1, 0.25: 1.5}
    ps = agg["p"].sort_values().unique()
    qs = agg["q"].sort_values().unique()
    score_col = "{}_{}_score".format(comp1, comp2)
    fig, ax = plt.subplots(figsize=(5, 2.5))
    # |score| > 1.96 is significant at the 5% level.
    wins = agg[agg[score_col] > 1.96]
    losses = agg[agg[score_col] < -1.96]
    ties = agg[(agg[score_col] < 1.96) & (agg[score_col] > -1.96)]
    ax.scatter(wins["q"], np.array([p_positions[x] for x in wins["p"].values]),
               color="black", facecolor="white", label="win")
    ax.scatter(ties["q"], np.array([p_positions[x] for x in ties["p"].values]),
               color="black", marker=".", s=1, label="tie")
    ax.scatter(losses["q"], np.array([p_positions[x] for x in losses["p"].values]),
               color="black", label="loss")
    ax.set_xlabel("q = 1-r")
    ax.set_ylabel("p")
    ax.set_xticks(qs)
    ax.set_yticks(np.array([p_positions[x] for x in ps]))
    ax.set_xticklabels(qs)
    ax.set_yticklabels(ps)
    plt.tight_layout()
    filename = "{}/wilcoxon-{}-{}-{}.pdf".format(figdir, criteria, comp1, comp2)
    fig.savefig(filename, bbox='tight', bbox_inches="tight", format="pdf")
    plt.close(fig)
# ---------------------------------------------------------------------------
# Experiment configuration.
# NOTE(review): the beta_* tuples appear to be (min, max) hyper-parameter
# ranges consumed by loop(); every range here is collapsed to a single value
# — confirm against the loop() implementation.
# ---------------------------------------------------------------------------
optimizer = "sgd"
beta_batch_size = (24, 24)
batch_size = 24
beta_epochs = (20, 20)
epochs = 20
beta_learning_rate = (0.005, 0.005)
learning_rate = 0.005
beta_weight_decay = (1e-6, 1e-6)
weight_decay = 1e-6
beta_hidden_size = (0, 0)
hidden_size = 0
calibration_method = "isotonic"
# Lazy dataset constructors (defined elsewhere in this file) and their names;
# the two lists are index-aligned.
dss = [
    ad,
    banknote,
    ibn_sina,
    eeg,
    ijcnn1,
    adult,
    phishing,
    spam,
    musk,
    australian,
    diabetes,
    breast,
    german,
    fourclass,
    svmguide3,
    svmguide1,
    web,
    hiva,
    sylva,
    zebra,
]
names = [
    "ad",
    "banknote",
    "ibn_sina",
    "eeg",
    "ijcnn1",
    "adult",
    "phishing",
    "spam",
    "musk",
    "australian",
    "diabetes",
    "breast",
    "german",
    "fourclass",
    "svmguide3",
    "svmguide1",
    "web",
    "hiva",
    "sylva",
    "zebra",
]
# Label-corruption generators and their output directory names.
# NOTE(review): the directory name "ncar" is paired with the
# noisy_not_at_random generator while "nnar" is commented out — the
# name/kind pairing looks swapped; verify.
cr_kinds = [  # noisy_completly_at_random,
    noisy_not_at_random
]
cr_names = ["ncar",
            # "nnar"
            ]
# Trusted-data fractions p and clean-label fractions q (q = 1 - r).
ps = [0.02, 0.05, 0.1, 0.25]
qs = [1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0]
# Main experiment: for every corruption kind x dataset x (p, q) setting,
# corrupt the untrusted split, train all competing methods via loop(), and
# plot/aggregate the results.
for cr_idx, cr_kind in enumerate(cr_kinds):
    base_dir = cr_names[cr_idx]
    for ds_idx, ds_lazy in enumerate(dss):
        name = names[ds_idx]
        print(name)
        ds_dir = "{}/{}".format(base_dir, name)
        if not os.path.exists(ds_dir):
            os.makedirs(ds_dir)
        dataset = ds_lazy()
        train, test = split_scale_dataset(dataset, 0.2)
        print("total")
        # Baseline model trained on the full (uncorrupted) training data.
        total_model, total = normal(train, test, optimizer, batch_size, epochs, learning_rate, weight_decay, hidden_size)
        total.to_csv("{}/total-perfs.csv".format(ds_dir), index=False)
        for _, p in enumerate(ps):
            trusted, untrusted = split_dataset(train, (1 - p))
            for _, q in enumerate(qs):
                print(name, p, q)
                # NOTE(review): `dir` shadows the builtin of the same name.
                dir = "{}-{}-{}".format(ds_dir, p, q)
                if not os.path.exists(dir):
                    os.makedirs(dir)
                # corrupted = corrupt_dataset(untrusted, cr_kind, 1 - q)
                # Use with NNAR: the flip probability is driven by the
                # baseline model's predicted positive-class probability.
                corrupted = corrupt_dataset(untrusted, lambda y, ratio: cr_kind(torch.nn.functional.softmax(total_model(torch.from_numpy(untrusted[:][0])), dim=1)[:, 1].detach().numpy(), y, ratio), 1 - q)
                # Fraction of labels actually flipped.
                print(np.sum(corrupted[:][1] != untrusted[:][1]) / len(corrupted[:][1] != untrusted[:][1]))
                # Persist the flip mask; trusted rows are marked with 2.0.
                # NOTE(review): pd.Series.append is deprecated in recent pandas.
                pd.Series(np.full(len(trusted), 2.0)).append(pd.Series(corrupted[:][1] != untrusted[:][1]).astype(int)).to_csv(
                    "{}/flipped.csv".format(dir), index=False, header=False)
                loop(dir, trusted, corrupted, test, optimizer, beta_batch_size,
                     batch_size, beta_epochs, epochs, beta_learning_rate,
                     learning_rate, beta_weight_decay, weight_decay, beta_hidden_size, hidden_size, calibration_method)
                hist_plot("{}-figures".format(base_dir), base_dir, name, p, q)
                learning_curve_plot("{}-figures".format(base_dir), base_dir, name, p, q, "mean_valid_losse")
                learning_curve_plot("{}-figures".format(base_dir), base_dir, name, p, q, "acc")
            generate_results(base_dir, [name], [p], qs, "acc")
            error_curve_plot("{}-figures".format(base_dir), base_dir, name, p, qs, "acc")
            box_plot2("{}-figures".format(base_dir), base_dir, name, p, qs)
# Aggregate over all datasets and run the statistical comparisons.
generate_results(base_dir, names, ps, qs, "acc")
generate_wilcoxon(base_dir, "acc")
wilcoxon_plot("{}-figures".format(base_dir), base_dir, "acc", "irblc", "glc")
wilcoxon_plot("{}-figures".format(base_dir), base_dir, "acc", "irblc", "mixed")
wilcoxon_plot("{}-figures".format(base_dir), base_dir, "acc", "irblc", "trusted")
wilcoxon_plot("{}-figures".format(base_dir), base_dir, "acc", "irblc", "untrusted")
wilcoxon_plot("{}-figures".format(base_dir), base_dir, "acc", "irblc", "total")
wilcoxon_plot("{}-figures".format(base_dir), base_dir, "acc", "irblc", "symetric")
wilcoxon_plot("{}-figures".format(base_dir), base_dir, "acc", "irblc", "kdr")
results = pd.read_csv("{}/results-{}.csv".format(base_dir, "acc"))
method_names = ["trusted", "rll", "irbl", "glc", "mixed"]
final = results.groupby(["name"]).sum().reset_index()
# Friedman test over the per-dataset sums; avranks feeds the CD diagram.
avranks = friedman_test(final["trusted"].values, final["symetric"].values, final["irblc"].values, final["glc"].values,
                        final["mixed"].values, reverse=False)[2]
cd = Orange.evaluation.compute_CD(avranks, 20)
Orange.evaluation.graph_ranks(avranks, method_names, cd=cd, width=6, textspace=1)
# NOTE(review): this format string receives two arguments but has only one
# placeholder — the "acc" argument is silently ignored.
plt.savefig("{}/cd.pdf".format("{}-figures".format(base_dir), "acc"), bbox = 'tight', bbox_inches="tight", format="pdf")
print(wilcoxon_test(final["irblc"].values, final["glc"].values))
print(wilcoxon_test(final["irblc"].values, final["mixed"].values))
generate_results(base_dir, names, ps, qs, "acc")
results = pd.read_csv("{}/results-{}.csv".format(base_dir, "acc"))
# Convert the stored errors back to accuracy percentages.
results["irblc"] = 100 * (1 - results["irblc"])
results["trusted"] = 100 * (1 - results["trusted"])
results["symetric"] = 100 * (1 - results["symetric"])
results["glc"] = 100 * (1 - results["glc"])
results["mixed"] = 100 * (1 - results["mixed"])
results["total"] = 100 * (1 - results["total"])
results["kdr"] = 100 * (1 - results["kdr"])
results = results.drop(["untrusted", "irbl"], axis=1)
results.groupby(["p", "name"]).agg(["mean", "std"]).drop("q", axis=1, level=0).reset_index().groupby("p").mean().to_csv("{}/aggregated-results-{}.csv".format(base_dir, "acc"), index=False)
| 67,075 | 35.673592 | 199 | py |
mooc_knowledge_gain | mooc_knowledge_gain-main/weka_evaluation/average_result.py | import argparse
import os
import pandas as pd
import glob
import re
import numpy as np
import matplotlib.pyplot as plt
from os.path import basename
# Columns extracted from the raw Weka result CSVs.  Downstream code relies on
# these positions: index 1 = Percent_correct and 7/8/9 =
# IR_precision/IR_recall/F_measure.  'Kappa_statistic' was previously listed
# twice (a copy-paste duplicate); removing the second occurrence only shifts
# positions greater than 9, none of which are consumed.
required_columns = ['Key_Dataset', 'Percent_correct', 'Kappa_statistic', 'Mean_absolute_error',
                    'Root_mean_squared_error', 'Relative_absolute_error',
                    'Root_relative_squared_error', 'IR_precision', 'IR_recall', 'F_measure',
                    'Matthews_correlation']
def parse_arguments():
    """Parse the CLI options: the Weka CSV directory and the k of k-fold."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--path', type=str, required=True, help='the path of the weka csvs')
    parser.add_argument('-k', '--k_fold', type=int, required=True, help='used k_fold for calculation')
    return parser.parse_args()
def preprocess_csv(csv_path):
    ''' Removes unwanted columns from a csv-object
    :param csv_path: path of csv
    :return: DataFrame restricted to required_columns
    '''
    frame = pd.read_csv(csv_path, encoding='utf-8')
    return frame[required_columns]
def calculate_class_average(df, k):
    ''' Calculates the average of the k-fold results of a csv-file
    :param df: loaded csv-file (one row per fold result; first column is the
        result file name)
    :param k: k parameter of k-fold
    :return: tuple (averaged, std) where each element is a list of rows of the
        form [name, metric1, metric2, ...]
    '''
    # get length of different feature classes (counted via substring match on
    # the file-name column)
    file_names = df.iloc[:, 0].tolist()
    all_len = sum('all_features' in s for s in file_names)
    embd_len = sum('embedding' in s for s in file_names)
    multi_len = sum('multimedia_features' in s for s in file_names)
    text_len = sum('text_features' in s for s in file_names)
    splits = df.copy()
    # create new index array: rows belonging to the same configuration across
    # the k folds receive the same integer index
    indexes = np.concatenate([[i % int(all_len / k) for i in range(all_len)],
                              [i % int(embd_len / k) + int(all_len / k) for i in range(embd_len)],
                              [(i % int(multi_len / k)) + int((all_len + embd_len) / k) for i in range(multi_len)],
                              [(i % int(text_len / k)) + int((all_len + embd_len + multi_len) / k) for i in range(text_len)]], axis=None)
    splits.index = indexes
    # NOTE(review): rows are grouped by converting the frame into a MultiIndex
    # and calling Index.groupby, which yields a dict {index value -> tuples of
    # row values}; verify this still behaves as expected on newer pandas.
    changed = pd.MultiIndex.from_frame(splits)
    # caluclate average for every subset
    groups = changed.groupby(splits.index)
    pattern = r'_\d+_'  # collapses '_<digits>_' to '_' (drops the fold index)
    averaged = []
    std = []
    for i in range(len(groups)):
        group = groups[i]
        name = re.sub(pattern, '_', group[0][0])
        values = np.zeros((len(required_columns) - 1), dtype=object)
        # calculate standard derivation across the k fold rows
        std.append([name] + groups[i].to_frame().std().to_numpy().tolist())
        # calculate average
        # NOTE(review): the inner loop reuses the name `i`, shadowing the
        # outer group counter; harmless because the outer `for` rebinds it,
        # but fragile.
        for current in group:
            for i in range(len(required_columns) - 1):
                values[i] += current[i+1]
        # average the values
        values = values / k
        # create name for average
        values = np.insert(values, 0, name)
        averaged.append(values.tolist())
    return averaged, std
def calculate_predictor_average(low, moderate, high):
    """Average the Low/Moderate/High per-class rows element-wise.

    :param low: per-class result rows ([name, metric1, ...]) for class Low
    :param moderate: result rows for class Moderate (same length as low)
    :param high: result rows for class High (same length as low)
    :return: tuple (averaged, std) with one row per input row; std rows hold
        the standard deviation of the three class values per metric
    """
    averaged = []
    std_all = []
    n_metrics = len(required_columns) - 1
    for row_low, row_mod, row_high in zip(low, moderate, high):
        label = row_low[0]
        means = []
        stds = [label]
        for j in range(1, n_metrics + 1):
            triple = [row_low[j], row_mod[j], row_high[j]]
            means.append(sum(triple) / 3)
            stds.append(np.std(triple))
        averaged.append([label] + means)
        std_all.append(stds)
    return averaged, std_all
def visualize_results(averaged, fn):
    """Plot averaged results per feature-class subset and save the figures.

    :param averaged: list of rows [name, metric1, metric2, ...]
    :param fn: base file name for the generated figures
    """
    # get length of different feature classes
    file_names = [x[0] for x in averaged]
    all_len = sum('all_features' in s for s in file_names)
    embd_len = sum('embedding' in s for s in file_names)
    multi_len = sum('multimedia_features' in s for s in file_names)
    #text_len = sum('text_features' in s for s in file_names)
    # store visualization for all features
    a = averaged[:all_len]
    # NOTE(review): by required_columns order item[1] is Percent_correct and
    # item[3] is Mean_absolute_error, yet the latter is plotted as "f1" —
    # verify the intended metric indices.
    prec_all = [round(item[1], 2) for item in a]
    f1_all = [round(item[3] * 100, 2) for item in a]
    x_all = [get_feature_amount(item[0]) for item in a]
    visualize_result(x_all, prec_all, f1_all, fn + '_all_features')
    # store visulaization for embedding features
    embd = averaged[all_len:all_len + embd_len]
    prec_embd = [round(item[1], 2) for item in embd]
    f1_embd = [round(item[3] * 100, 2) for item in embd]
    x_emd = [get_feature_amount(item[0]) for item in embd]
    # NOTE(review): the embedding figures reuse the '_all_features' suffix
    # and therefore overwrite the previous plots — likely a copy-paste slip.
    visualize_result(x_emd, prec_embd, f1_embd, fn + '_all_features')
    # store visualization for multimedia features
    multi = averaged[all_len+embd_len:all_len + embd_len + multi_len]
    prec_multi = [round(item[1], 2) for item in multi]
    f1_multi = [round(item[3] * 100, 2) for item in multi]
    x_multi = [get_feature_amount(item[0]) for item in multi]
    visualize_result(x_multi, prec_multi, f1_multi, fn + '_multimedia_features')
    # store visualization for text features
    text = averaged[multi_len+ + embd_len + all_len:]
    prec_text = [round(item[1], 2) for item in text]
    f1_text = [round(item[3] * 100, 2) for item in text]
    x_text = [get_feature_amount(item[0]) for item in text]
    visualize_result(x_text,prec_text, f1_text, fn + '_text_features')
def get_feature_amount(name):
    """Extract the number of features encoded in a result file name.

    Names like '..._25features_...' yield 25.  Embedding names carry no
    count; 'both' maps to 2 and 'only' to 1.
    """
    count_match = re.search(r'_\d+features_', name)
    if count_match:
        # '_25features_' -> strip the leading '_' and trailing 'features_'.
        return int(count_match[0][1:-len('features_')])
    kind_match = re.search(r'only|both', name)
    return 2 if kind_match[0] == 'both' else 1
def visualize_result(x, precision, f1, name):
    """Plot precision and F1 over feature counts and save both figures."""
    # Sort all three series jointly by feature amount for a clean line plot.
    ordered = sorted(zip(x, precision, f1), key=lambda triple: triple)
    x, precision, f1 = zip(*ordered)
    x_upper = np.amax(x) + 5
    for series, y_label, suffix in ((precision, 'avg precision in %', '_precision'),
                                    (f1, 'avg f1-score in %', '_f1')):
        plt.plot(x, series, '-o')
        plt.title(name)
        plt.xlabel('amount of features')
        plt.ylabel(y_label)
        plt.xlim(0, x_upper)
        plt.ylim(0, 100)
        plt.savefig('./generated_results/' + name + suffix + '.png')
        plt.clf()
def create_csv(low, std_low, moderate, std_moderate, high, std_high, averaged, std_averaged, fn):
    """Write a summary CSV with P/R/F1 (and std) per class plus the average.

    Row indices follow the required_columns order: index 1 is
    Percent_correct and 7/8/9 are IR_precision/IR_recall/F_measure.
    NOTE(review): the local name `csv` shadows the imported csv module.
    """
    class_names = ['', '', '', '', 'Low', '', '', '', 'Moderate', '', '', '', 'High', '', '', '', 'Average']
    value_names = ['Dataset', 'P', 'std P', 'R', 'std R', 'F1', 'std F1', 'P', 'std P', 'R', 'std R', 'F1', 'std F1',
                   'P', 'std P', 'R', 'std R', 'F1', 'std F1', 'P', 'std P', 'R', 'std R', 'F1', 'std F1',
                   'Percent_correct', 'std Percent_correct']
    csv = [class_names, value_names]
    for i in range(len(low)):
        # the standard derivation for the overall precision can be used from low, moderate or high (same values)
        row = np.round([low[i][7], std_low[i][7], low[i][8], std_low[i][8], low[i][9], std_low[i][9], moderate[i][7],
                        std_moderate[i][7], moderate[i][8], std_moderate[i][8], moderate[i][9], std_moderate[i][9],
                        high[i][7], std_high[i][7], high[i][8], std_high[i][8], high[i][9], std_high[i][9],
                        averaged[i][7], std_averaged[i][7], averaged[i][8], std_averaged[i][8], averaged[i][9],
                        std_averaged[i][9], averaged[i][1], std_low[i][1]], 2).tolist()
        row.insert(0, low[i][0])
        csv.append(row)
    df = pd.DataFrame(csv)
    df.to_csv('./generated_results/' + fn + '_avg.csv', index=False, sep=',', encoding='utf-8', header=False)
def calculate_average_values(df, k, fn):
    """Split df into three equal class sections, average each, write the CSV.

    The frame is expected to contain the Low, Moderate and High rows in three
    consecutive equal-sized sections.
    """
    section = int(df.shape[0] / 3)
    parts = [df[i * section:(i + 1) * section].reset_index(drop=True) for i in range(3)]
    low, std_low = calculate_class_average(parts[0], k)
    moderate, std_moderate = calculate_class_average(parts[1], k)
    high, std_high = calculate_class_average(parts[2], k)
    averaged, std_avg = calculate_predictor_average(low, moderate, high)
    create_csv(low, std_low, moderate, std_moderate, high, std_high, averaged, std_avg, fn)
    return df
def main():
    """Average every Weka result CSV found under --path."""
    args = parse_arguments()
    if args.k_fold <= 1:
        print('no k-fold is needed because k is <= 1')
        return 0
    if not os.path.exists('./generated_results/'):
        os.makedirs('./generated_results/')
    # process all files
    for filepath in glob.glob(''.join([args.path, '/*.csv'])):
        # file name without the .csv extension
        fn = basename(filepath)[:-4]
        df = preprocess_csv(filepath).fillna(0)
        print(f'Calculate average value of {fn}')
        calculate_average_values(df, args.k_fold, fn)
# Script entry point: average the Weka k-fold results found under --path.
if __name__ == "__main__":
    main()
| 9,251 | 38.370213 | 132 | py |
mooc_knowledge_gain | mooc_knowledge_gain-main/dataset_processing/merge_files.py | import sys
import argparse
import csv
import pandas as pd
def parse_arguments():
    """Parse command-line options for the merge script."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--folder', type=str, required=False, default='all',
                        help='set the foldername of the dataset')
    parser.add_argument('-k', '--k_fold', type=int, required=False, default=5,
                        help='set the k for k-fold split')
    return parser.parse_args()
def merge_csv(first, second, result):
    """Merge two open CSV files row-wise into `result`.

    The last column of every row in `first` (the knowledge-gain value) is
    dropped and the matching row of `second` is appended.  Exits the process
    when the files do not have the same number of rows.

    :param first: open file object of the first CSV
    :param second: open file object of the second CSV
    :param result: open file object the merged CSV is written to
    """
    rows_first = list(csv.reader(first, delimiter=','))
    rows_second = list(csv.reader(second, delimiter=','))
    if len(rows_first) != len(rows_second):
        # files must come from the same data, so row counts have to match
        print("No same row length")
        first.close()
        second.close()
        sys.exit(1)
    writer = csv.writer(result, delimiter=',')
    for left, right in zip(rows_first, rows_second):
        writer.writerow(left[:-1] + right)
    result.close()
    first.close()
    second.close()
def merge_df(first, second, name):
    """Merge a feature CSV with an embedding CSV column-wise and persist it.

    Drops the Knowledge_Gain_Level column from the feature frame and every
    one-hot Person_ID column from the embedding frame, then writes the
    concatenation to ./merged/merged_embeddings/{train,test}/<name>
    (the split is chosen by whether 'test' occurs in the file name).

    Fix: the second CSV was previously read from disk twice; it is now read
    once and filtered in memory.

    :param first: path of the feature CSV
    :param second: path of the embedding CSV
    :param name: output file name
    """
    first_df = pd.read_csv(first, encoding='utf-8').drop(['Knowledge_Gain_Level'], axis=1)
    second_df = pd.read_csv(second, encoding='utf-8')
    second_df = second_df.drop(list(second_df.filter(regex='Person_ID')), axis=1)
    merged = pd.concat([first_df, second_df], axis=1)
    subdir = 'test' if 'test' in name else 'train'
    merged.to_csv(f'./merged/merged_embeddings/{subdir}/{name}',
                  index=False, encoding='utf-8', quoting=csv.QUOTE_NONNUMERIC)
# Script entry point: merge every feature CSV with each embedding CSV for all
# k folds, for both the train and the test split.
if __name__ == "__main__":
    args = parse_arguments()
    subfolder = 'avg' if args.folder == 'avg' else 'all'
    for path, end in [(f'./feature_selection/{subfolder}/train/', ''), (f'./feature_selection/{subfolder}/test/', '_test')]:
        for i in range(args.k_fold):
            i = i + 1  # fold numbers in the file names are 1-based
            # Bug fix: this previously compared against the builtin `all`
            # function (always False), so the per-person branch never ran.
            if subfolder == 'all':
                merge_df(f'{path}all_features_{i}_drop_column_persons_influence_without{end}.csv', f'{path}embedding_{i}_persons_both_embd{end}.csv', f'all_features_{i}_persons_both_embed{end}.csv')
                merge_df(f'{path}multimedia_features_{i}_drop_column_persons_influence_without{end}.csv', f'{path}embedding_{i}_persons_both_embd{end}.csv', f'multimedia_features_{i}_persons_both_embed{end}.csv')
                merge_df(f'{path}text_features_{i}_drop_column_persons_influence_without{end}.csv', f'{path}embedding_{i}_persons_both_embd{end}.csv', f'text_features_{i}_persons_both_embed{end}.csv')
                merge_df(f'{path}all_features_{i}_drop_column_persons_influence_without{end}.csv', f'{path}embedding_{i}_persons_only_slide{end}.csv', f'all_features_{i}_persons_slides_embed{end}.csv')
                merge_df(f'{path}multimedia_features_{i}_drop_column_persons_influence_without{end}.csv', f'{path}embedding_{i}_persons_only_slide{end}.csv', f'multimedia_features_{i}_persons_slides_embed{end}.csv')
                merge_df(f'{path}text_features_{i}_drop_column_persons_influence_without{end}.csv', f'{path}embedding_{i}_persons_only_slide{end}.csv', f'text_features_{i}_persons_slides_embed{end}.csv')
                merge_df(f'{path}all_features_{i}_drop_column_persons_influence_without{end}.csv', f'{path}embedding_{i}_persons_only_transcript{end}.csv', f'all_features_{i}_persons_srt_embed{end}.csv')
                merge_df(f'{path}multimedia_features_{i}_drop_column_persons_influence_without{end}.csv', f'{path}embedding_{i}_persons_only_transcript{end}.csv', f'multimedia_features_{i}_persons_srt_embed{end}.csv')
                merge_df(f'{path}text_features_{i}_drop_column_persons_influence_without{end}.csv', f'{path}embedding_{i}_persons_only_transcript{end}.csv', f'text_features_{i}_persons_srt_embed{end}.csv')
            # average results
            else:
                merge_df(f'{path}all_features_{i}_drop_column_videos_influence_without{end}.csv', f'{path}embedding_{i}_videos_both_embd{end}.csv', f'all_features_{i}_videos_both_embed{end}.csv')
                merge_df(f'{path}multimedia_features_{i}_drop_column_videos_influence_without{end}.csv', f'{path}embedding_{i}_videos_both_embd{end}.csv', f'multimedia_features_{i}_videos_both_embed{end}.csv')
                merge_df(f'{path}text_features_{i}_drop_column_videos_influence_without{end}.csv', f'{path}embedding_{i}_videos_both_embd{end}.csv', f'text_features_{i}_videos_both_embed{end}.csv')
                merge_df(f'{path}all_features_{i}_drop_column_videos_influence_without{end}.csv', f'{path}embedding_{i}_videos_only_slide{end}.csv', f'all_features_{i}_videos_slides_embed{end}.csv')
                merge_df(f'{path}multimedia_features_{i}_drop_column_videos_influence_without{end}.csv', f'{path}embedding_{i}_videos_only_slide{end}.csv', f'multimedia_features_{i}_videos_slides_embed{end}.csv')
                merge_df(f'{path}text_features_{i}_drop_column_videos_influence_without{end}.csv', f'{path}embedding_{i}_videos_only_slide{end}.csv', f'text_features_{i}_videos_slides_embed{end}.csv')
                merge_df(f'{path}all_features_{i}_drop_column_videos_influence_without{end}.csv', f'{path}embedding_{i}_videos_only_transcript{end}.csv', f'all_features_{i}_videos_srt_embed{end}.csv')
                merge_df(f'{path}multimedia_features_{i}_drop_column_videos_influence_without{end}.csv', f'{path}embedding_{i}_videos_only_transcript{end}.csv', f'multimedia_features_{i}_videos_srt_embed{end}.csv')
                merge_df(f'{path}text_features_{i}_drop_column_videos_influence_without{end}.csv', f'{path}embedding_{i}_videos_only_transcript{end}.csv', f'text_features_{i}_videos_srt_embed{end}.csv')
| 6,274 | 67.956044 | 217 | py |
mooc_knowledge_gain | mooc_knowledge_gain-main/dataset_processing/feature_selection.py | import os
import argparse
from math import ceil
import pandas as pd
import csv
import numpy as np
import random
import scipy
from sklearn.model_selection import KFold, RepeatedKFold, StratifiedKFold
from sklearn.preprocessing import MinMaxScaler
from rfpimp import permutation_importances, dropcol_importances, oob_classifier_accuracy # feature importance
import shap # feature importance
from sklearn.inspection import permutation_importance
from sklearn.feature_selection import VarianceThreshold
from sklearn.ensemble import RandomForestClassifier
from copy import copy
from sklearn.metrics import precision_recall_fscore_support, accuracy_score
from statistics import mean
from natsort import natsorted
def parse_arguments():
    """Parse CLI options controlling the feature-selection run."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-k', '--k_fold', type=int, required=False, default=5, help='set the k for k-fold split')
    parser.add_argument('-p', '--path', type=str, required=True, help='the path of the csvs')
    parser.add_argument('-m', '--method', type=str, required=False, default='python',
                        help='choose weka for later preprocessing or preprocess with pearson')
    parser.add_argument('-f', '--filter', type=str, required=False, default='influence',
                        help='choose if threshold or amount of features is used to create subsets of features')
    parser.add_argument('-i', '--feature_importance', type=str, required=False, default='permutation',
                        help='choose the feature importance algorithm')
    return parser.parse_args()
def preprocess_csv(csv_path, use_avg=False, kg_str='Knowledge_Gain_Level'):
    """Read a feature CSV and separate features from knowledge-gain labels.

    :param csv_path: path of the CSV file
    :param use_avg: average rows per video first and recompute the level
    :param kg_str: name of the label column returned alongside Video_ID
    :return: tuple (features, knowledge) of DataFrames; the knowledge-gain
        columns are removed from the feature frame so they cannot be selected
        as features
    """
    data = pd.read_csv(csv_path, encoding='utf-8')
    if use_avg:
        data = calculate_kg_level(get_vid_avg(data))
    knowledge = data[['Video_ID', kg_str]]
    features = data.drop(['Knowledge_Gain', 'Knowledge_Gain_Level'], axis=1)
    return features, knowledge
def store_csv(data, name):
    """Write `data` to the weka folder sorted Low < Moderate < High."""
    level_order = {'Low': 0, 'Moderate': 1, 'High': 2}
    ordered = data.sort_values(by=['Knowledge_Gain_Level'],
                               key=lambda col: col.map(level_order))
    ordered.to_csv(f'./feature_selection/weka/{name}.csv', index=False, encoding='utf-8')
def scale_data(train_X, test_X):
    """Fit a MinMaxScaler on the training split and transform both splits.

    :return: tuple (scaled_train_X, scaled_test_X) of copied arrays
    """
    scaler = MinMaxScaler().fit(train_X)
    return copy(scaler.transform(train_X)), copy(scaler.transform(test_X))
def get_vid_avg(data):
    """Collapse the per-person rows into one representative row per video.

    Person_ID and Knowledge_Gain_Level are dropped; for each video the first
    row of the group is kept and its Knowledge_Gain is replaced by the group
    mean.

    Fix: rows are collected and concatenated once instead of growing the
    frame with pd.concat inside the loop (which is O(n^2)).

    :param data: DataFrame with Person_ID, Knowledge_Gain_Level, Video_ID and
        Knowledge_Gain columns
    :return: one-row-per-video DataFrame, or None when `data` has no rows
    """
    data = copy(data.drop(['Person_ID', 'Knowledge_Gain_Level'], axis=1))
    rows = []
    for _, group in data.groupby('Video_ID'):
        representative = copy(group.head(1))
        representative['Knowledge_Gain'] = group['Knowledge_Gain'].mean()
        rows.append(representative)
    # Preserve the original behaviour of returning None for empty input.
    return pd.concat(rows) if rows else None
def z_score_convert(value):
    """Map a z-score to a knowledge-gain level label.

    Values within [-0.5, 0.5] are 'Moderate', below -0.5 'Low', else 'High'.
    """
    in_moderate_band = -0.5 <= value <= 0.5
    if in_moderate_band:
        return 'Moderate'
    return 'Low' if value < -0.5 else 'High'
def calculate_kg_level(data):
    """Attach a categorical Knowledge_Gain_Level column derived from the
    z-scores of the numeric Knowledge_Gain column.

    :param data: DataFrame with a Knowledge_Gain column (mutated in place)
    :return: the same DataFrame with the new level column
    """
    scores = scipy.stats.zscore(data['Knowledge_Gain'].to_numpy())
    data['Knowledge_Gain_Level'] = [z_score_convert(score) for score in scores]
    return data
def convert_person_id(data):
    """Replace the integer Person_ID column with a 13-way one-hot encoding.

    Fix: ``np.eye(num_participants)[ids]`` is already shaped
    (n_rows, 13); the previous ``np.squeeze`` collapsed single-row inputs to
    one dimension and broke the DataFrame construction.

    :param data: DataFrame containing a 1-based integer Person_ID column
    :return: DataFrame with Person_ID replaced by Person_ID_1..13 int columns
    """
    num_participants = 13
    person_id_columns = [f'Person_ID_{i + 1}' for i in range(num_participants)]
    ids = data['Person_ID'].to_numpy() - 1
    categorical = np.eye(num_participants)[ids]
    categorical = pd.DataFrame(data=categorical, columns=person_id_columns).astype(int)
    data = data.drop(['Person_ID'], axis=1)
    for column in person_id_columns:
        data[column] = categorical[column].to_numpy()
    return data
def remove_redudant_features():
    # Unimplemented stub (note the typo: "redudant" -> "redundant").
    # Currently a no-op that returns None.
    return
def get_permutation_importance(name, X_train, y_train, X_test, y_test, k, store=False):
    """Feature importance via sklearn permutation importance on a depth-1
    random forest; prints and optionally stores the non-zero importances.

    :param name: dataset name, used in the stored file name
    :param X_train: training feature DataFrame
    :param y_train: DataFrame with a 'Knowledge_Gain_Level' column
    :param X_test: unused (kept for a uniform importance-method signature)
    :param y_test: unused
    :param k: fold index, used in the stored file name
    :param store: persist the importances to disk when True
    :return: pd.Series of mean importances indexed by feature name
    """
    model = RandomForestClassifier(max_depth=1, random_state=999)
    # Commented-out alternative models:
    # model = make_pipeline(StandardScaler(), SVC(gamma='auto'))
    # model = GaussianNB()
    # print(f"x-shape:{X_train.shape}, y-shape:{X_test.shape}")
    model.fit(X_train, y_train["Knowledge_Gain_Level"].to_numpy())
    r = permutation_importance(model, X_train, y_train, n_repeats=30, random_state=999,
                               scoring='accuracy')
    amount_importance = 0
    names = []
    values = []
    columns = X_train.columns.to_numpy()
    # Report features sorted by absolute mean importance, keeping only those
    # whose mean + std is non-zero.
    for i in abs(r.importances_mean).argsort()[::-1]:
        if r.importances_mean[i] + r.importances_std[i] != 0:
            print(f"{columns[i]} "
                  f"{r.importances_mean[i]:.3f}"
                  f" +/- {r.importances_std[i]:.3f}")
            amount_importance += 1
            names.append(columns[i])
            values.append(r.importances_mean[i])
    print(f"{amount_importance} features had an influence on the result")
    print()
    fi = pd.Series(values, names)
    if store:
        store_feature_importance(fi, name, 'permutation', k)
    return fi
def get_permutation_importance_rfpimp(name, X_train, y_train, X_test, y_test, k, store=False):
    """Permutation importance computed with rfpimp's OOB-accuracy variant.

    :return: pd.Series of importances sorted by absolute value (descending)
    """
    forest = RandomForestClassifier(max_depth=100, random_state=999)
    forest.fit(X_train, y_train["Knowledge_Gain_Level"].to_numpy())
    ranking = permutation_importances(forest, X_test, y_test["Knowledge_Gain_Level"],
                                      oob_classifier_accuracy)
    ranking = ranking.iloc[(-ranking['Importance'].abs()).argsort()]
    importance_series = ranking.iloc[:, 0]
    if store:
        store_feature_importance(importance_series, name, 'permutation_rfpimp', k)
    return importance_series
def get_drop_column_importance(name, X_train, y_train, X_test, y_test, k, store=False):
    """Drop-column importance (rfpimp) of a random forest, sorted descending.

    :return: pd.Series of importances indexed by feature name
    """
    forest = RandomForestClassifier(max_depth=100, random_state=999)
    ranking = dropcol_importances(forest, X_train, y_train["Knowledge_Gain_Level"].to_numpy(),
                                  X_test, y_test["Knowledge_Gain_Level"].to_numpy())
    ranking = ranking.iloc[(-ranking['Importance']).argsort()]
    importance_series = ranking.iloc[:, 0]
    if store:
        store_feature_importance(importance_series, name, 'drop_column', k)
    return importance_series
def get_shap_importance(name, X_train, y_train, X_test, y_test, k, store=False):
    """Feature 'importance' derived from a SHAP TreeExplainer.

    NOTE(review): ``explainer(X_train)[0].data`` reads the ``.data`` attribute
    of the explanation of the FIRST training sample (that row's raw feature
    values), not aggregated SHAP values — verify this is the intended metric.

    :return: pd.Series of per-feature values sorted ascending
    """
    model = RandomForestClassifier(max_depth=100, random_state=999)
    model.fit(X_train, y_train["Knowledge_Gain_Level"].to_numpy())
    explainer = shap.TreeExplainer(model)
    shap_values = explainer(X_train)[0].data
    columns = X_train.columns.to_list()
    # print(columns)
    # print(shap_values)
    sorted_columns = []
    sorted_importance = []
    # Order features by ascending value.
    for id in np.argsort(shap_values):
        sorted_columns.append(columns[id])
        sorted_importance.append(shap_values[id])
    fi = pd.Series(sorted_importance, sorted_columns)
    if store:
        store_feature_importance(fi, name, 'shap', k)
    return fi
def get_pearson_correlation(name, x_train, x_test, y_train, y_test, k, store=False):
    """Pearson correlation of every feature with the numeric label.

    NOTE: mutates x_train by inserting the label column in place.

    :return: pd.Series of correlations sorted descending
    """
    # Append the labels so DataFrame.corr covers features and target together.
    x_train.insert(len(x_train.columns), 'Knowledge_Gain_Level', y_train, True)
    # Correlation of each feature with the target; drop the self-correlation.
    correlations = x_train.corr()['Knowledge_Gain_Level'][:-1]
    correlations = correlations.sort_values(ascending=False)
    if store:
        store_feature_importance(correlations, name, 'pearson', k)
    return correlations
def store_feature_importance(importance, name, method, k):
    """Persist a feature-importance series sorted from most to least
    important (by raw value, descending)."""
    importance.name = 'Feature Importance'
    ordered = importance.reindex(importance.sort_values(ascending=False).index)
    ordered.to_csv(f'./feature_selection/feature_importance/{name}_{method}_correlation_set{k + 1}_features.csv',
                   index=True, encoding='utf-8')
def get_feature_importance(name, X_train, X_test, y_train, y_test, k, importance_method, store=False):
    """Dispatch to the requested feature-importance algorithm.

    Person_ID is excluded from the feature matrices and the nominal labels
    are mapped to integers (Low=0, Moderate=1, High=2) before computing the
    importance.  Unknown method names fall back to Pearson correlation.

    :return: pd.Series of feature importances (method-dependent ordering)
    """
    if 'Person_ID' in X_train:
        train_feats = copy(X_train).drop(['Person_ID'], axis=1)
        test_feats = copy(X_test).drop(['Person_ID'], axis=1)
    else:
        train_feats, test_feats = copy(X_train), copy(X_test)
    train_labels, test_labels = copy(y_train), copy(y_test)
    level_to_int = {'Low': 0, 'Moderate': 1, 'High': 2}
    train_labels.replace(level_to_int, inplace=True)
    test_labels.replace(level_to_int, inplace=True)
    dispatch = {
        'permutation': get_permutation_importance,
        'drop_column': get_drop_column_importance,
        'shap': get_shap_importance,
        'permutation_rfpimp': get_permutation_importance_rfpimp,
    }
    method_fn = dispatch.get(importance_method)
    if method_fn is not None:
        return method_fn(name, train_feats, train_labels, test_feats, test_labels, k, store)
    # Pearson takes (x_train, x_test, y_train, y_test) — a different order.
    return get_pearson_correlation(name, train_feats, test_feats, train_labels, test_labels, k, store)
def store_avg_importance(name, mean_values, fi_method, data_method):
    """Write averaged feature importances to CSV, strongest features first.

    Pearson values are ordered by absolute correlation (the sign only encodes
    direction); every other method is ordered by the raw value.
    """
    if fi_method == 'pearson':
        order = mean_values.abs().sort_values(ascending=False).index
    else:
        order = mean_values.sort_values(ascending=False).index
    mean_values.reindex(order).to_csv(
        f'./feature_selection/feature_importance/{name}_avg_{fi_method}_{data_method}_importance.csv',
        index=True,
        encoding='utf-8')
def get_non_relevant_features(data_x, data_y):
    """Return the names of constant-valued (zero-variance) columns in data_x.

    data_y is accepted for interface symmetry with the other helpers but is
    not used here.
    """
    # Keep only columns where at least one row differs from the first row.
    kept = copy(data_x.loc[:, (data_x != data_x.iloc[0]).any()])
    dropped = [column for column in data_x.columns if column not in kept.columns]
    print(f'Amount of non relevant features is {len(dropped)}')
    print(dropped)
    return dropped
def create_filtered_sets_best_features(name, fi, x_train, x_test, y_train, y_test, k,
                                       fi_method='pearson', data_method='persons'):
    """Store train/test subsets restricted to the top-N most important features.

    Bug fix: store_filtered() requires fi_method and data_method, but the old
    call omitted them and raised a TypeError. They are now threaded through
    with backward-compatible defaults. The dead `natsorted(specific_idx)`
    assignment (its result was only used for truthiness) was removed.
    """
    # Feature-count schedules per feature set.
    all_features = [374, 350, 300, 250, 200, 150, 100, 90, 80, 70, 60, 50, 45, 40, 35, 30, 25, 20, 15, 10, 5, 1]
    text_features = [337, 300, 250, 200, 150, 100, 90, 80, 70, 60, 50, 45, 40, 35, 30, 25, 20, 15, 10, 5, 1]
    multimedia_features = [37, 35, 30, 25, 20, 15, 10, 5, 1]
    # Remove the one-hot Person_ID columns from the ranking: the ID must stay
    # (or go) as one complete vector, not column by column.
    specific_idx = [index for index in fi.index if 'Person_ID' in index]
    if specific_idx:
        fi = fi.drop(specific_idx)
    # Pick the schedule matching this feature set.
    if name == 'text_features':
        max_features = text_features
    elif name == 'multimedia_features':
        max_features = multimedia_features
    else:
        max_features = all_features
    # Rank by absolute importance (sign only encodes direction).
    fi = fi.reindex(fi.abs().sort_values(ascending=False).index)
    for max_feature in max_features:
        features = list(fi[0:max_feature].index)
        if specific_idx:
            # Re-attach the Person_ID vector whenever it was present originally.
            features = features + ['Person_ID']
        filtered_train = copy(x_train[features])
        filtered_test = copy(x_test[features])
        max_string = ''.join([str(max_feature), 'features'])
        store_filtered(name, filtered_train, y_train, filtered_test, y_test, max_string, k,
                       fi_method, data_method)
def create_filtered_sets_threshold(name, fi, x_train, x_test, y_train, y_test, k,
                                   fi_method='pearson', data_method='persons'):
    """Store train/test subsets keeping features with |importance| above a threshold.

    Thresholds run from 0 upward in steps of 0.01 until no feature survives.
    Bug fix: store_filtered() requires fi_method and data_method, but the old
    call omitted them and raised a TypeError. They are now threaded through
    with backward-compatible defaults.
    """
    fi = fi.sort_values(ascending=False)
    # Scan thresholds up to the largest absolute importance value (x100 steps).
    stop_i = int(ceil(fi[0] * 100)) if fi[0] >= abs(fi[-1]) else int(ceil(abs(fi[-1]) * 100))
    previous = -1
    for threshold in range(0, stop_i):
        threshold = threshold * 0.01
        features = list(fi[abs(fi) > threshold].index)
        # Stop once no feature survives the threshold.
        if not len(features):
            break
        # Skip thresholds that reproduce the previous subset.
        if len(features) == previous:
            continue
        previous = len(features)
        filtered_train = copy(x_train[features])
        filtered_test = copy(x_test[features])
        if threshold == 0.0:
            threshold = 0  # keep the historical '0threshold' file name
        th_str = ''.join([str(threshold), 'threshold'])
        store_filtered(name, filtered_train, y_train, filtered_test, y_test, th_str, k,
                       fi_method, data_method)
def create_filtered_sets_influence(name, fis, x_train, x_test, y_train, y_test, k, fi_method, data_method):
    """Store train/test subsets keeping every feature with non-negative importance."""
    # Keep non-negatively scored features; Person_ID one-hot columns are handled as a unit.
    features = [column for column in fis[fis >= 0.0].index.to_list() if 'Person_ID' not in column]
    has_person_id = any('Person_ID' in column for column in x_train.columns)
    if has_person_id:
        features = features + ['Person_ID']
    print(f'Amount of features: {len(features)}')
    filtered_train = copy(x_train[features])
    filtered_test = copy(x_test[features])
    store_filtered(name, filtered_train, y_train, filtered_test, y_test, 'influence', k,
                   fi_method, data_method)
def create_embedding_csvs(slide_embd_train, y_train, slide_embd_test, y_test, transcript_embd_train,
                          transcript_embd_test, k, data_method):
    """Write the embedding feature sets (slide-only, transcript-only, combined) to CSV.

    Rows are ordered by knowledge-gain class (Low < Moderate < High); the
    output folder depends on whether rows are per person ('all') or per
    video ('avg').
    """
    nominal_to_num = {'Low': 0, 'Moderate': 1, 'High': 2}
    folder = 'all' if data_method == 'persons' else 'avg'

    def _write(frames, path):
        # Concatenate features + labels, sort by class ordinal, and persist.
        pd.concat(frames, axis=1).sort_values(
            by=['Knowledge_Gain_Level'], key=lambda col: col.map(nominal_to_num)
        ).to_csv(path, index=False, encoding='utf-8', quoting=csv.QUOTE_NONNUMERIC)

    # only slide embedding
    _write([slide_embd_train, y_train],
           f'./feature_selection/{folder}/train/embedding_{k + 1}_{data_method}_only_slide.csv')
    _write([slide_embd_test, y_test],
           f'./feature_selection/{folder}/test/embedding_{k + 1}_{data_method}_only_slide_test.csv')
    # only transcript embedding
    _write([transcript_embd_train, y_train],
           f'./feature_selection/{folder}/train/embedding_{k + 1}_{data_method}_only_transcript.csv')
    _write([transcript_embd_test, y_test],
           f'./feature_selection/{folder}/test/embedding_{k + 1}_{data_method}_only_transcript_test.csv')
    # combined: drop the duplicated Person_ID one-hot columns from the transcript side
    _write([slide_embd_train,
            transcript_embd_train.drop(list(transcript_embd_train.filter(regex='Person_ID')), axis=1),
            y_train],
           f'./feature_selection/{folder}/train/embedding_{k + 1}_{data_method}_both_embd.csv')
    _write([slide_embd_test,
            transcript_embd_test.drop(list(transcript_embd_test.filter(regex='Person_ID')), axis=1),
            y_test],
           f'./feature_selection/{folder}/test/embedding_{k + 1}_{data_method}_both_embd_test.csv')
def store_filtered(name, filtered_train, y_train, filtered_test, y_test, specific, k, fi_method, data_method):
    """Write one filtered train/test feature subset to CSV, ordered by class label."""
    # Collapse the Person_ID one-hot block back into a single column if present.
    if 'Person_ID' in filtered_train:
        filtered_train = convert_person_id(filtered_train)
    if 'Person_ID' in filtered_test:
        filtered_test = convert_person_id(filtered_test)
    nominal_to_num = {'Low': 0, 'Moderate': 1, 'High': 2}
    folder = 'all' if data_method == 'persons' else 'avg'

    def _write(x, y, path):
        # Concatenate features + labels, sort by class ordinal, and persist.
        pd.concat([x, y], axis=1).sort_values(
            by=['Knowledge_Gain_Level'], key=lambda col: col.map(nominal_to_num)
        ).to_csv(path, index=False, encoding='utf-8', quoting=csv.QUOTE_NONNUMERIC)

    _write(filtered_train, y_train,
           f'./feature_selection/{folder}/train/{name}_{k + 1}_{fi_method}_{data_method}_{specific}_without.csv')
    _write(filtered_test, y_test,
           f'./feature_selection/{folder}/test/{name}_{k + 1}_{fi_method}_{data_method}_{specific}_without_test.csv')
def embedding_split(k, slide_embd, transcript_embd):
    """Split the slide/transcript embedding sets into k folds over videos and store them.

    Folds are built over the fixed list of video IDs so every row of one video
    stays on the same side of the split. Data is scaled per fold (fit on train,
    applied to test) and written out via create_embedding_csvs().

    Bug fix: the transcript test selection previously masked the test frame
    with ``transcript_train.columns``; it now uses the test frame's own
    columns. Dead locals (fold_dict, num_participants, person_cols and the
    never-used transcript label frames) were removed.
    """
    video_ids = np.array(['1_2a', '1_2b', '1_2c', '1_2d', '1_3a', '1_3b', '1_3c', '2_2a', '2_2b', '2_2c',
                          '2_2d', '3_2b', '3_3a', '3_3b', '4_2a', '4_3a', '5_1a', '5_1b', '6_2a', '7_2b', '7_3a',
                          '7_3c'])
    k_fold = KFold(n_splits=k, shuffle=True, random_state=1234)
    for i, (train_idx, test_idx) in enumerate(k_fold.split(video_ids)):
        train = video_ids[train_idx]
        test = video_ids[test_idx]
        # Row-select by video membership, then drop the Video_ID bookkeeping column.
        slide_train = copy(slide_embd[0].iloc[np.where(slide_embd[0]['Video_ID'].isin(train))]).drop(
            ['Video_ID'], axis=1)
        slide_test = copy(slide_embd[0].iloc[np.where(slide_embd[0]['Video_ID'].isin(test))]).drop(
            ['Video_ID'], axis=1)
        transcript_train = copy(transcript_embd[0].iloc[np.where(transcript_embd[0]['Video_ID'].isin(train))]).drop(
            ['Video_ID'], axis=1)
        transcript_test = copy(transcript_embd[0].iloc[np.where(transcript_embd[0]['Video_ID'].isin(test))]).drop(
            ['Video_ID'], axis=1)
        slide_y_train = copy(slide_embd[1].iloc[np.where(slide_embd[1]['Video_ID'].isin(train))]).drop(
            ['Video_ID'], axis=1)
        slide_y_test = copy(slide_embd[1].iloc[np.where(slide_embd[1]['Video_ID'].isin(test))]).drop(
            ['Video_ID'], axis=1)
        # Collapse the one-hot Person_ID columns into a single column.
        slide_train = convert_person_id(slide_train)
        slide_test = convert_person_id(slide_test)
        transcript_train = convert_person_id(transcript_train)
        transcript_test = convert_person_id(transcript_test)
        # Scale every feature column except Person_ID (fit on train only).
        slide_train.loc[:, slide_train.columns != 'Person_ID'], slide_test.loc[:, slide_test.columns != 'Person_ID'] \
            = scale_data(slide_train.loc[:, slide_train.columns != 'Person_ID'],
                         slide_test.loc[:, slide_test.columns != 'Person_ID'])
        transcript_train.loc[:, transcript_train.columns != 'Person_ID'], \
            transcript_test.loc[:, transcript_test.columns != 'Person_ID'] \
            = scale_data(transcript_train.loc[:, transcript_train.columns != 'Person_ID'],
                         transcript_test.loc[:, transcript_test.columns != 'Person_ID'])
        # Store the per-fold embedding CSVs.
        create_embedding_csvs(slide_train, slide_y_train, slide_test, slide_y_test, transcript_train,
                              transcript_test, i, 'persons')
def avg_split(k, name, feature, filter_type, slide_embd, transcript_embd, non_relevant, importance_method):
    """Stratified k-fold split of the per-video (averaged) data set.

    For every fold: drop irrelevant columns, scale features, compute feature
    importance, write filtered subsets according to filter_type, and store the
    embedding CSVs. Finally the per-fold importances are averaged and saved.

    Bug fix: 'Video_ID' was prepended to ``non_relevant`` inside the loop,
    growing the exclusion list once per fold; it is now built a single time.
    The unused ``sets`` accumulator and transcript label frames were removed.
    """
    print(f'Split average of {name}')
    fis = []  # per-fold feature importances
    excluded = ['Video_ID'] + non_relevant  # built once, not per fold
    k_fold = StratifiedKFold(n_splits=k, shuffle=True, random_state=1234)
    for i, (train, test) in enumerate(k_fold.split(feature[0], feature[1]['Knowledge_Gain_Level'])):
        print(f'Calculate split{i + 1}')
        # Sort videos into train and test.
        x_train, x_test = copy(feature[0].iloc[train]), copy(feature[0].iloc[test])
        y_train, y_test = copy(feature[1].iloc[train]), copy(feature[1].iloc[test])
        # Remove non-relevant features and the Video_ID bookkeeping column.
        x_train = x_train.drop([column for column in excluded if column in x_train.columns], axis=1)
        x_test = x_test.drop([column for column in excluded if column in x_test.columns], axis=1)
        y_train, y_test = y_train.drop(['Video_ID'], axis=1), y_test.drop(['Video_ID'], axis=1)
        # Split the embedding frames along the same fold indices.
        slide_x_train, slide_x_test = copy(slide_embd[0].iloc[train]), copy(slide_embd[0].iloc[test])
        slide_x_train = slide_x_train.drop(['Video_ID'], axis=1)
        slide_x_test = slide_x_test.drop(['Video_ID'], axis=1)
        transcript_x_train, transcript_x_test = copy(transcript_embd[0].iloc[train]), copy(
            transcript_embd[0].iloc[test])
        transcript_x_train = transcript_x_train.drop(['Video_ID'], axis=1)
        transcript_x_test = transcript_x_test.drop(['Video_ID'], axis=1)
        slide_y_train, slide_y_test = copy(slide_embd[1].iloc[train]), copy(slide_embd[1].iloc[test])
        slide_y_train = slide_y_train.drop(['Video_ID'], axis=1)
        slide_y_test = slide_y_test.drop(['Video_ID'], axis=1)
        # Scale features (fit on train, applied to test).
        x_train[:], x_test[:] = scale_data(x_train, x_test)
        slide_x_train[:], slide_x_test[:] = scale_data(slide_x_train, slide_x_test)
        transcript_x_train[:], transcript_x_test[:] = scale_data(transcript_x_train, transcript_x_test)
        fi = get_feature_importance(name, x_train, x_test, y_train, y_test, i, importance_method, True)
        fis.append(fi)
        if filter_type == "threshold":
            create_filtered_sets_threshold(name, fi, x_train, x_test, y_train, y_test, i)
        elif filter_type == "amount":
            create_filtered_sets_best_features(name, fi, x_train, x_test, y_train, y_test, i)
        elif filter_type == "influence":
            create_filtered_sets_influence(name, fi, x_train, x_test, y_train, y_test, i, importance_method,
                                           'videos')
        # Store embedding features for this fold.
        create_embedding_csvs(slide_x_train, slide_y_train, slide_x_test, slide_y_test, transcript_x_train,
                              transcript_x_test, i, 'videos')
    # Average the importances over all folds and persist.
    concated = pd.concat(fis)
    mean_values = concated.groupby(concated.index).mean()
    store_avg_importance(name, mean_values, importance_method, 'videos')
def data_split(k, name, feature, filter_type, slide_embd, transcript_embd, non_relevant, importance_method):
    """K-fold split of the per-person data set, folded over videos.

    slide_embd and transcript_embd are accepted for interface symmetry with
    avg_split() but are handled separately via embedding_split().

    Bug fixes: the ``k`` parameter was ignored (n_splits was hard-coded to 5)
    and 'Video_ID' was prepended to ``non_relevant`` inside the loop, growing
    the exclusion list once per fold. The unused ``sets`` accumulator was
    removed.
    """
    k_fold = KFold(n_splits=k, random_state=1234, shuffle=True)
    video_ids = np.array(['1_2a', '1_2b', '1_2c', '1_2d', '1_3a', '1_3b', '1_3c', '2_2a', '2_2b', '2_2c',
                          '2_2d', '3_2b', '3_3a', '3_3b', '4_2a', '4_3a', '5_1a', '5_1b', '6_2a', '7_2b', '7_3a',
                          '7_3c'])
    print(f'\nGenerate splits for csv-file: {name}')
    complete_fi = []
    excluded = ['Video_ID'] + non_relevant  # built once, not per fold
    for i, (train_idx, test_idx) in enumerate(k_fold.split(video_ids)):
        print(f'Calculate split{i + 1}')
        # Fold over video IDs so all rows of one video stay together.
        train = video_ids[train_idx]
        test = video_ids[test_idx]
        x_train = copy(feature[0].iloc[np.where(feature[0]['Video_ID'].isin(train))])
        x_test = copy(feature[0].iloc[np.where(feature[0]['Video_ID'].isin(test))])
        # Remove non-relevant features and the Video_ID bookkeeping column.
        x_train = x_train.drop([column for column in excluded if column in x_train.columns], axis=1)
        x_test = x_test.drop([column for column in excluded if column in x_test.columns], axis=1)
        y_train = copy(feature[1].iloc[np.where(feature[1]['Video_ID'].isin(train))]).drop(['Video_ID'], axis=1)
        y_test = copy(feature[1].iloc[np.where(feature[1]['Video_ID'].isin(test))]).drop(['Video_ID'], axis=1)
        # Scale all feature columns except Person_ID (fit on train only).
        x_train.loc[:, x_train.columns != 'Person_ID'], x_test.loc[:, x_test.columns != 'Person_ID'] \
            = scale_data(x_train.loc[:, x_train.columns != 'Person_ID'],
                         x_test.loc[:, x_test.columns != 'Person_ID'])
        # Feature importance on the one-hot encoded Person_ID representation.
        fis = get_feature_importance(name, convert_person_id(x_train), convert_person_id(x_test), y_train, y_test, i,
                                     importance_method, True).fillna(0)
        complete_fi.append(copy(fis))
        if filter_type == "threshold":
            create_filtered_sets_threshold(name, fis, x_train, x_test, y_train, y_test, i)
        elif filter_type == "amount":
            create_filtered_sets_best_features(name, fis, x_train, x_test, y_train, y_test, i)
        elif filter_type == "influence":
            create_filtered_sets_influence(name, fis, x_train, x_test, y_train, y_test, i, importance_method,
                                           'persons')
    # Average the importances over all folds and persist.
    concated = pd.concat(complete_fi)
    mean_values = concated.groupby(concated.index).mean()
    store_avg_importance(name, mean_values, importance_method, 'persons')
def create_csvs_weka(k, name, feature):
    """Create k-fold train/test CSVs (features plus label column) for use in Weka.

    Defect fixed: the ``video_ids`` list was built but never used (the fold is
    taken over the rows of ``feature`` directly); it and a commented-out debug
    print were removed.
    """
    k_fold = KFold(n_splits=k, shuffle=True, random_state=1234)
    for i, (train, test) in enumerate(k_fold.split(feature[0], feature[1])):
        x_train, x_test = copy(feature[0].iloc[train]), copy(feature[0].iloc[test])
        y_train, y_test = copy(feature[1][train]), copy(feature[1][test])
        # Append the label as the final column, as Weka expects.
        x_train.insert(len(x_train.columns), 'Knowledge_Gain_Level', y_train, True)
        x_test.insert(len(x_test.columns), 'Knowledge_Gain_Level', y_test, True)
        # Store train and test set.
        store_csv(x_train, ''.join(['train/', name, '_', str(i + 1), '_weka_features']))
        store_csv(x_test, ''.join(['test/', name, '_', str(i + 1), '_weka_features', '_test']))
def show_kg_prediction_stats(class_vals, overall, y_true, y_pred):
    """Print per-class and overall precision/recall/F1 plus overall accuracy."""
    dict_kg = {0: 'Low', 1: 'Moderate', 2: 'High'}
    # class_vals holds precision/recall/F1/support arrays; transposing the
    # first three groups one class's (precision, recall, f1) per entry.
    per_class = [list(triple) for triple in zip(*class_vals[:-1])]
    for idx, stats in enumerate(per_class):
        print(f'{dict_kg[idx]}:')
        print(f'Precision:{round(stats[0], 2)}, Recall:{round(stats[1], 2)}, F1:{round(stats[2], 2)}\n')
    print('Overall:')
    print(f'Precision:{round(overall[0], 2)}, Recall:{round(overall[1], 2)}, F1:{round(overall[2], 2)}')
    print(f'Accuracy:{round(accuracy_score(y_true, y_pred) * 100, 2)}')
def user_kg_prediction(name, feature):
    """Baseline: predict a user's knowledge-gain class from their other sessions.

    Knowledge gain is z-scored over all rows; for every (person, session) the
    prediction is the mean z-score of that person's remaining sessions
    (leave-one-out), mapped onto Low/Moderate/High. Prints precision, recall
    and F1 via show_kg_prediction_stats().

    Bug fix: the group-by loop variable used to shadow the ``name`` parameter;
    it is now called ``person``. Commented-out experiments were removed.
    """
    kg_class = feature[1]
    # Normalise knowledge gain to z-scores so one class mapping applies to all users.
    z_scores = scipy.stats.zscore(kg_class['Knowledge_Gain'].to_numpy()).tolist()
    kg_class['Knowledge_Gain'] = z_scores
    # Group sessions by participant.
    person_ids = copy(pd.concat([feature[0]['Person_ID'], kg_class], axis=1))
    grouped_ids = person_ids.groupby(['Person_ID'], as_index=False)
    y_pred = []
    y_true = []
    for person, group in grouped_ids:
        amount = len(group)
        group = group.reset_index(drop=True)
        for i in range(amount):
            # Leave-one-out: predict session i from the person's other sessions.
            test_value = group.iloc[i]['Knowledge_Gain']
            predicted = group.drop([i])['Knowledge_Gain'].mean()
            # Convert the continuous z-scores to the three classes.
            y_true.append(z_score_convert(test_value))
            y_pred.append(z_score_convert(predicted))
    class_vals = list(precision_recall_fscore_support(y_true, y_pred, average=None, labels=['Low', 'Moderate', 'High']))
    overall = list(precision_recall_fscore_support(y_true, y_pred, average='macro'))
    # Macro F1 recomputed as the harmonic mean of macro precision and recall.
    overall[2] = 2 / (1 / overall[0] + 1 / overall[1])
    show_kg_prediction_stats(class_vals, overall, y_true, y_pred)
def video_kg_prediction(name, feature, avg_feature, mode='avg'):
    """Baseline: predict a video's knowledge-gain class from all viewers' other sessions.

    For every video, each participant who watched it contributes a prediction
    derived from their *other* videos: in mode 'avg' the mean z-scored
    knowledge gain, otherwise the modal class label (ties broken randomly with
    a fixed seed). The per-person predictions are then aggregated (mean /
    majority vote) and scored against the video's true class.

    NOTE(review): the row-loop variable ``id`` shadows the builtin; behavior
    is unaffected. Random tie-breaking depends on the exact call order of
    random.randint, so the control flow must not be reordered.
    """
    kg_class = feature[1]
    kg_class_avg = avg_feature[1]
    if mode == 'avg':
        # Normalise both the per-person and per-video knowledge gains to z-scores.
        z_scores = scipy.stats.zscore(kg_class['Knowledge_Gain'].to_numpy()).tolist()
        kg_class['Knowledge_Gain'] = z_scores
        z_scores_avg = scipy.stats.zscore(kg_class_avg['Knowledge_Gain'].to_numpy()).tolist()
        kg_class_avg['Knowledge_Gain'] = z_scores_avg
    else:
        # Fixed seed so the random tie-breaks below are reproducible.
        random.seed(1234)
    # group person ids to match with video_id value
    person_ids = copy(pd.concat([feature[0]['Person_ID'], kg_class], axis=1))
    grouped_persons = person_ids.groupby(['Person_ID'], as_index=False)
    # scoring values
    y_true = []
    y_pred = []
    for id, row in kg_class_avg.iterrows():
        video_id = row['Video_ID']
        video_kg_class = z_score_convert(row['Knowledge_Gain']) if mode == 'avg' else row['Knowledge_Gain_Level']
        y_preds = []
        for _, group in grouped_persons:
            other_vid_value = None
            # Only participants who actually watched this video contribute.
            if video_id in group['Video_ID'].values:
                if mode == 'avg':
                    # Mean z-score over the participant's other videos.
                    other_vid_value = group[group['Video_ID'] != video_id]['Knowledge_Gain'].mean()
                else:
                    # Modal class over the other videos; ties broken randomly.
                    other_vid_values = group[group['Video_ID'] != video_id]['Knowledge_Gain_Level'].mode()
                    amount_most = len(other_vid_values)
                    if amount_most >= 2:
                        most_id = random.randint(0, amount_most - 1)
                        other_vid_value = other_vid_values[most_id]
                    else:
                        other_vid_value = other_vid_values[0]
                y_preds.append(other_vid_value)
        if mode == 'avg':
            # Aggregate the per-person predictions by their mean z-score.
            final_pred = z_score_convert(np.mean(y_preds))
        else:
            # Majority vote over the per-person class predictions; ties broken randomly.
            unique_counts = np.unique(y_preds, return_counts=True)
            max_val = unique_counts[1][np.argmax(unique_counts[1])]
            max_entries = []
            for i, value in enumerate(unique_counts[1]):
                if value == max_val:
                    max_entries.append(unique_counts[0][i])
            amount_most = len(max_entries)
            if amount_most >= 2:
                most_id = random.randint(0, amount_most - 1)
                final_pred = max_entries[most_id]
            else:
                final_pred = max_entries[0]
        y_true.append(video_kg_class)
        y_pred.append(final_pred)
    class_vals = list(precision_recall_fscore_support(y_true, y_pred, average=None, labels=['Low', 'Moderate', 'High']))
    overall = list(precision_recall_fscore_support(y_true, y_pred, average='macro'))
    # Macro F1 recomputed as the harmonic mean of macro precision and recall.
    overall[2] = 2 / (1 / overall[0] + 1 / overall[1])
    show_kg_prediction_stats(class_vals, overall, y_true, y_pred)
def create_folders():
    """Create the feature-selection output directory tree (idempotent).

    Bug fix: the original re-checked './feature_selection' before creating
    './feature_selection/feature_importance', so that branch never ran;
    os.makedirs(..., exist_ok=True) also removes the race-prone
    exists-then-create pattern.
    """
    for path in ('./feature_selection/feature_importance',
                 './feature_selection/all/train',
                 './feature_selection/all/test',
                 './feature_selection/avg/train',
                 './feature_selection/avg/test'):
        os.makedirs(path, exist_ok=True)
def main():
    """Command-line entry point for building the feature-selection data sets.

    Validates the CLI arguments, creates the output folders, then dispatches
    on --method:
      * 'weka'       - plain k-fold CSVs for Weka
      * 'python'     - per-person splits with feature-importance filtering
      * 'avg_python' - per-video (averaged) splits with filtering
      * 'person_id'  - user-history knowledge-gain baseline
      * 'video_id1' / 'video_id2' - video-history baselines (z-score / majority)
    """
    args = parse_arguments()
    path = args.path
    # check if commands are correct
    if args.method.lower() not in ['python', 'weka', 'avg_python', 'person_id', 'video_id1', 'video_id2']:
        print('The choosen method has to be python, avg_python, person_id, video_id1, video_id2 or weka')
        return
    if args.filter.lower() not in ['amount', 'threshold', 'influence']:
        print('The choosen filter has to be amount, threshold or influence')
        return
    if args.feature_importance.lower() not in ['pearson', 'permutation', 'permutation_rfpimp', 'shap', 'drop_column']:
        print('The choosen feature importance algorithm has to be pearson, permutation, permutation_rfpimp, '
              'shap or drop_column')
        return
    fi_method = args.feature_importance
    create_folders()
    if args.method.lower() == 'weka':
        # Plain k-fold CSVs, importance filtering is done inside Weka itself.
        print('Use weka method to get sets')
        all_features = preprocess_csv(''.join([path, 'all_features.csv']))
        text_features = preprocess_csv(''.join([path, 'text_features.csv']))
        multimedia_features = preprocess_csv(''.join([path, 'multimedia_features.csv']))
        create_csvs_weka(args.k_fold, 'all_features', all_features)
        create_csvs_weka(args.k_fold, 'text_features', text_features)
        create_csvs_weka(args.k_fold, 'multimedia_features', multimedia_features)
    elif args.method.lower() == 'python':
        # Per-person rows, folded over videos; non-relevant (constant) columns
        # are detected once on the full feature set and dropped everywhere.
        print(f'Use python and the feature importance method "{args.feature_importance.lower()}" to get sets')
        all_features = preprocess_csv(''.join([path, 'all_features.csv']))
        text_features = preprocess_csv(''.join([path, 'text_features.csv']))
        multimedia_features = preprocess_csv(''.join([path, 'multimedia_features.csv']))
        slide_embd = preprocess_csv(''.join([path, 'slide_embedding.csv']))
        transcript_embd = preprocess_csv(''.join([path, 'transcript_embedding.csv']))
        non_relevant = get_non_relevant_features(all_features[0], all_features[1])
        data_split(args.k_fold, 'all_features', all_features, args.filter.lower(), slide_embd, transcript_embd,
                   non_relevant, fi_method)
        data_split(args.k_fold, 'text_features', text_features, args.filter.lower(), slide_embd, transcript_embd,
                   non_relevant, fi_method)
        data_split(args.k_fold, 'multimedia_features', multimedia_features, args.filter.lower(), slide_embd,
                   transcript_embd, non_relevant, fi_method)
        embedding_split(args.k_fold, slide_embd, transcript_embd)
    elif args.method.lower() == 'avg_python':
        # Per-video (averaged) rows; second argument of preprocess_csv enables averaging.
        print(
            f'Use avg dataset with python and the feature importance method "{args.feature_importance.lower()}" to get sets')
        all_features = preprocess_csv(''.join([path, 'all_features.csv']), True)
        text_features = preprocess_csv(''.join([path, 'text_features.csv']), True)
        multimedia_features = preprocess_csv(''.join([path, 'multimedia_features.csv']), True)
        slide_embd = preprocess_csv(''.join([path, 'slide_embedding.csv']), True)
        transcript_embd = preprocess_csv(''.join([path, 'transcript_embedding.csv']), True)
        non_relevant = get_non_relevant_features(all_features[0], all_features[1])
        avg_split(args.k_fold, 'all_features', all_features, args.filter.lower(), slide_embd, transcript_embd,
                  non_relevant,
                  fi_method)
        avg_split(args.k_fold, 'text_features', text_features, args.filter.lower(), slide_embd, transcript_embd,
                  non_relevant,
                  fi_method)
        avg_split(args.k_fold, 'multimedia_features', multimedia_features, args.filter.lower(), slide_embd,
                  transcript_embd,
                  non_relevant, fi_method)
    elif args.method.lower() == 'person_id':
        # Baseline: predict from the user's own history.
        print('Calculate knowledge gain prediction with users only')
        all_features = preprocess_csv(''.join([path, 'all_features.csv']), kg_str='Knowledge_Gain')
        user_kg_prediction('person_id', all_features)
    elif args.method.lower() == 'video_id1':
        # Baseline: predict a video's class from other viewers (z-score mean).
        all_features = preprocess_csv(''.join([path, 'all_features.csv']), kg_str='Knowledge_Gain')
        all_features_avg = preprocess_csv(''.join([path, 'all_features.csv']), True, kg_str='Knowledge_Gain')
        video_kg_prediction('video_id1', all_features, all_features_avg)
    elif args.method.lower() == 'video_id2':
        # Baseline: predict a video's class from other viewers (majority vote).
        all_features = preprocess_csv(''.join([path, 'all_features.csv']), kg_str='Knowledge_Gain_Level')
        all_features_avg = preprocess_csv(''.join([path, 'all_features.csv']), True, kg_str='Knowledge_Gain_Level')
        video_kg_prediction('video_id2', all_features, all_features_avg, 'majority')
# Script entry point: build the feature-selection data sets from the CLI arguments.
if __name__ == "__main__":
    main()
| 38,354 | 51.903448 | 139 | py |
mooc_knowledge_gain | mooc_knowledge_gain-main/feature_extraction/embedding.py | from sentence_transformers import SentenceTransformer, util
from numpy import add
from torch import Tensor
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
def get_model(name):
    """Load the SentenceTransformer for the given model name.

    The model is downloaded automatically if it is not cached locally.

    :param name: the name of the model
    :return: specific SentenceTransformer
    """
    # Note: 'roberta-large-nli-stsb-mean-tokens' performed best in experiments.
    return SentenceTransformer(name)
def get_embeddings(sentences, model):
    """Encode the given sentences into embeddings with the supplied model.

    :param sentences: the sentences as array to calculate the embeddings from
    :param model: the SentenceTransformer model
    :return: list of embeddings as tensor objects
    """
    # model.encode yields one tensor per sentence; materialise them as a list.
    return list(model.encode(sentences, convert_to_tensor=True))
def calculate_divisor(n):
    """Return the number of distinct sentence pairs among n sentences.

    Equivalent to C(n, 2) = n*(n-1)/2 for n >= 2. For n < 2 it returns 1
    (matching the original loop, whose accumulator started at 1), so callers
    can always divide by the result safely.

    :param n: the amount of sentences
    :return: the amount of comparisons (minimum 1)
    """
    # Closed form replaces the original O(n) summation loop.
    return max(1, n * (n - 1) // 2)
def get_similarity(embeddings):
    """Return the mean pairwise cosine similarity of the given embeddings.

    :param embeddings: array of embeddings
    :return: average similarity, rounded to 2 decimals
             (0.0 for no embeddings, 1.0 for a single one)
    """
    length = len(embeddings)
    if length == 0:  # when array is empty there is no similarity
        return 0.0
    if length == 1:
        return 1.0
    similarity = 0
    # Sum cosine similarity over every unordered pair (i, z) with i < z.
    # (The original loop also built an unused scratch list per outer
    # iteration, which has been removed.)
    for i in range(length - 1):
        for z in range(i + 1, length):
            score = util.pytorch_cos_sim(embeddings[i], embeddings[z])[0]
            similarity += score.item()
    # Divide by the number of compared pairs; rounding for better representation.
    return round(similarity / calculate_divisor(length), 2)
def calculate_avg_embedding(embeddings):
    """Compute the component-wise mean of all sentence embeddings.

    :param embeddings: array of (torch tensor) embeddings to average
    :return: average embedding as a plain list of 1024 components
    """
    total = [0] * 1024  # zero vector matching the model's embedding size
    if not embeddings:
        # Avoid division by zero: an empty input yields the zero vector.
        return total
    for emb in embeddings:
        # Move each tensor to the CPU and accumulate it as a numpy array.
        total = add(total, emb.cpu().numpy())
    count = len(embeddings)
    return [component / count for component in total]
def reduce_dimension(embeddings):
    """Project high-dimensional embeddings down to 16 components with PCA.

    A raw dimension of 1024 would drown out the other features during
    classification, so the embeddings are standardised and reduced first.

    :param embeddings: the embeddings with high dimension
    :return: reduced dimension embeddings (components rounded to 6 decimals)
    """
    n = 16  # component count chosen experimentally
    print("Reduce Embeddings to dimension " + str(n))
    pca = PCA(n_components=n)
    x = StandardScaler().fit_transform(embeddings)
    values = pca.fit_transform(x)
    # Total retained variance; summing the ratios replaces the manual loop.
    information = sum(pca.explained_variance_ratio_)
    # Bug fix: scale to percent *before* rounding; round(x, 4) * 100 produced
    # floating-point artefacts such as 93.10000000000001 in the output.
    print("Reduced embeddings. Embeddings contain about " + str(round(information * 100, 2)) + " % information")
    result = []
    for embd in values.tolist():
        # rounds values to avoid wrong representation of floating number
        result.append([round(value, 6) for value in embd])
    return result
def process_video_embeddings(slides, transcript, model):
    """Compute embedding-based similarity features for one video.

    :param slides: all lines of the slides (array of strings)
    :param transcript: all sentences of the transcript (array of strings)
    :param model: the SentenceTransformer used for encoding
    :return: ([similarity features], avg slide embedding, avg transcript embedding)
    """
    slide_vectors = get_embeddings(slides, model)
    transcript_vectors = get_embeddings(transcript, model)
    # Internal coherence of each text source.
    sim_slides = round(get_similarity(slide_vectors), 6)
    sim_transcript = round(get_similarity(transcript_vectors), 6)
    sim_gap = round(abs(sim_slides - sim_transcript), 6)
    # Average embedding of each source, rounded for stable CSV output.
    avg_slides = [round(value, 6) for value in calculate_avg_embedding(slide_vectors)]
    avg_transcript = [round(value, 6) for value in calculate_avg_embedding(transcript_vectors)]
    # Similarity between the two averaged embeddings.
    sim_between = round(get_similarity([Tensor(avg_slides), Tensor(avg_transcript)]), 6)
    return [sim_slides, sim_transcript, sim_gap, sim_between], avg_slides, avg_transcript
| 5,370 | 42.314516 | 125 | py |
mooc_knowledge_gain | mooc_knowledge_gain-main/feature_extraction/main.py | import os
import files
import sys
import pre_processor
import client
def start_nlp():
    """Start stanza and the CoreNLP client, initialising stanza on first failure.

    Bug fix: the original used a bare ``except:`` (which also swallows
    KeyboardInterrupt/SystemExit); it is now ``except Exception``. The
    duplicated boot sequence is factored into a local helper.

    :return: tuple (stanza pipeline, CoreNLP client)
    """
    def _boot():
        # Boot the stanza pipeline and the CoreNLP client.
        sta = pre_processor.start_stanza('en', 'tokenize,mwt,pos,lemma,ner')
        cli = client.init_client()
        client.start_client(cli)
        return sta, cli

    try:
        return _boot()
    except Exception:
        # First run on a fresh machine: download/initialise stanza, then retry.
        pre_processor.init_stanza()
        return _boot()
def extract_text():
    """Convert every PDF in ./Data/Slides to a .txt file in ./Data/Slides-Processed."""
    # Clear out the results of any previous conversion first.
    files.remove_files(files.load_files('./Data/Slides-Processed'))
    for pdf in files.load_files('./Data/Slides'):
        files.process_pdf(pdf)
def get_features():
    """Compute features for every (slides, transcript) pair and store them as CSV.

    Builds the output folders, boots the NLP stack, converts the slide PDFs to
    text, pairs them with the transcripts and hands the pairs to the feature
    pipeline. Exits with status 1 if slides and transcripts do not match up.
    """
    # exist_ok=True replaces the racy exists()-then-makedirs pattern.
    os.makedirs('./Features/', exist_ok=True)
    os.makedirs('./Data/Slides-Processed', exist_ok=True)
    sta, cli = start_nlp()
    extract_text()
    slides = files.load_files('./Data/Slides-Processed')
    transcripts = files.load_files('./Data/Transcripts')
    if len(slides) != len(transcripts):
        print("Error: Every video needs slides and transcript. Please try again.")
        sys.exit(1)
    files.process_files(list(zip(slides, transcripts)), sta, cli)
# Script entry point: run the full feature-extraction pipeline.
if __name__ == "__main__":
    get_features()
| 1,693 | 29.25 | 117 | py |
mooc_knowledge_gain | mooc_knowledge_gain-main/feature_extraction/SortedCollection.py | from bisect import bisect_left, bisect_right
# This file was suggested in https://github.com/pymupdf/PyMuPDF/wiki/How-to-extract-text-in-natural-reading-order-(up2down,-left2right)
# Downloaded from https://code.activestate.com/recipes/577197-sortedcollection/
class SortedCollection(object):
    '''Sequence sorted by a key function.
    SortedCollection() is much easier to work with than using bisect() directly.
    It supports key functions like those used in sorted(), min(), and max().
    The result of the key function call is saved so that keys can be searched
    efficiently.
    Instead of returning an insertion-point which can be hard to interpret, the
    five find-methods return a specific item in the sequence. They can scan for
    exact matches, the last item less-than-or-equal to a key, or the first item
    greater-than-or-equal to a key.
    Once found, an item's ordinal position can be located with the index() method.
    New items can be added with the insert() and insert_right() methods.
    Old items can be deleted with the remove() method.
    The usual sequence methods are provided to support indexing, slicing,
    length lookup, clearing, copying, forward and reverse iteration, contains
    checking, item counts, item removal, and a nice looking repr.
    Finding and indexing are O(log n) operations while iteration and insertion
    are O(n). The initial sort is O(n log n).
    The key function is stored in the 'key' attribute for easy introspection or
    so that you can assign a new key function (triggering an automatic re-sort).
    In short, the class was designed to handle all of the common use cases for
    bisect but with a simpler API and support for key functions.
    >>> from pprint import pprint
    >>> from operator import itemgetter
    >>> s = SortedCollection(key=itemgetter(2))
    >>> for record in [
    ... ('roger', 'young', 30),
    ... ('angela', 'jones', 28),
    ... ('bill', 'smith', 22),
    ... ('david', 'thomas', 32)]:
    ... s.insert(record)
    >>> pprint(list(s)) # show records sorted by age
    [('bill', 'smith', 22),
    ('angela', 'jones', 28),
    ('roger', 'young', 30),
    ('david', 'thomas', 32)]
    >>> s.find_le(29) # find oldest person aged 29 or younger
    ('angela', 'jones', 28)
    >>> s.find_lt(28) # find oldest person under 28
    ('bill', 'smith', 22)
    >>> s.find_gt(28) # find youngest person over 28
    ('roger', 'young', 30)
    >>> r = s.find_ge(32) # find youngest person aged 32 or older
    >>> s.index(r) # get the index of their record
    3
    >>> s[3] # fetch the record at that index
    ('david', 'thomas', 32)
    >>> s.key = itemgetter(0) # now sort by first name
    >>> pprint(list(s))
    [('angela', 'jones', 28),
    ('bill', 'smith', 22),
    ('david', 'thomas', 32),
    ('roger', 'young', 30)]
    '''
    # Internal invariant: self._keys and self._items are parallel lists kept
    # in sorted order, with self._keys[i] == self._key(self._items[i]).
    def __init__(self, iterable=(), key=None):
        # Keep the caller-supplied key (possibly None) so that repr(),
        # copy() and pickling can reproduce this instance faithfully.
        self._given_key = key
        key = (lambda x: x) if key is None else key
        # Decorate-sort-undecorate: sort (key, item) pairs once, then split
        # them into the two parallel lists.
        decorated = sorted((key(item), item) for item in iterable)
        self._keys = [k for k, item in decorated]
        self._items = [item for k, item in decorated]
        self._key = key
    def _getkey(self):
        return self._key
    def _setkey(self, key):
        # Assigning a different key function triggers a full re-sort.
        if key is not self._key:
            self.__init__(self._items, key=key)
    def _delkey(self):
        # Deleting the key falls back to the identity ordering.
        self._setkey(None)
    key = property(_getkey, _setkey, _delkey, 'key function')
    def clear(self):
        self.__init__([], self._key)
    def copy(self):
        return self.__class__(self, self._key)
    def __len__(self):
        return len(self._items)
    def __getitem__(self, i):
        return self._items[i]
    def __iter__(self):
        return iter(self._items)
    def __reversed__(self):
        return reversed(self._items)
    def __repr__(self):
        return '%s(%r, key=%s)' % (
            self.__class__.__name__,
            self._items,
            getattr(self._given_key, '__name__', repr(self._given_key))
        )
    def __reduce__(self):
        # Pickle support: rebuild from the items and the originally given key.
        return self.__class__, (self._items, self._given_key)
    def __contains__(self, item):
        # Narrow the search to the slice of equal keys, then compare items.
        k = self._key(item)
        i = bisect_left(self._keys, k)
        j = bisect_right(self._keys, k)
        return item in self._items[i:j]
    def index(self, item):
        'Find the position of an item. Raise ValueError if not found.'
        k = self._key(item)
        i = bisect_left(self._keys, k)
        j = bisect_right(self._keys, k)
        return self._items[i:j].index(item) + i
    def count(self, item):
        'Return number of occurrences of item'
        k = self._key(item)
        i = bisect_left(self._keys, k)
        j = bisect_right(self._keys, k)
        return self._items[i:j].count(item)
    def insert(self, item):
        'Insert a new item. If equal keys are found, add to the left'
        k = self._key(item)
        i = bisect_left(self._keys, k)
        self._keys.insert(i, k)
        self._items.insert(i, item)
    def insert_right(self, item):
        'Insert a new item. If equal keys are found, add to the right'
        k = self._key(item)
        i = bisect_right(self._keys, k)
        self._keys.insert(i, k)
        self._items.insert(i, item)
    def remove(self, item):
        'Remove first occurrence of item. Raise ValueError if not found'
        i = self.index(item)
        del self._keys[i]
        del self._items[i]
    def find(self, k):
        'Return first item with a key == k. Raise ValueError if not found.'
        i = bisect_left(self._keys, k)
        if i != len(self) and self._keys[i] == k:
            return self._items[i]
        raise ValueError('No item found with key equal to: %r' % (k,))
    def find_le(self, k):
        'Return last item with a key <= k. Raise ValueError if not found.'
        i = bisect_right(self._keys, k)
        if i:
            return self._items[i-1]
        raise ValueError('No item found with key at or below: %r' % (k,))
    def find_lt(self, k):
        'Return last item with a key < k. Raise ValueError if not found.'
        i = bisect_left(self._keys, k)
        if i:
            return self._items[i-1]
        raise ValueError('No item found with key below: %r' % (k,))
    def find_ge(self, k):
        'Return first item with a key >= k. Raise ValueError if not found'
        i = bisect_left(self._keys, k)
        if i != len(self):
            return self._items[i]
        raise ValueError('No item found with key at or above: %r' % (k,))
    def find_gt(self, k):
        'Return first item with a key > k. Raise ValueError if not found'
        i = bisect_right(self._keys, k)
        if i != len(self):
            return self._items[i]
        raise ValueError('No item found with key above: %r' % (k,))
# --------------------------- Simple demo and tests -------------------------
# Randomized cross-check of SortedCollection against naive linear-scan
# reference implementations, plus fixed assertions and the module doctests.
if __name__ == '__main__':
    def ve2no(f, *args):
        'Convert ValueError result to -1'
        try:
            return f(*args)
        except ValueError:
            return -1
    def slow_index(seq, k):
        'Location of match or -1 if not found'
        for i, item in enumerate(seq):
            if item == k:
                return i
        return -1
    def slow_find(seq, k):
        'First item with a key equal to k. -1 if not found'
        for item in seq:
            if item == k:
                return item
        return -1
    def slow_find_le(seq, k):
        'Last item with a key less-than or equal to k.'
        for item in reversed(seq):
            if item <= k:
                return item
        return -1
    def slow_find_lt(seq, k):
        'Last item with a key less-than k.'
        for item in reversed(seq):
            if item < k:
                return item
        return -1
    def slow_find_ge(seq, k):
        'First item with a key-value greater-than or equal to k.'
        for item in seq:
            if item >= k:
                return item
        return -1
    def slow_find_gt(seq, k):
        'First item with a key-value greater-than k.'
        for item in seq:
            if item > k:
                return item
        return -1
    from random import choice
    # Pool mixes ints and equal-valued floats to exercise key ties.
    pool = [1.5, 2, 2.0, 3, 3.0, 3.5, 4, 4.0, 4.5]
    for i in range(500):
        for n in range(6):
            s = [choice(pool) for i in range(n)]
            sc = SortedCollection(s)
            s.sort()
            for probe in pool:
                assert repr(ve2no(sc.index, probe)) == repr(slow_index(s, probe))
                assert repr(ve2no(sc.find, probe)) == repr(slow_find(s, probe))
                assert repr(ve2no(sc.find_le, probe)) == repr(slow_find_le(s, probe))
                assert repr(ve2no(sc.find_lt, probe)) == repr(slow_find_lt(s, probe))
                assert repr(ve2no(sc.find_ge, probe)) == repr(slow_find_ge(s, probe))
                assert repr(ve2no(sc.find_gt, probe)) == repr(slow_find_gt(s, probe))
            for i, item in enumerate(s):
                assert repr(item) == repr(sc[i]) # test __getitem__
                assert item in sc # test __contains__ and __iter__
                assert s.count(item) == sc.count(item) # test count()
            assert len(sc) == n # test __len__
            assert list(map(repr, reversed(sc))) == list(map(repr, reversed(s))) # test __reversed__
            assert list(sc.copy()) == list(sc) # test copy()
            sc.clear() # test clear()
            assert len(sc) == 0
    sd = SortedCollection('The quick Brown Fox jumped'.split(), key=str.lower)
    assert sd._keys == ['brown', 'fox', 'jumped', 'quick', 'the']
    assert sd._items == ['Brown', 'Fox', 'jumped', 'quick', 'The']
    assert sd._key == str.lower
    assert repr(sd) == "SortedCollection(['Brown', 'Fox', 'jumped', 'quick', 'The'], key=lower)"
    sd.key = str.upper
    assert sd._key == str.upper
    assert len(sd) == 5
    assert list(reversed(sd)) == ['The', 'quick', 'jumped', 'Fox', 'Brown']
    for item in sd:
        assert item in sd
    for i, item in enumerate(sd):
        assert item == sd[i]
    sd.insert('jUmPeD')
    sd.insert_right('QuIcK')
    assert sd._keys ==['BROWN', 'FOX', 'JUMPED', 'JUMPED', 'QUICK', 'QUICK', 'THE']
    assert sd._items == ['Brown', 'Fox', 'jUmPeD', 'jumped', 'quick', 'QuIcK', 'The']
    assert sd.find_le('JUMPED') == 'jumped', sd.find_le('JUMPED')
    assert sd.find_ge('JUMPED') == 'jUmPeD'
    assert sd.find_le('GOAT') == 'Fox'
    assert sd.find_ge('GOAT') == 'jUmPeD'
    assert sd.find('FOX') == 'Fox'
    assert sd[3] == 'jumped'
    assert sd[3:5] ==['jumped', 'quick']
    assert sd[-2] == 'QuIcK'
    assert sd[-4:-2] == ['jumped', 'quick']
    for i, item in enumerate(sd):
        assert sd.index(item) == i
    try:
        sd.index('xyzpdq')
    except ValueError:
        pass
    else:
        assert 0, 'Oops, failed to notify of missing value'
    sd.remove('jumped')
    assert list(sd) == ['Brown', 'Fox', 'jUmPeD', 'quick', 'QuIcK', 'The']
    import doctest
    from operator import itemgetter
    print(doctest.testmod())
| 11,327 | 35.076433 | 135 | py |
mooc_knowledge_gain | mooc_knowledge_gain-main/feature_extraction/pre_processor.py | import stanza
import srt
import re
def init_stanza():
""" Initialise stanza for the first time (downloads the important data)
"""
stanza.download('en')
def start_stanza(lang, processors):
""" Starts stanza to use it later
:param lang: The language which stanza should process
:param processors: The types how stanza should process the text
:return: Initialized stanza pipeline to process text
"""
return stanza.Pipeline(lang=lang, processors=processors)
def get_words(sentence, sta):
    """Tokenize and pos-tag a sentence.

    :param sentence: the sentence to process
    :param sta: stanza pipeline that processes the sentence
    :return: list of word objects carrying tokens and pos-tags
    """
    document = sta(sentence)
    # stanza may split the input into several sentences; flatten them all.
    return [word for parsed in document.sentences for word in parsed.words]
def get_seconds(start, end):
    """Compute the display duration of a subtitle.

    :param start: timestamp at which the subtitle appears
    :param end: timestamp at which the subtitle disappears
    :return: duration in (fractional) seconds
    """
    started = start.seconds + start.microseconds / 1000000
    finished = end.seconds + end.microseconds / 1000000
    return finished - started
def split_sentences(text, sta):
    """Split the transcript text into sentences.

    Each sentence's apostrophes are expanded before the sentence objects
    are collected, which helps the later tense calculations.

    :param text: full text of the transcript
    :param sta: stanza pipeline used for sentence segmentation
    :return: list of sentence objects
    """
    parsed = sta(text)
    result = []
    for item in parsed.sentences:
        # Expand apostrophes for better calculations (e.g. time tenses).
        item.text = convert_apostrophe_nlp(item.text, item.words)
        result.append(item)
    return result
def concat_colon(sentences):
    """Merge a sentence into its predecessor when the predecessor ends with
    ':' — a colon does not terminate a sentence in many readability scales.

    :param sentences: sentence objects to merge
    :return: list of sentence texts, colon-terminated ones merged
    """
    merged = []
    pending_colon = False
    for sentence in sentences:
        text = sentence.text
        ends_with_colon = text.endswith(':')
        if pending_colon and not ends_with_colon:
            # Previous sentence ended with a colon: glue this one onto it.
            merged[-1] = merged[-1] + " " + text
        else:
            merged.append(text)
        pending_colon = ends_with_colon
    return merged
def get_srt(text, sta):
    """Extract the components of an srt file.

    :param text: complete text of the srt file (subtitles and timings)
    :param sta: stanza pipeline used for sentence separation
    :return: tuple of (sentences, total subtitle display time, subtitle count)
    """
    if not text:
        return '', 0, 0
    subtitles = list(srt.parse(text))
    # Total on-screen time across all subtitles.
    total_time = sum((get_seconds(sub.start, sub.end) for sub in subtitles), 0.0)
    # Strip line breaks so subtitles can be merged, and expand apostrophes.
    chunks = [" " + convert_apostrophe(sub.content.replace("\n", " "))
              for sub in subtitles]
    sentences = concat_colon(split_sentences(''.join(chunks), sta))
    return sentences, total_time, len(subtitles)
def change_s_apostrophe(sentence, words):
    """Expand "'s" occurrences in a sentence.

    The word type following "'s" decides whether it stands for "is" or
    "has"; before a noun the decision is deferred to the next word.

    :param sentence: sentence to modify
    :param words: word/pos-tag objects of the sentence, in order
    :return: sentence with "'s" expanded where a decision was possible
    """
    pending = False
    for word in words:
        if pending:
            tag = word.xpos
            if (word.text.endswith("ing") or "JJ" in tag or "RB" in tag
                    or "TO" in tag or "CD" in tag or "DT" in tag):
                sentence = sentence.replace("'s", " is", 1)
                pending = False
            elif tag == "VBN":
                sentence = sentence.replace("'s", " has", 1)
                pending = False
            elif "NN" in tag:
                # Noun follows: keep waiting for a decisive word.
                pass
            else:
                # No pattern matched: default to "is".
                sentence = sentence.replace("'s", " is", 1)
                pending = False
        elif "'s" in word.text:
            pending = True
    if pending:
        # Sentence ended without a decisive word: default to "is".
        sentence = sentence.replace("'s", " is", 1)
    return sentence
def change_d_apostrophe(sentence, words):
    """Expand "'d" occurrences in a sentence.

    The word following "'d" decides whether it stands for "would", "had"
    or "did" (the latter directly after a wh-word).

    :param sentence: sentence to modify
    :param words: word/pos-tag objects of the sentence, in order
    :return: sentence with "'d" expanded where appropriate
    """
    pending = False
    after_wh = False
    for word in words:
        text, tag = word.text, word.xpos
        if pending:
            if tag == "VB":
                sentence = sentence.replace("'d", " would", 1)
                pending = False
            elif tag == "VBN":
                sentence = sentence.replace("'d", " had", 1)
                pending = False
            elif "have" in text:
                sentence = sentence.replace("'d", " would", 1)
                pending = False
            elif "'d" in text:
                # Ambiguous (could be "would" or "had"); pick "would" and
                # keep looking at the following words.
                sentence = sentence.replace("'d", " would", 1)
            elif "better" in text or "rather" in text:
                sentence = sentence.replace("'d", " had", 1)
                pending = False
        elif "'d" in text:
            if after_wh:
                # Wh-word directly before: "'d" stands for "did".
                sentence = sentence.replace("'d", " did", 1)
                after_wh = False
            else:
                pending = True
        elif "W" == tag[0]:
            after_wh = True
        else:
            after_wh = False
    if pending:
        # Nothing decisive followed: default to "had".
        sentence = sentence.replace("'d", " had", 1)
    return sentence
def convert_apostrophe(sentence):
    """Expand apostrophe contractions that have exactly one meaning
    (no linguistic interpretation required).

    :param sentence: sentence to process
    :return: sentence with unambiguous contractions spelled out
    """
    # Normalise the typographic apostrophe first; the char ’ caused errors.
    sentence = sentence.replace("’", "'")
    replacements = (
        ("'m", " am"), ("'re", " are"), ("'ve", " have"),
        ("he's", "he is"), ("she's", "she is"), ("it's", "it is"),
        ("won't", "will not"), ("can't", "can not"), ("'ll", " will"),
        ("let's", "let us"), ("n't", " not"), ("who's", "who is"),
        ("where's", "where is"), ("how's", "how is"), ("' ", " "),
    )
    for pattern, expansion in replacements:
        sentence = re.sub(pattern, expansion, sentence, flags=re.IGNORECASE)
    return sentence
def convert_apostrophe_nlp(sentence, words):
    """Expand the contractions that need pos-tag based interpretation.

    :param sentence: sentence to process
    :param words: word objects with their pos-tags
    :return: sentence with "'d" and "'s" expanded
    """
    expanded = change_d_apostrophe(sentence, words)
    return change_s_apostrophe(expanded, words)
| 7,480 | 37.761658 | 120 | py |
mooc_knowledge_gain | mooc_knowledge_gain-main/feature_extraction/tenses.py |
class Tenses:
""" Class that calculate time tense specific features
"""
def __init__(self):
""" Initialise class-object with default values
"""
# Present
self.sim_pres = 0 # Simple Present
self.pres_prog = 0 # Present Progressive (includes going-to future because it is very similar)
self.pres_perf = 0 # Present Perfect
self.pres_perf_prog = 0 # Present Perfect Progressive
# Past
self.sim_pas = 0 # Simple Past
self.pas_prog = 0 # Past Progressive
self.pas_perf = 0 # Past Perfect
self.pas_perf_prog = 0 # Past Perfect Progressive
# Future
self.will = 0 # Will Future
self.fu_prog = 0 # Future Progressive
self.fu_perf = 0 # Future Perfect
self.fu_perf_prog = 0 # Future Perfect Progressive
# Conditional
self.cond_sim = 0 # Conditional Simple
self.cond_prog = 0 # Conditonal Progressive
self.cond_perf = 0 # Condtional Perfect
self.cond_perf_prog = 0 # Conditonal Perfect Progressive
# Participle
self.gerund = 0 # Gerund , Present Participle
self.perf_part = 0 # Perfect Participle
# Infinitive
self.inf = 0 # Present Infinitive
self.perf_inf = 0 # perfect Infinitive
# tense version
self.active = 0
self.passive = 0
# help variable
self.is_passive = False # shows if current tense is passive or not
self.was_prog = False # Shows if previous was progressive
self.was_perf = False # Shows if previous was perfect
def get_features(self):
""" Generates array of the time tenses
:return: Time tenses as array
"""
amount = self.active + self.passive
if not amount:
amount = 1
features = []
for element in self.__dict__.items():
if element[0] not in ["is_passive", "was_prog", "was_perf"]:
features.append(round(element[1], 6)) # no ratio
features.append(round(element[1] / amount, 6)) # ratio
return features
def print_tenses(self):
""" For tests to see if verb tenses are correct
"""
print("Active:" + str(self.active))
print("Passive:" + str(self.passive))
print()
print("Present:")
print("Simple Present: " + str(self.sim_pres))
print("Present Progressiv: " + str(self.pres_prog))
print("Present Perfect: " + str(self.pres_perf))
print("Present Perfect Progressive: " + str(self.pres_perf_prog))
print()
print("Past:")
print("Simple Past: " + str(self.sim_pas))
print("Past Progressiv: " + str(self.pas_prog))
print("Past Perfect: " + str(self.pas_perf))
print("Past Perfect Progressive: " + str(self.pas_perf_prog))
print()
print("Future:")
print("Will-Future: " + str(self.will))
print("Future Progressive: " + str(self.fu_prog))
print("Future Perfect: " + str(self.fu_perf))
print("Future Perfect Progressive: " + str(self.fu_perf_prog))
print()
print("Conditional:")
print("Conditional Simple: " + str(self.cond_sim))
print("Conditional Progressive: " + str(self.cond_prog))
print("Conditional Perfect: " + str(self.cond_perf))
print("Conditional Perfect Progressive: " + str(self.cond_perf_prog))
print()
print("Infinitive:")
print("Present Infinitive: " + str(self.inf))
print("Perfect Infinitive: " + str(self.perf_inf))
print()
print("Participle: ")
print("Gerund/Present Participle: " + str(self.gerund))
print("Perfect Participle: " + str(self.perf_part))
print()
def __get_time(self, time, token, pos):
""" Checks which time tense it could be and set them to previous token, previous pos for the next token
:param time: array of booleans to set the current time
:param token: current token to analyze
:param pos: the xpos-tag of the token
:return: modified time-array, previous token and previous pos tag
"""
prev_token = ''
prev_pos = ''
if pos == 'MD':
if token == 'will' or token == "shall":
time[2] = True # Future
prev_token, prev_pos = 'will/shall', pos
elif token == 'would':
time[3] = True # Conditional
prev_token, prev_pos = 'would', pos
else:
pass
elif pos == 'TO':
time[4] = True # Infinitve
prev_token, prev_pos = token, pos
elif pos in ["VBZ", "VBP"]:
time[0] = True # Present
prev_token, prev_pos = token, pos
elif pos == "VB":
self.sim_pres += 1
self.active += 1
elif pos == "VBD":
time[1] = True # Past
prev_token, prev_pos = token, pos
elif pos == "VBN": # for example formalized model (formalized is here like an adjective)
pass
elif pos == "VBG": # Participle
time[5] = True
prev_token, prev_pos = token, pos
return time, prev_token, prev_pos
def __get_present(self, time, prev_token, prev_pos, token, pos):
""" The present can have different forms. This function checks which tense could be possible and if one tense
is recognized, it will be incremented. At the end of the function the current token and current pos tag (xpos)
are set to the previous ones. Possible tenses are:
Simple Present -> Active: VB/VBZ/VBP; do/does + VB; Passive: am/is/are + VBN
Present Progressive -> Active: am/is/are + VBG; Passive: is + being + VBN
Present Perfect -> Active: has/have + VBN; Passive: has/have + been + VBN
Present Perfect Progressive -> Active: has/have + been + VBG
:param time: time-array that contains boolean. The current tense has the value True (here present)
:param prev_token: the previous token to identify the possible tenses
:param prev_pos: the previous pos tag (xpos) to identify the possible tenses
:param token: the current token
:param pos: the current pos tag (xpos)
:return: time-array, the previous token and the previous pos tag (xpos)
"""
if prev_token in ["do", "does"] and pos == "VB":
prev_token, prev_pos = token, pos
elif (prev_token in ["is", "am", "are"] or prev_pos == "VBN") and pos == "VBG":
prev_token, prev_pos = token, pos
elif prev_token in ["is", "am", "are"] and pos == "VBN":
self.is_passive = True
prev_token, prev_pos = token, pos
elif prev_token in ["has", "have"] and pos == "VBN":
self.was_perf = True
prev_token, prev_pos = token, pos
elif prev_token == "been" and self.was_perf and pos == "VBN":
self.is_passive = True
prev_token, prev_pos = token, pos
elif prev_pos == "VBG" and pos == "VBN":
self.is_passive = True
self.was_prog = True
prev_token, prev_pos = token, pos
else:
if prev_pos == "VBN":
if self.is_passive:
if self.was_prog:
self.pres_prog += 1
elif self.was_perf:
self.pres_perf += 1
else:
self.sim_pres += 1
self.passive += 1
self.is_passive = False
self.was_prog = False
self.was_perf = False
else:
self.pres_perf += 1
self.active += 1
time[0] = False # set present to False to check the next tense
time, prev_token, prev_pos = self.__get_time(time, token, pos)
elif prev_pos == "VBG":
if self.was_perf:
self.pres_perf_prog += 1
self.active += 1
else:
self.pres_prog += 1
self.active += 1
self.was_perf = False
time[0] = False # set present to False to check the next tense
time, prev_token, prev_pos = self.__get_time(time, token, pos)
else:
self.sim_pres += 1
self.active += 1
time[0] = False # set present to False to check the next tense
time, prev_token, prev_pos = self.__get_time(time, token, pos)
return time, prev_token, prev_pos
def __get_past(self, time, prev_token, prev_pos, token, pos):
""" The past can have different forms. This function checks which tense could be possible and if one tense
is recognized, it will be incremented. At the end of the function the current token and current pos tag (xpos)
are set to the previous ones. Possible tenses are:
Simple Past -> Active: VBD; did + VB; Passive: was/were + VBN
Past Progressive -> Active: was/were + VBG; Passive: was/were + being + VBN
Past Perfect -> Active: had + VBN; Passive: had + been + VBN
Past Perfect Progressive -> Active: had + been + VBG;
:param time: time-array that contains boolean. The current tense has the value True (here past)
:param prev_token: the previous token to identify the possible tenses
:param prev_pos: the previous pos tag (xpos) to identify the possible tenses
:param token: the current token
:param pos: the current pos tag (xpos)
:return: time-array, the previous token and the previous pos tag (xpos)
"""
if prev_token == "did" and pos == "VB":
prev_token, prev_pos = token, pos
elif (prev_token in ["was", "were"] or prev_pos == "VBN") and pos == "VBG":
prev_token, prev_pos = token, pos
elif prev_token in ["was", "were"] and pos == "VBN":
self.is_passive = True
prev_token, prev_pos = token, pos
elif prev_token == "had" and pos == "VBN":
self.was_perf = True
prev_token, prev_pos = token, pos
elif prev_token == "been" and self.was_perf and pos == "VBN":
self.is_passive = True
prev_token, prev_pos = token, pos
elif prev_pos == "VBG" and pos == "VBN":
self.is_passive = True
self.was_prog = True
prev_token, prev_pos = token, pos
else:
if prev_pos == "VBN":
if self.is_passive:
if self.was_prog:
self.pas_prog += 1
elif self.was_perf:
self.pas_perf += 1
else:
self.sim_pas += 1
self.passive += 1
self.was_prog = False
self.is_passive = False
else:
self.pas_perf += 1
self.active += 1
time[1] = False # set past to False to check the next tense
time, prev_token, prev_pos = self.__get_time(time, token, pos)
elif prev_pos == "VBG":
if self.was_perf:
self.pas_perf_prog += 1
self.active += 1
else:
self.pas_prog += 1
self.active += 1
self.was_perf = False
time[1] = False # set past to False to check the next tense
time, prev_token, prev_pos = self.__get_time(time, token, pos)
else:
self.sim_pas += 1
self.active += 1
time[1] = False # set past to False to check the next tense
time, prev_token, prev_pos = self.__get_time(time, token, pos)
return time, prev_token, prev_pos
def __get_future(self, time, prev_token, prev_pos, token, pos):
""" The future can have different forms. This function checks which tense could be possible and if one tense
is recognized, it will be incremented. At the end of the function the current token and current pos tag (xpos)
are set to the previous ones. Possible tenses are:
Will Future -> Active: will/shall + VB; Passive: will/shall + be + VBN
Future Progressive -> Active: will/shall + be + VBG
Future Perfect -> Active: will/shall + have + VBN
Future Perfect Progressive -> Active: will/shall + have + been + VBG
Conditional Simple -> would + VB; Passive: would + be + VBN
:param time: time-array that contains boolean. The current tense has the value True (here future)
:param prev_token: the previous token to identify the possible tenses
:param prev_pos: the previous pos tag (xpos) to identify the possible tenses
:param token: the current token
:param pos: the current pos tag (xpos)
:return: time-array, the previous token and the previous pos tag (xpos)
"""
if prev_token == "will/shall" and pos == "VB":
prev_token, prev_pos = token, pos
elif prev_token == "be" and pos == "VBN":
self.is_passive = True
prev_token, prev_pos = token, pos
elif prev_token == "be" and pos == "VBG":
self.was_prog = True
prev_token, prev_pos = token, pos
elif prev_token == "have" and pos == "VBN":
prev_token, prev_pos = token, pos
elif prev_token == "been" and pos == "VBG":
prev_token, prev_pos = token, pos
else:
if prev_pos == "VB":
self.will += 1
self.active += 1
time[2] = False # set future to False to check the next tense
time, prev_token, prev_pos = self.__get_time(time, token, pos)
elif prev_pos == "VBN":
if self.is_passive:
self.will += 1
self.passive += 1
else:
self.fu_perf += 1
self.active += 1
time[2] = False # set future to False to check the next tense
time, prev_token, prev_pos = self.__get_time(time, token, pos)
elif prev_pos == "VBG":
if self.was_prog:
self.was_prog = False
self.fu_prog += 1
self.active += 1
else:
self.fu_perf_prog += 1
self.active += 1
time[2] = False # set future to False to check the next tense
time, prev_token, prev_pos = self.__get_time(time, token, pos)
else:
self.will += 1
self.active += 1
time[2] = False # set future to False to check the next tense
time, prev_token, prev_pos = self.__get_time(time, token, pos)
return time, prev_token, prev_pos
def __get_conditional(self, time, prev_token, prev_pos, token, pos):
""" The conditional can have different forms. The function checks which tense could be possible and if one tense
is recognized, it will be incremented. At the end of the function the current token and current pos tag (xpos)
are set to the previous ones. Possible tenses are:
Conditional Simple -> Active: would + VB; Passive: would + be + VBN
Conditional Progressive -> Active: would + be + VBG
Conditional Perfect -> Active: would + have + VBN; Passive: would + have + been + VBN
Condtional Perfect Progressive -> Active: would + have + been + VBG
:param time: time-array that contains boolean. The current tense has the value True (here conditional)
:param prev_token: the previous token to identify the possible tenses
:param prev_pos: the previous pos tag (xpos) to identify the possible tenses
:param token: the current token
:param pos: the current pos tag (xpos)
:return: time-array, the previous token and the previous pos tag (xpos)
"""
if prev_token == "would" and pos == "VB":
prev_token, prev_pos = token, pos
elif prev_token == "be" and pos == "VBN":
self.is_passive = True
prev_token, prev_pos = token, pos
elif prev_token == "been" and self.was_perf and pos == "VBN":
self.is_passive = True
prev_token, prev_pos = token, pos
elif prev_token in ["be", "been"] and pos == "VBG":
prev_token, prev_pos = token, pos
elif prev_token == "have" and pos == "VBN":
self.was_perf = True
prev_token, prev_pos = token, pos
else:
if prev_pos == "VBN":
if self.is_passive:
if self.was_perf:
self.cond_perf += 1
else:
self.cond_sim += 1
self.passive += 1
self.is_passive = False
elif self.was_perf:
self.cond_perf += 1
self.active += 1
self.was_perf = False
else:
self.cond_prog += 1
self.active += 1
time[3] = False # set conditional to False to check the next tense
time, prev_token, prev_pos = self.__get_time(time, token, pos)
elif prev_pos == "VBG":
if self.was_perf:
self.cond_perf_prog += 1
self.active += 1
self.was_perf = False
else:
self.cond_prog += 1
self.active += 1
time[3] = False # set conditional to False to check the next tense
time, prev_token, prev_pos = self.__get_time(time, token, pos)
else:
self.cond_sim += 1
self.active += 1
time[3] = False # set conditional to False to check the next tense
time, prev_token, prev_pos = self.__get_time(time, token, pos)
return time, prev_token, prev_pos
def __get_infinitive(self, time, prev_token, prev_pos, token, pos):
""" The infinitive can have different forms. The function checks which tense could be possible and if one tense
is recognized, it will be incremented. At the end of the function the current token and current pos tag (xpos)
are set to the previous ones. Possible tenses are:
Present Infinitive -> Active: to + VB; Passive: to + be + VBN
Perfect Infinitive -> Active: to + have + VBN; Passive: to + have + been + VBN
:param time: time-array that contains boolean. The current tense has the value True (here infinitive)
:param prev_token: the previous token to identify the possible tenses
:param prev_pos: the previous pos tag (xpos) to identify the possible tenses
:param token: the current token
:param pos: the current pos tag (xpos)
:return: time-array, the previous token and the previous pos tag (xpos)
"""
if prev_pos == "TO" and pos == "VB":
prev_token, prev_pos = token, pos
elif prev_token == "be" and pos == "VBN":
self.is_passive = True
prev_token, prev_pos = token, pos
elif prev_token == "been" and pos == "VBN":
self.is_passive = True
self.was_perf = True
prev_token, prev_pos = token, pos
elif prev_token == "have" and pos == "VBN":
prev_token, prev_pos = token, pos
else:
if prev_pos == "VBN":
if self.is_passive:
if self.was_perf:
self.perf_inf += 1
self.passive += 1
else:
self.inf += 1
self.passive += 1
else:
self.perf_inf += 1
self.active += 1
time[4] = False # set infinitive to False to check the next tense
elif prev_pos == "VB":
self.inf += 1
self.active += 1
time[4] = False # set infinitive to False to check the next tense
else:
time[4] = False # set infinitive to False to check the next tense
time, prev_token, prev_pos = self.__get_time(time, token, pos)
return time, prev_token, prev_pos
    def __get_participle(self, time, prev_token, prev_pos, token, pos):
        """ The participle can have different forms. The function checks which tense could be possible and if one tense
        is recognized, it will be incremented. At the end of the function the current token and current pos tag (xpos)
        are set to the previous ones. Possible tenses are:
        Present Participle -> Active: VBG; Passive: being + VBN
        Perfect Participle -> Active: having + VBN; Passive: having + been + VBN
        :param time: time-array that contains boolean. The current tense has the value True (here participle)
        :param prev_token: the previous token to identify the possible tenses
        :param prev_pos: the previous pos tag (xpos) to identify the possible tenses
        :param token: the current token
        :param pos: the current pos tag (xpos)
        :return: time-array, the previous token and the previous pos tag (xpos)
        """
        if prev_token == "being" and pos == "VBN":  # "being + VBN" -> passive present participle
            self.is_passive = True
            prev_token, prev_pos = token, pos
        elif prev_token == "having" and pos == "VBN":  # "having + VBN" -> perfect participle (possibly passive later)
            self.was_perf = True
            prev_token, prev_pos = token, pos
        elif prev_pos == "VBN" and self.was_perf and pos == "VBN":  # "having been + VBN" -> passive perfect participle
            self.is_passive = True
            prev_token, prev_pos = token, pos
        else:
            # The participle phrase ended with the previous token: classify it.
            if prev_pos == "VBG":
                # bare VBG -> gerund / present participle, active
                self.gerund += 1
                self.active += 1
            elif prev_pos == "VBN":
                if self.is_passive:
                    if self.was_perf:
                        self.perf_part += 1
                    else:
                        self.gerund += 1
                    self.is_passive = False
                    self.passive += 1
                else:
                    self.perf_part += 1
                    self.active += 1
                self.was_perf = False
            time[5] = False  # set participle to False to check the next tense
            # re-dispatch the current token, it may start a new tense
            time, prev_token, prev_pos = self.__get_time(time, token, pos)
        return time, prev_token, prev_pos
def __get_last(self, time, prev_pos):
""" Checks for the last token the tense
:param time: time-array that contains boolean. The current tense has the value True
:param prev_pos: the previous pos tag (xpos) to identify the possible tenses
"""
if time[0]: # Present
if prev_pos in ["VBZ", "VB", "VBP"]:
self.sim_pres += 1
self.active += 1
elif prev_pos == "VBN":
if self.is_passive:
if self.was_prog:
self.pres_prog += 1
elif self.was_perf:
self.pres_perf += 1
else:
self.sim_pres += 1
self.passive += 1
else:
self.pres_perf += 1
self.active += 1
elif prev_pos == "VBG":
if self.was_perf:
self.pres_perf_prog *= 1
self.active += 1
else:
self.pres_prog += 1
self.active += 1
elif time[1]: # Past
if prev_pos in "VBD, VB":
self.sim_pas += 1
self.active += 1
elif prev_pos == "VBN":
if self.is_passive:
if self.was_prog:
self.pas_prog += 1
elif self.was_perf:
self.pas_perf += 1
else:
self.sim_pas += 1
self.passive += 1
else:
self.pas_perf += 1
self.active += 1
elif prev_pos == "VBG":
if self.was_perf:
self.pas_perf_prog += 1
self.active += 1
else:
self.pas_prog += 1
self.active += 1
elif time[2]: # Future
if prev_pos in ["MD", "VB"]:
self.will += 1
self.active += 1
elif prev_pos == "VBN":
if self.is_passive:
self.will += 1
self.passive += 1
else:
self.fu_perf += 1
self.active += 1
elif prev_pos == "VBG":
if self.was_prog:
self.fu_prog += 1
else:
self.fu_perf_prog += 1
self.active += 1
elif time[3]: # Condition
if prev_pos in ["MD", "VB"]: # signalise would
self.cond_sim += 1
self.active += 1
elif prev_pos == "VBN":
if self.is_passive:
if self.was_perf:
self.cond_perf += 1
else:
self.cond_sim += 1
self.passive += 1
else:
self.cond_perf += 1
self.active += 1
elif prev_pos == "VBG":
if self.was_perf:
self.cond_perf_prog += 1
self.active += 1
else:
self.cond_prog += 1
self.active += 1
elif time[4]: # Infinitive
if prev_pos == "VB":
self.inf += 1
self.active += 1
elif prev_pos == "VBN":
if self.is_passive:
if self.was_perf:
self.perf_inf += 1
else:
self.inf += 1
self.passive += 1
else:
self.perf_inf += 1
self.active += 1
elif time[5]: # Participle
if prev_pos == "VBG":
self.gerund += 1
self.active += 1
elif prev_pos == "VBN":
if self.is_passive:
if self.was_perf:
self.perf_part += 1
else:
self.gerund += 1
self.passive += 1
else:
self.perf_part += 1
self.active += 1
def __reset_help(self):
""" Resets the help variables to the default value to make the correct decision for the next sentence
"""
self.is_passive = False
self.was_prog = False
self.was_perf = False
def calculate_tenses(self, part):
""" Calculates all tenses for a sentence part/ line part
:param part: the part of a sentence/line
"""
prev_token = ''
prev_pos = ''
time = [False, False, False, False, False, False] # present, past, future, condition, infinitive, Participle
for token, pos in part: # iterates through all tokens
# if there is no current time, the method checks which time is possible
if not (time[0] or time[1] or time[2] or time[3] or time[4] or time[5]):
time, prev_token, prev_pos = self.__get_time(time, token, pos)
elif time[0]: # current Present
time, prev_token, prev_pos = self.__get_present(time, prev_token, prev_pos, token, pos)
elif time[1]: # current Past
time, prev_token, prev_pos = self.__get_past(time, prev_token, prev_pos, token, pos)
elif time[2]: # current Future
time, prev_token, prev_pos = self.__get_future(time, prev_token, prev_pos, token, pos)
elif time[3]: # current Conditional
time, prev_token, prev_pos = self.__get_conditional(time, prev_token, prev_pos, token, pos)
elif time[4]: # current Infinitive
time, prev_token, prev_pos = self.__get_infinitive(time, prev_token, prev_pos, token, pos)
elif time[5]: # current Participle
time, prev_token, prev_pos = self.__get_participle(time, prev_token, prev_pos, token, pos)
self.__get_last(time, prev_pos)
self.__reset_help()
def process_tenses(self, sentence):
""" Processes the sentence/line to calculate the tenses and stores them
:param sentence: the current sentence/line
"""
for part in sentence:
self.calculate_tenses(part) | 29,322 | 44.74571 | 120 | py |
mooc_knowledge_gain | mooc_knowledge_gain-main/feature_extraction/readability.py | import math
def flesch_reading_ease(total_words, total_sentences, total_syllables):
    """ Calculates the Flesch Reading Ease score of a text.
    :param total_words: amount of words in the text
    :param total_sentences: amount of sentences in the text
    :param total_syllables: amount of syllables in the text
    :return: value of flesch reading ease index (0 for empty input)
    """
    if not total_words or not total_sentences:
        return 0  # undefined for empty texts, 0 is the default
    words_per_sentence = total_words / total_sentences
    syllables_per_word = total_syllables / total_words
    return 206.835 - 1.015 * words_per_sentence - 84.6 * syllables_per_word
def flesch_kincaid(total_words, total_sentences, total_syllables):
    """ Calculates the Flesch-Kincaid grade level of a text.
    :param total_words: amount of words in the text
    :param total_sentences: amount of sentences in the text
    :param total_syllables: amount of syllables in the text
    :return: value of flesch kincaid index (0 for empty input)
    """
    if not total_words or not total_sentences:
        return 0  # undefined for empty texts, 0 is the default
    words_per_sentence = total_words / total_sentences
    syllables_per_word = total_syllables / total_words
    return 0.39 * words_per_sentence + 11.8 * syllables_per_word - 15.59
def gunning_fog(total_words, total_sentences, complex_words):
    """ Calculates the value of the Gunning Fog index.
    :param total_words: amount of words in the text
    :param total_sentences: amount of sentences in the text
    :param complex_words: words with 3+ syllables (without the suffixes es, ed, ing, no proper
        nouns or compound words)
    :return: value of the gunning fog index (0 for empty input)
    """
    if not total_words or not total_sentences:
        return 0  # undefined for empty texts, 0 is the default
    words_per_sentence = total_words / total_sentences
    percent_complex = (complex_words / total_words) * 100
    return (words_per_sentence + percent_complex) * 0.4
def smog(total_sentences, total_polysyllables):
    """ Calculates the value of the SMOG index.
    :param total_sentences: amount of sentences in the text
    :param total_polysyllables: amount of words that have 3 or more syllables
    :return: value of smog index (0 when there are no sentences)
    """
    if not total_sentences:
        return 0  # undefined without sentences, 0 is the default
    # normalise the polysyllable count to the standard 30-sentence sample
    scaled = total_polysyllables * 30 / total_sentences
    return 3.1291 + 1.0430 * math.sqrt(scaled)
# Automated readability index
def ari(total_chars, total_words, total_sentences):
    """ Calculates the Automated Readability Index (ARI) of a text.

    Fixed: the coefficients were applied to the wrong terms. ARI is defined as
    4.71 * (chars/words) + 0.5 * (words/sentences) - 21.43, but the old code
    multiplied 4.71 with the average sentence length and 0.5 with the average
    word length, which inverted the weighting of the two ratios.
    :param total_chars: amount of chars in the text
    :param total_words: amount of words in the text
    :param total_sentences: amount of sentences in the text
    :return: value of the ari (0 for empty input)
    """
    if not total_sentences or not total_words:  # if there are no words/sentences the value is 0 (default)
        return 0
    word_len_weight = 4.71  # weight of the average word length (chars per word)
    sen_len_weight = 0.50   # weight of the average sentence length (words per sentence)
    offset = 21.43
    avg_word_len = total_chars / total_words
    avg_sen_len = total_words / total_sentences
    return word_len_weight * avg_word_len + sen_len_weight * avg_sen_len - offset
def avg_hundred(a, b):
    """ Expresses a relative to b as a per-hundred value (like a percentage).
    :param a: value that should show the average per hundred
    :param b: other value that is the dividend
    :return: avg per hundred
    """
    return (a / b) * 100
def coleman_liau(total_chars, total_words, total_sentences):
    """ Calculates the value of the Coleman-Liau index.
    :param total_chars: amount of chars in the text
    :param total_words: amount of words in the text
    :param total_sentences: amount of sentences in the text
    :return: value of the coleman liau index (0 for empty input)
    """
    if not total_words or not total_sentences:
        return 0  # undefined for empty texts, 0 is the default
    letters_per_100_words = avg_hundred(total_chars, total_words)
    sentences_per_100_words = avg_hundred(total_sentences, total_words)
    return 0.0588 * letters_per_100_words - 0.296 * sentences_per_100_words - 15.8
def get_speak_tempo(amount_words, time):
    """ Calculates the speaktempo in words per minute.
    :param amount_words: amount of words in the text
    :param time: time that passed, in seconds
    :return: speaktempo (words per minute)
    """
    return amount_words / (time / 60)
def difference_tempo(speak_tempo):
    """ Calculates how far the speaktempo deviates from the default tempo (180 WPM).
    :param speak_tempo: speaktempo of text
    :return: result of subtraction (speaktempo - default tempo)
    """
    default_wpm = 180  # default amount of words per minute
    return speak_tempo - default_wpm
def reading_time(amount_words):
    """ Calculates the reading time of a text assuming the default tempo of 180 WPM.
    :param amount_words: amount of words in the text
    :return: reading time in minutes
    """
    default_wpm = 180  # default amount of words per minute
    return amount_words / default_wpm
def speaking_difference(subtitle_time, read_time):
    """ Calculates the gap between the real speaking time and the reading time (transcripts).
    :param subtitle_time: complete time of the subtitles
    :param read_time: reading time of the text
    :return: subtraction (subtitle_time - read_time)
    """
    gap = subtitle_time - read_time
    return gap
| 5,523 | 37.096552 | 106 | py |
mooc_knowledge_gain | mooc_knowledge_gain-main/feature_extraction/client.py | import os
from stanza import install_corenlp
from stanza.server import CoreNLPClient
def install():
    """ Downloads the CoreNLP files when the local directory does not exist yet
    """
    if not os.path.isdir("./corenlp"):
        install_corenlp(dir="./corenlp/")
def init_client():
    """ Initialise the client
    :return: client
    """
    install()  # make sure the CoreNLP distribution is available locally
    os.environ["CORENLP_HOME"] = "./corenlp/"  # tell stanza where CoreNLP lives
    # annotators: tokenizer, POS tagger and constituency parser are all this project needs
    client = CoreNLPClient(annotators=['tokenize', 'pos', 'parse'],
                           timeout=30000, memory='16G',
                           threads=12,
                           be_quiet=True,
                           preload=True,
                           output_format='json',
                           endpoint='http://localhost:9001')  # Change port if it is closed
    return client
def annotate_sentence(client, sentence):
    """ Sends the sentence to the Stanford Corenlp to process it
    :param client: the active client to use
    :param sentence: the sentence which needs to be processed
    :return: results of processing
    """
    annotation = client.annotate(sentence)
    return annotation
def start_client(client):
    """ Starts the given CoreNLP client (spawns the server process)
    :param client: client to start
    """
    client.start()
def stop_client(client):
    """ Stops the given CoreNLP client (shuts the server process down)
    :param client: the client to stop
    """
    client.stop()
| 1,322 | 25.46 | 91 | py |
mooc_knowledge_gain | mooc_knowledge_gain-main/feature_extraction/processor.py | import re
import syllapy
import CompoundWordSplitter
import pre_processor
import readability
import tenses
import csv
def count_syllables(csv_file, word):
    """ Counts syllables from a word. The algorithm checks a list if it contains the word or not.
    If it contains the word then the amount of syllables is returned from the list. Else the
    amount of syllables is calculated manually.

    Fixed: the split used only the ASCII hyphen '-', while the rest of this module
    (contains_hyphen, get_freq_word, get_age_acquisition) splits on the unicode hyphen
    U+2010 '‐'; compound tokens using that character were never split and thus could not
    be looked up part by part. Both hyphen variants are handled now.
    :param csv_file: the file which contains words and syllables
    :param word: the word where the syllables have to be calculated
    :return: Amount of syllables from the word
    """
    words = re.split('[-‐]', word)  # split on ASCII hyphen and unicode hyphen (U+2010)
    all_syllables = 0
    for w in words:
        syllables = get_amount_syllable(csv_file, w)  # check list for syllables
        if not syllables:  # if value is 0: the word was not in the list
            all_syllables += syllapy.count(w)  # Calculate the syllables without the list
        else:
            all_syllables += syllables
    return all_syllables
def calculate_ngrams(tokens, size):
    """ Builds all n-grams of the given size from the tokens.
    :param tokens: the tokens which should be n-grams
    :param size: the length of one n-gram, for example 2 is a 2-gram; 0 yields no n-grams
    :return: Matrix with n-grams (only full-sized n-grams are kept)
    """
    if not size:  # when size is 0 there are no n-grams
        return []
    last_start = len(tokens) - size  # last index where a full n-gram still fits
    return [list(tokens[start:start + size]) for start in range(last_start + 1)]
def remove_duplicates(elements):
    """ Removes duplicates from a list while keeping the first-seen order
    :param elements: the list which may contain duplicates
    :return: list without duplicates
    """
    # dict keys are unique and preserve insertion order
    return list(dict.fromkeys(elements))
def remove_suffix(word):
    """ Removes one of the common suffixes (ing, ed, es) from a word.
    :param word: the word which need to be manipulated
    :return: the manipulated word (unchanged when no suffix matched)
    """
    for suffix in ('ing', 'ed', 'es'):  # same priority order as before
        if word.endswith(suffix):
            return word[:-len(suffix)]
    return word
def contains_alpha(word):
    """ Checks if word has an alphabet char
    :param word: word which need to be checked
    :return: True if there is an alphabet char else False
    """
    return any(ch.isalpha() for ch in word)
def contains_hyphen(word):
    """ Checks if word contains the unicode hyphen (U+2010) used by the tokeniser
    :param word: the word which must be checked
    :return: True when word contains the hyphen, False otherwise
    """
    return '‐' in word
def is_compound(word):
    """ Checks if the word is a compound of words (for example sunflower, the compound of sun
    and flower).
    :param word: The word which has to be checked
    :return: True when the word is a compound of words else False
    """
    # the splitter returns more than one piece exactly when the word is a compound
    return len(CompoundWordSplitter.split(word, "en_en")) > 1
def is_proper_noun(pos):
    """ Checks if word is a proper noun or not. A proper noun has the pos-tag 'NNP' or 'NNPS'.
    :param pos: the pos which says what type of word the word is.
    :return: True when pos contains NNP else False
    """
    # both NNP (singular) and NNPS (plural) contain the substring 'NNP'
    return "NNP" in pos
def ratio(a, b):
    """ Calculates the ratio between a and b
    :param a: the value which has to be checked how many % it is in b
    :param b: the value that contains the 100%
    :return: the ratio, or a itself when b is 0
    """
    return a / b if b else a
def get_freq_word(csv_file, word):
    """ Looks up the corpus frequency of a word in the frequency csv-file.
    :param csv_file: the csv-file that contains the frequencies of words
    :param word: the specific word to check the frequency for
    :return: the average frequency of the word (average over its hyphen-separated parts)
    """
    words = word.split('‐')  # Word can consist of more than one word (unicode hyphen U+2010)
    freq = 0.0
    for w in words:  # calculate the frequency for every word
        # NOTE(review): seek(1) skips the first BYTE, not the header line; the header row is
        # still read (minus its first character) — confirm this matches the file layout.
        csv_file.seek(1)  # go to start of file again
        reader = csv.reader(csv_file, delimiter=",")
        for row in reader:
            if row[0].lower() == w.lower():
                freq += float(row[1].replace(",", ""))  # strip thousands separators
    return freq / len(words)  # calculate average frequency
def get_amount_syllable(csv_file, word):
    """ Looks up the number of syllables of a word in the syllable csv-file.
    :param csv_file: file that contains syllables for words
    :param word: the specific word to get the syllables for
    :return: the amount of syllables or 0 when the list does not contain the word
    """
    # NOTE(review): seek(1) skips the first BYTE, not the header line; the header row is
    # still read (minus its first character) — confirm this matches the file layout.
    csv_file.seek(1)  # go to start of file again (ignore header)
    reader = csv.reader(csv_file, delimiter=',')
    for row in reader:
        if row[0].lower() == word.lower():
            if row[2] != '#':  # '#' marks a missing syllable value in the list
                return float(row[2])
            else:
                return 0  # default value to show that there is no value
    return 0  # default value to show that there is no value
def get_age_acquisition(csv_file, word):
    """ Calculate the age of acquisition of a word. If the word consists of more than one word
    then this function will calculate every age of acquisition for each partial word.
    :param csv_file: The file which has the age of acquisition values (value in column 10)
    :param word: The word that have to be checked
    :return: The average of all calculated values over the word's parts
    """
    words = word.split('‐')  # Word can consist of more than one word (unicode hyphen U+2010)
    values = 0.0
    for w in words:
        stored = False  # If value of a word is added to values no need to add a default value!
        # NOTE(review): seek(1) skips the first BYTE, not the header line — confirm intent.
        csv_file.seek(1)  # go to start of file again (ignore header)
        reader = csv.reader(csv_file, delimiter=',')
        for row in reader:
            if row[0].lower() == w.lower():  # need to check if word is same. upper and lowercase don't matter
                stored = True
                if not row[10] == "NA":
                    values += float(row[10].replace(",", "."))  # Stores the calculated value from the csv-file
                else:  # no known value, use default value
                    values += 10.36  # average value as default
        if not stored:  # word was not in list
            values += 10.36  # average value as default
    return values / len(words)  # average age of the complete word
class Calculator:
""" Class that is used for calculating all features
"""
    def __init__(self):
        """Initialise every counter/statistic with its neutral default: 0 for sums, counts,
        averages and maxima, float('inf') for minima (so the first comparison always wins;
        change_inf() later maps untouched minima back to 0).
        """
        self.sentences = []  # Array of all sentences/lines
        self.tokens = []  # Array of tokens
        self.lemmas = []  # Array of lemmas
        self.amount_sentences = 0  # Amount of sentences/lines
        self.amount_tokens = 0  # counter for tokens (words)
        self.amount_freq_tokens = 0  # Amount of tokens which can be used for frequency calculation
        self.amount_age_tokens = 0  # Amount of tokens which can be used for age acquisition calculation
        self.amount_unique_tokens = 0  # Amount of unique tokens (words)
        self.ratio_unique_tokens = 0  # Ratio of unique tokens to all tokens
        self.amount_unique_lemmas = 0  # Amount of unique lemmas
        self.ratio_unique_lemmas = 0  # Ratio of unique lemmas to all tokens
        # Attributes for tokens
        self.sum_tok_len = 0
        self.min_tok_len = float('inf')
        self.avg_tok_len = 0
        self.max_tok_len = 0
        self.amount_freq_tok = 0  # All frequences of words added together
        self.avg_freq_tok = 0  # Average frequence of an word
        # Attributes for ngrams
        self.amount_threegrams = 0
        self.avg_threegrams = 0  # Average of threegrams per sentence
        self.amount_fourgrams = 0
        self.avg_fourgrams = 0  # Average of fourgrams per sentence
        # Attributes for sentences
        self.min_sen_len = float('inf')  # minimum amount of words for a sentence
        self.avg_sen_len = 0  # average amount of words for a sentence
        self.max_sen_len = 0  # maximum amount of words for a sentence
        self.min_sen_char_amount = float('inf')  # minimum amount of chars for a sentence
        self.avg_sen_char_amount = 0  # average amount of chars for a sentence
        self.max_sen_char_amount = 0  # maximum amount of chars for a sentence
        # Attributs for syllables
        self.amount_syl = 0  # amount of syllables
        self.amount_one_syl = 0  # amount of words that have only one syllable
        self.amount_two_syl = 0  # amount of words that have two syllables
        self.amount_psyl = 0  # amount of syllables greater than 2
        self.amount_hard = 0  # amount of hard words (over 2 syllables, no compound word, no Proper Noun)
        self.avg_syl = 0  # Average of syllables per word
        self.ratio_one_syl = 0  # Ratio between one syllable words and total words
        self.ratio_two_syl = 0  # Ratio between two syllable words and total words
        self.ratio_psyl = 0  # Ratio between syllables greater than 2 and total words
        self.ratio_hard = 0  # Ratio between hard words and total words
        # Attributes for age of acquistion
        self.sum_age = 0  # Age of acquisition (all values together)
        self.min_age = float('inf')  # Minimum value of the age of acquisition of a word
        self.avg_age = 0  # Average value of the age of acquisition
        self.max_age = 0  # Maximum value of the age of acquisition of a word
        # slides specific
        self.pages = 0  # Amount of pages
        self.sum_lines = 0  # Number of lines
        self.min_lines = float('inf')  # Minimum number of lines per page
        self.avg_lines = 0  # Average number of lines per page
        self.max_lines = 0  # maximum number of lines per page
        self.sum_words_page = 0  # Amount of words of a page
        self.min_words_page = float('inf')  # Minimum number of words per page
        self.avg_words_page = 0  # Average number of words per page
        self.max_words_page = 0  # Maximum number of words per page
        # Readability
        self.flesch_ease = 0  # Flesch Reading Ease
        self.flesch_kin = 0  # Flesch Kincaid
        self.gunning_fog = 0  # Gunning Fog
        self.smog = 0  # SMOG
        self.ari = 0  # Automated Readability Index
        self.coleman = 0  # Coleman Liauf
        self.read_time = 0  # Time to read text
        # Transcript only
        self.speak_time = 0  # Speak time of the video
        self.speak_difference = 0  # Difference between speak and reading time
        self.amount_subtitles = 0  # Amount of subtitles
        # Word-types
        self.amount_adj = 0  # Amount of adjectives
        self.avg_adj = 0  # Average amount of adjectives per sentence
        self.ratio_adj = 0  # Ratio of adjectives to total words
        self.amount_adposition = 0  # Amount of adpositions (pre- and posposition)
        self.avg_adposition = 0  # Average amount of adpositions per sentence
        self.ratio_adposition = 0  # Ratio of adpositions (pre- and posposition) to total words
        self.amount_noun = 0  # Amount of nouns
        self.avg_noun = 0  # Average amount of nouns per sentence
        self.ratio_noun = 0  # Ratio of nouns to total words
        self.amount_pronoun = 0  # Amount of pronouns
        self.avg_pronoun = 0  # Average amount of pronouns per sentence
        self.ratio_pronoun = 0  # Ratio of pronouns to total words
        self.ratio_pronoun_noun = 0  # Ratio of pronouns to total words
        self.amount_verb = 0  # Amount of all verbs
        self.avg_verb = 0  # Average amount of verbs per sentence
        self.ratio_verb = 0  # Ratio of verbs to total words
        self.amount_main_verb = 0  # Amount of all main verbs (no auxiliaries)
        self.avg_main_verb = 0  # Average amount of main verbs per sentence
        self.ratio_main_verb = 0  # Ratio of main verbs to total words
        self.amount_auxiliary = 0  # Amount of auxiliaries
        self.avg_auxiliary = 0  # Average amount of auxiliaries per sentence
        self.ratio_auxiliary = 0  # Ratio of auxiliaries to total words
        self.amount_adverb = 0  # Amount of adverbs
        self.avg_adverb = 0  # Average amount of adverbs per sentence
        self.ratio_adverb = 0  # Ratio of adverbs to total words
        self.amount_coordinate_conj = 0  # Amount of coordinate conjunctions
        self.avg_coordinate_conj = 0  # Average amount of coordinate conjunctions per sentence
        self.ratio_coordinate_conj = 0  # Ratio of coordinate conjunctions to total words
        self.amount_determiner = 0  # Amount of determiners
        self.avg_determiner = 0  # Average amount of determiners per sentence
        self.ratio_determiner = 0  # Ratio of determiners to total words
        self.amount_interjection = 0  # Amount of interjections
        self.avg_interjection = 0  # Average amount of interjections per sentence
        self.ratio_interjection = 0  # Ratio of interjections to total words
        self.amount_num = 0  # Amount of numbers (written as words)
        self.avg_num = 0  # Average amount of numbers (written as words) per sentence
        self.ratio_num = 0  # Ratio of numbers (written as words) to total words
        self.amount_particle = 0  # Amount of particles
        self.avg_particle = 0  # Average amount of particles per sentence
        self.ratio_particle = 0  # Ratio of particles to total words
        self.amount_subord_conjunction = 0  # Amount of subordinates conjunctions
        self.avg_subord_conjunction = 0  # Average amount of subordinates conjunctions per sentence
        self.ratio_subord_conjunction = 0  # Ratio of subordinates conjunctions to total words
        self.amount_foreign_word = 0  # Amount of foreign words
        self.avg_foreign_word = 0  # Average amount of foreign words per sentence
        self.ratio_foreign_word = 0  # Ratio of foreign words to total words
        self.amount_content_word = 0  # Amount of content words
        self.avg_content_word = 0  # Average amount of content words per sentence
        self.ratio_content_word = 0  # Ratio of content words to total words
        self.amount_function_word = 0  # Amount of function words
        self.avg_function_word = 0  # Average amount of function words per sentence
        self.ratio_function_word = 0  # Ratio of function words to total words
        self.amount_filtered = 0  # Amount of tokens that were filtered.
        self.avg_filtered = 0  # Ratio of function filtered words to total words
        self.ratio_filtered = 0  # Ratio of filtered words to unfiltered ones
def check_min_sen(self, sen_len):
""" Checks if the current sentence has the minimum length (minimum number of words/tokens)
:param sen_len: the length of the current sentence
"""
if sen_len < self.min_sen_len:
self.min_sen_len = sen_len
def check_max_sen(self, sen_len):
""" Checks if the current sentence has the maximum length (maximum number of words/tokens)
:param sen_len: the length of the current sentence
"""
if sen_len > self.max_sen_len:
self.max_sen_len = sen_len
def check_min_sen_chars_amount(self, chars):
""" Checks if the current sentence has the minimum amount of chars
:param chars: the amount of chars
"""
if chars < self.min_sen_char_amount:
self.min_sen_char_amount = chars
def check_max_sen_chars_amount(self, chars):
""" Checks if the current sentence has the maximum amount of chars
:param chars: the amount of chars
"""
if chars > self.max_sen_char_amount:
self.max_sen_char_amount = chars
    def process_sen(self, sen_len, chars):
        """ Process amount of words/tokens and chars of the current sentence to calculate the min/max values of them
        :param sen_len: amount of words/tokens
        :param chars: amount of chars
        """
        # word-count extrema
        self.check_min_sen(sen_len)
        self.check_max_sen(sen_len)
        # character-count extrema
        self.check_min_sen_chars_amount(chars)
        self.check_max_sen_chars_amount(chars)
def calculate_avg_sen(self):
""" Calculates the average sentence length and the average amount of chars per sentence
"""
if len(self.sentences):
self.avg_sen_len = self.amount_tokens / len(self.sentences)
self.avg_sen_char_amount = self.sum_tok_len / len(self.sentences)
def check_min_tok(self, tok_len):
""" Checks if the current token has the minimum amount of chars
:param tok_len: amount of chars
"""
if tok_len < self.min_tok_len:
self.min_tok_len = tok_len
def check_max_tok(self, tok_len):
""" Checks if the current token has the maximum amount of chars
:param tok_len: amount of chars
"""
if tok_len > self.max_tok_len:
self.max_tok_len = tok_len
    def process_tok(self, tok_len):
        """ Adds the token length to a variable and checks the minimum/maximum length
        :param tok_len: amount of chars in the word/token
        """
        self.sum_tok_len += tok_len  # running total of characters over all tokens
        self.check_min_tok(tok_len)
        self.check_max_tok(tok_len)
def calculate_avg_tok(self):
""" Calculates the average length of a word/token (amount of chars)
"""
if len(self.tokens):
self.avg_tok_len = self.sum_tok_len / len(self.tokens)
def check_min_line(self, amount_line):
""" Checks if the current slide has the minimum amount of slides
:param amount_line: Amount of lines
"""
if amount_line < self.min_lines:
self.min_lines = amount_line
def check_max_line(self, amount_line):
""" Checks if the current slide has the maximum amount of slides
:param amount_line: Amount of lines
"""
if amount_line > self.max_lines:
self.max_lines = amount_line
def calculate_avg_line(self):
""" Calculates the average amount of lines per slide
"""
if self.pages:
self.avg_lines = self.sum_lines / self.pages
def check_min_words(self):
""" Checks if the amount of words/tokens of the current slide has the minimum number. if it has the minimum
number, it will be set as minimum
"""
if self.sum_words_page < self.min_words_page:
self.min_words_page = self.sum_words_page
def check_max_words(self):
""" Checks if the amount of words/tokens of the current slide has the maximum number. If it has the minimum
number, it will be set as maximum
"""
if self.sum_words_page > self.max_words_page:
self.max_words_page = self.sum_words_page
def calculate_avg_words(self):
""" Calculates the average amount of words/tokens per slide
"""
if self.pages:
self.avg_words_page = self.amount_tokens / self.pages
def calculate_threegrams(self, tokens):
""" Calculates the amount of trigrams of a list of tokens and adds the amount to a variable
:param tokens: a list of tokens
"""
if len(tokens):
self.amount_threegrams += len(calculate_ngrams(tokens, 3))
def calculate_fourgrams(self, tokens):
""" Calculates the amount of tetragrams of a list of tokens and adds the amount to a variable
:param tokens: a list of tokens
"""
if len(tokens):
self.amount_fourgrams += len(calculate_ngrams(tokens, 4))
def calculate_avg_ngrams(self):
""" Calculates the average amount of trigrams/fourgrams per sentence
"""
self.avg_threegrams = self.amount_threegrams / self.amount_sentences
self.avg_fourgrams = self.amount_fourgrams / self.amount_sentences
def change_inf(self):
""" Changes every min value which still have the infinite value to 0. The infinitive value was only there to
calculate the correct min value (f. e. 0 as initial value would cause bugs because higher min values are
bigger than 0 (so the value 0 does not change)
"""
if self.min_lines == float('inf'):
self.min_lines = 0
if self.min_sen_char_amount == float('inf'):
self.min_sen_char_amount = 0
if self.min_words_page == float('inf'):
self.min_words_page = 0
if self.min_tok_len == float('inf'):
self.min_tok_len = 0
if self.min_sen_len == float('inf'):
self.min_sen_len = 0
if self.min_age == float('inf'):
self.min_age = 0
    def process_page(self, amount_line):
        """ Calculates word and line specific features of a page (a page represents one slide)
        :param amount_line: amount of lines of the page
        """
        # Words: fold the finished page's word count into the extrema, then reset it
        self.check_min_words()
        self.check_max_words()
        self.sum_words_page = 0
        # Lines: accumulate the total and fold the page's line count into the extrema
        self.sum_lines += amount_line
        self.check_min_line(amount_line)
        self.check_max_line(amount_line)
def unique_words(self):
""" Calculates the amount of unique tokens and unique lemmas. Also it calculates the ratio of them to all
words/tokens
"""
filtered_tokens = remove_duplicates(self.tokens)
filtered_lemmas = remove_duplicates(self.lemmas)
self.amount_unique_tokens = len(filtered_tokens)
self.amount_unique_lemmas = len(filtered_lemmas)
self.ratio_unique_tokens = len(filtered_tokens) / self.amount_tokens
self.ratio_unique_lemmas = len(filtered_lemmas) / self.amount_tokens
def calculate_freq(self, properties, word):
""" Calculate the frequency of a word and adds it to a variable
:param properties: csv-file that contains frequencies of words
:param word: the current word
"""
self.amount_freq_tok += get_freq_word(properties, word)
def calculate_avg_freq(self):
""" Calculates the average frequency of a word
"""
self.avg_freq_tok = self.amount_freq_tok / self.amount_freq_tokens
    def calculate_syllables(self, csv_file, word, pos):
        """ Calculates the syllables of a word. This adds the amount of syllables to a variable and checks whether the
        word is a one-syllable, two-syllable, polysyllable oder hard word. The number of the correct wordclass will be
        incremented
        :param csv_file: cs-file that contains the amount of syllables of words
        :param word: the current word/token to check
        :param pos: the Part-of-Speech Tag of the word/token
        """
        syllables = count_syllables(csv_file, word)
        self.amount_syl += syllables
        if syllables >= 3:  # polysyllable words have 3 or more syllables
            self.amount_psyl += 1
            # re-count on the stem: "hard words" are judged with the common suffix stripped
            syllables = count_syllables(csv_file, remove_suffix(word))
            ''' hard words have three or more syllables. Also they are no proper nouns
            and compound words
            '''
            if syllables >= 3 and not (contains_hyphen(word) or is_compound(word) or is_proper_noun(pos)):
                self.amount_hard += 1
        elif syllables == 2:  # word has two syllables
            self.amount_two_syl += 1
        else:  # word has one syllable (0-syllable results land here as well)
            self.amount_one_syl += 1
def calculate_relation_syllables(self):
""" Calculates the average amount of syllables per word and the ratio of the different syllabletypes to all
words/tokens
"""
self.avg_syl = self.amount_syl / self.amount_tokens
self.ratio_one_syl = self.amount_one_syl / self.amount_tokens
self.ratio_two_syl = self.amount_two_syl / self.amount_tokens
self.ratio_psyl = self.amount_psyl / self.amount_tokens
self.ratio_hard = self.amount_hard / self.amount_tokens
def calculate_age(self, csv_file, word):
""" Calculates the age of acquistion of a word and checks if this age is the current minimum/maximum value.
Also it adds the age of the word to a variable
:param csv_file: the csv-file that contains the age of words
:param word: the current word
"""
age = get_age_acquisition(csv_file, word)
self.calculate_min_age(age)
self.calculate_max_age(age)
self.sum_age += age
def calculate_min_age(self, age):
""" Checks if an age is the minimum age at the moment. If it is the minimum it will be stored as minimum.
:param age: age of acquisition of the actually word
"""
if self.min_age > age:
self.min_age = age
def calculate_avg_age(self):
""" Calculates the average age per word/token
"""
self.avg_age = self.sum_age / self.amount_age_tokens
def calculate_max_age(self, age):
""" Checks if an age is the maximum age at the moment. If it is the maximum it will be stored as minimum.
:param age: age of acquisition of the actually word
"""
if self.max_age < age:
self.max_age = age
def calculate_speech_diff(self):
""" Calculates the difference between the real speaking time vs. the calculated reading time. (transcript)
"""
self.speak_time = self.speak_time / 60
self.speak_difference = readability.speaking_difference(self.speak_time, self.read_time)
    def calculate_word_types(self, counter):
        """ Calculates the amount, average per sentence and ratio to all words/tokens for every wordtype
        :param counter: counter-object that contains the values of the wordtypes
        """
        # For every word class the same triplet is stored: absolute count, average per
        # sentence, and share of all tokens (via the external ratio() helper).
        # NOTE(review): the assignments assume amount_sentences > 0 -- confirm callers
        # only invoke this after at least one sentence was processed.
        self.amount_adj = counter.get_amount_adj()
        self.avg_adj = self.amount_adj / self.amount_sentences
        self.ratio_adj = ratio(counter.get_amount_adj(), self.amount_tokens)
        self.amount_adposition = counter.get_amount_adposition()
        self.avg_adposition = self.amount_adposition / self.amount_sentences
        self.ratio_adposition = ratio(counter.get_amount_adposition(), self.amount_tokens)
        self.amount_noun = counter.get_amount_noun()
        self.avg_noun = self.amount_noun / self.amount_sentences
        self.ratio_noun = ratio(counter.get_amount_noun(), self.amount_tokens)
        self.amount_pronoun = counter.get_amount_pronoun()
        self.avg_pronoun = self.amount_pronoun / self.amount_sentences
        self.ratio_pronoun = ratio(counter.get_amount_pronoun(), self.amount_tokens)
        # pronoun density relative to nouns (not to all tokens)
        self.ratio_pronoun_noun = ratio(counter.get_amount_pronoun(), counter.get_amount_noun())
        self.amount_verb = counter.get_amount_verb()
        self.avg_verb = self.amount_verb / self.amount_sentences
        self.ratio_verb = ratio(counter.get_amount_verb(), self.amount_tokens)
        self.amount_main_verb = counter.get_amount_main_verb()
        self.avg_main_verb = self.amount_main_verb / self.amount_sentences
        self.ratio_main_verb = ratio(counter.get_amount_main_verb(), self.amount_tokens)
        self.amount_auxiliary = counter.get_amount_auxiliary()
        self.avg_auxiliary = self.amount_auxiliary / self.amount_sentences
        self.ratio_auxiliary = ratio(counter.get_amount_auxiliary(), self.amount_tokens)
        self.amount_adverb = counter.get_amount_adverb()
        self.avg_adverb = self.amount_adverb / self.amount_sentences
        self.ratio_adverb = ratio(counter.get_amount_adverb(), self.amount_tokens)
        self.amount_coordinate_conj = counter.get_amount_coordinate_conj()
        self.avg_coordinate_conj = self.amount_coordinate_conj / self.amount_sentences
        self.ratio_coordinate_conj = ratio(counter.get_amount_coordinate_conj(), self.amount_tokens)
        self.amount_determiner = counter.get_amount_determiner()
        self.avg_determiner = self.amount_determiner / self.amount_sentences
        self.ratio_determiner = ratio(counter.get_amount_determiner(), self.amount_tokens)
        self.amount_interjection = counter.get_amount_interjection()
        self.avg_interjection = self.amount_interjection / self.amount_sentences
        self.ratio_interjection = ratio(counter.get_amount_interjection(), self.amount_tokens)
        self.amount_num = counter.get_amount_num()
        self.avg_num = self.amount_num / self.amount_sentences
        self.ratio_num = ratio(counter.get_amount_num(), self.amount_tokens)
        self.amount_particle = counter.get_amount_particle()
        self.avg_particle = self.amount_particle / self.amount_sentences
        self.ratio_particle = ratio(counter.get_amount_particle(), self.amount_tokens)
        self.amount_subord_conjunction = counter.get_amount_subord_conjuction()
        self.avg_subord_conjunction = self.amount_subord_conjunction / self.amount_sentences
        self.ratio_subord_conjunction = ratio(counter.get_amount_subord_conjuction(), self.amount_tokens)
        self.amount_foreign_word = counter.get_amount_foreign_word()
        self.avg_foreign_word = self.amount_foreign_word / self.amount_sentences
        self.ratio_foreign_word = ratio(counter.get_amount_foreign_word(), self.amount_tokens)
        self.amount_content_word = counter.get_amount_content_word()
        self.avg_content_word = self.amount_content_word / self.amount_sentences
        self.ratio_content_word = ratio(counter.get_amount_content_word(), self.amount_tokens)
        self.amount_function_word = counter.get_amount_function_word()
        self.avg_function_word = self.amount_function_word / self.amount_sentences
        self.ratio_function_word = ratio(counter.get_amount_function_word(), self.amount_tokens)
        self.amount_filtered = counter.get_amount_filtered()
        self.avg_filtered = self.amount_filtered / self.amount_sentences
        self.ratio_filtered = ratio(counter.get_amount_filtered(), self.amount_tokens)
    def process_readability(self):
        """ Calculates the result of the readability scales.
        """
        # every metric is delegated to the external readability module; inputs are the
        # token/sentence/syllable tallies accumulated while processing the text
        self.flesch_ease = readability.flesch_reading_ease(self.amount_tokens, self.amount_sentences, self.amount_syl)
        self.flesch_kin = readability.flesch_kincaid(self.amount_tokens, self.amount_sentences, self.amount_syl)
        self.gunning_fog = readability.gunning_fog(self.amount_tokens, self.amount_sentences, self.amount_hard)
        self.smog = readability.smog(self.amount_sentences, self.amount_psyl)
        self.ari = readability.ari(self.sum_tok_len, self.amount_tokens, self.amount_sentences)
        self.coleman = readability.coleman_liau(self.sum_tok_len, self.amount_tokens, self.amount_sentences)
        # estimated silent reading time, later compared against real speaking time
        self.read_time = readability.reading_time(self.amount_tokens)
    def process_tokens(self, text, sta, is_line, counter, properties, age, stopwords):
        """ Process the tokens of a sentence/line to get the token specific features
        :param text: sentence/line that contains the tokens
        :param sta: stanza object for NLP tasks
        :param is_line: true = text is a line, false = text is a sentence
        :param counter: counter-object for wordtypes/phrases features
        :param properties: csv-file that contains the frequency and amount of syllables of words
        :param age: csv-file that contains the age of acquisition of words
        :param stopwords: the list that contains stopwords
        :return: tuple of (the modified sentence/line for embedding features, the collected
                 verb groups for tense detection, the updated counter-object)
        """
        data = sta(text)
        is_question = False  # Checks if sentence is question (contains "?")
        context = 0  # counter for real words
        chars = 0  # counter for the chars of the real words
        tokens = []  # Array of unfiltered tokens
        tense = []  # An array of an array of tuples of words and their pos-tag. (only verbs)
        current_tense = []  # An array of tuples of words and their pos-tag of the current sub-sentence
        embedding = ""  # modified sentence for embedding calculation
        non_words_tags = ["PUNCT", "SYM", "X"]  # Tokens that has no meaning for the sentence
        verb_tags = ["VB", "VBP", "VBZ", "VBD", "VBG", "VBN", "MD", "TO"]  # Verb tenses to get tense form
        for sentence in data.sentences:  # Sometimes stanza interprets the sentence as multiple sentences
            for word in sentence.words:
                # print("upos: " + word.upos + " " + "xpos: " + word.xpos + " " + "word: " + word.text)
                xpos = word.xpos
                upos = word.upos
                wordtext = word.text
                lemma = word.lemma
                '''
                Check if token should be calculated in result or not. The word can be a symbol. Most symbols should be
                filtered for example •. But one important symbol for the meaning of the sentence is a mass. The mass
                defines the meaning of a number (30 % , 5 € etc.)
                '''
                if upos not in non_words_tags or xpos == "NN":
                    tokens.append(wordtext)
                    context += 1
                    chars += len(wordtext)
                    if xpos in verb_tags:
                        current_tense.append((wordtext.lower(), xpos))
                    if is_line:  # calculate line feature if text is a line
                        self.sum_words_page += 1
                    if embedding:  # if modified sentence has content, add whitespace and after that the token
                        embedding += " " + wordtext
                    else:  # add start of modified sentence
                        embedding += wordtext
                    self.tokens.append(wordtext.lower())  # make text lower to check later if it is unique
                    self.lemmas.append(lemma.lower())
                    self.amount_tokens += 1
                    self.process_tok(len(wordtext))
                    self.calculate_syllables(properties, word.text, xpos)
                    # counts the amount of masses
                    if xpos == "NN" and not re.search("[a-zA-Z]", wordtext):
                        counter.inc_mass()
                    ''' calculates the frequency of a token if it is not a number (f. e. 3, for three the frequency will
                    be calculated) and not a mass/ unknown word (f. e. $. A mass has the xpos 'NN' and
                    does not contain an alphabetic. Unknown words are a mixture of alphabets and other characters)
                    '''
                    # NOTE(review): "and" binds tighter than "or", so the stopword/proper-noun
                    # guard only applies to the first alternative -- confirm the intended
                    # grouping is `not proper and not stopword and (NUM-or-NN-alpha or other)`
                    if not is_proper_noun(xpos) and wordtext not in stopwords and \
                            ((upos == "NUM" or xpos == "NN") and wordtext.isalpha()) or \
                            (upos != "NUM" and xpos != "NN"):
                        self.calculate_freq(properties, wordtext)
                        self.amount_freq_tokens += 1
                    # calculate age of acquistion
                    if ((upos == "NUM" or xpos == "NN") and wordtext.isalpha()) or (upos != "NUM" and xpos != "NN"):
                        self.calculate_age(age, wordtext)
                        self.amount_age_tokens += 1
                elif wordtext == "?":  # the sentence/line is a question when it contains a '?'
                    is_question = True
                # for better time prediction accuracy split parts of sentence
                elif wordtext in [',', ':'] and len(current_tense):
                    tense.append(current_tense)
                    current_tense = []
                counter.store_pos(xpos)
                counter.store_upos(upos)
        if is_question:
            counter.inc_question()
        else:
            counter.inc_statement()
        self.process_sen(context, chars)
        if len(current_tense):  # if the filtered sentence/line has a minimum of 1 token use it for calculations
            tense.append(current_tense)
        self.calculate_threegrams(tokens)
        self.calculate_fourgrams(tokens)
        return embedding, tense, counter
    def process_lines(self, lines, cli, sta, counter, properties, age, stopwords):
        """ Process the lines of a pdf to calculate all the features
        :param lines: the lines of a pdf
        :param cli: client to use the coreNLP
        :param sta: stanza-object for NLP tasks
        :param counter: counter-object for wordtypes and phrasetypes features
        :param properties: the csv-file that contains the frequency and amount of syllables of words
        :param age: the csv-file that contains the age of acquistion of words
        :param stopwords: the list that contains the stopwords (txt file)
        :return: the features for the lines and an array of lines
        """
        time = tenses.Tenses()
        amount_lines = 0
        for line in lines:
            line = line.decode("utf-8")
            # a marker line separates the pages of the pdf; finish the previous page
            # (if any) and reset the per-page line counter
            if "Starting next page:" in line:
                if self.pages:
                    self.process_page(amount_lines)
                self.pages += 1
                amount_lines = 0
            else:
                amount_lines += 1
                # normalise apostrophes before the NLP pipeline sees the line
                line = pre_processor.convert_apostrophe(line)
                words = pre_processor.get_words(line, sta)
                line = pre_processor.convert_apostrophe_nlp(line, words)
                counter.get_phrases(cli, line)
                embedding, tense, counter = self.process_tokens(line, sta, True, counter, properties, age, stopwords)
                self.sentences.append(embedding)
                time.process_tenses(tense)  # Verb tenses features
        self.process_page(amount_lines)  # stores the values of the last page
        self.amount_sentences = len(self.sentences)
        counter.calculate_avg_sentence_parts(self.amount_sentences)
        # replace infinity placeholders before the aggregate features are derived
        self.change_inf()
        self.unique_words()
        self.process_readability()
        self.calculate_avg_age()
        self.calculate_avg_line()
        self.calculate_avg_sen()
        self.calculate_avg_tok()
        self.calculate_word_types(counter)
        self.calculate_avg_freq()
        self.calculate_avg_ngrams()
        self.calculate_avg_words()
        self.calculate_relation_syllables()
        return self.get_features_line() + counter.get_features() + time.get_features(), self.sentences
    def process_sentences(self, text, cli, sta, counter, properties, age, stopwords):
        """ Process the sentences of a srt-file (subtitles) to calculate all the features.
        :param text: the text of a srt-file
        :param cli: client to use the coreNLP
        :param sta: stanza-object for NLP tasks
        :param counter: counter-object for wordtypes and phrasetypes features
        :param properties: the csv-file that contains the frequency and amount of syllables of words
        :param age: the csv-file that contains the age of acquistion of words
        :param stopwords: the list that contains the stopwords (txt file)
        :return: the features for the sentences and an array of sentences
        """
        time = tenses.Tenses()
        # extracts the components of the srt-file
        text, self.speak_time, self.amount_subtitles = pre_processor.get_srt(text.decode('utf-8'), sta)
        for sentence in text:  # iterate through the text (sentence for sentence)
            # print(sentence)
            counter.get_phrases(cli, sentence)
            embedding, tense, counter = self.process_tokens(sentence, sta, False, counter, properties, age, stopwords)
            self.sentences.append(embedding)
            time.process_tenses(tense)
        self.amount_sentences = len(self.sentences)
        counter.calculate_avg_sentence_parts(self.amount_sentences)
        # replace infinity placeholders before the aggregate features are derived
        self.change_inf()
        self.unique_words()
        self.process_readability()
        self.calculate_avg_age()
        self.calculate_avg_sen()
        self.calculate_avg_tok()
        self.calculate_word_types(counter)
        self.calculate_avg_freq()
        self.calculate_avg_ngrams()
        self.calculate_speech_diff()
        self.calculate_relation_syllables()
        return self.get_features_sentence() + counter.get_features() + time.get_features(), self.sentences
def __get_features(self, ignore):
""" Collects all features and returns them
:param ignore: attributes that are not part of the features of the specific file
:return: returns the features of this class
"""
features = []
for elem in self.__dict__.items():
if elem[0] not in ignore:
features.append(round(elem[1], 6))
return features
def get_features_line(self):
""" Collects the features for a pdf and returns them
:return: returns the features for a pdf
"""
return self.__get_features(["sentences", "amount_sentences", "tokens", "amount_freq_tokens",
"amount_age_tokens", "lemmas",
"amount_freq_tok", "speak_time", "amount_subtitles",
"speak_difference", "sum_words_page", "sum_age", "amount_threegrams",
"amount_fourgrams"])
def get_features_sentence(self):
""" Collects the features for a srt-file and returns them
:return: returns the features for a srt-file
"""
ignore = ["sentences", "tokens", "amount_freq_tokens", "amount_age_tokens", "lemmas", "amount_freq_tok",
"pages", "sum_lines", "min_lines", "avg_lines", "max_lines", "sum_words_page",
"min_words_page", "avg_words_page", "max_words_page", "sum_age", "amount_threegrams",
"amount_fourgrams"]
return self.__get_features(ignore)
class Counter:
    """ The class that process and stores the wordtypes and phrasetypes features
    """
    def __init__(self):
        """ Initialise the class with default values
        """
        self.statement = 0  # Normal statement (terminates for example with . and !)
        self.question = 0  # Question (terminates with ?)
        self.mass = 0  # amount of mass (to check the amount of the filtered tokens)
        # POS TAGS (count them)
        self.dict = {"CC": 0, "CD": 0, "DT": 0, "EX": 0, "FW": 0, "IN": 0, "JJ": 0, "JJR": 0, "JJS": 0, "LS": 0,
                     "MD": 0, "NN": 0, "NNS": 0, "NNP": 0, "NNPS": 0, "PDT": 0, "POS": 0, "PRP": 0, "PRP$": 0, "RB": 0,
                     "RBR": 0, "RBS": 0, "RP": 0, "SYM": 0, "TO": 0, "UH": 0, "VB": 0, "VBD": 0, "VBG": 0, "VBN": 0,
                     "VBP": 0, "VBZ": 0, "WDT": 0, "WP": 0, "WP$": 0, "WRB": 0, "#": 0, "$": 0, ".": 0, ",": 0, ":": 0,
                     "(": 0, ")": 0, "``": 0, "''": 0, "NFP": 0, "HYPH": 0, "-LRB-": 0, "-RRB-": 0, "AFX": 0, "ADD": 0}
        self.universal_pos = {"ADJ": 0, "ADP": 0, "ADV": 0, "AUX": 0, "CCONJ": 0, "DET": 0, "INTJ": 0, "NOUN": 0,
                              "NUM": 0, "PART": 0, "PRON": 0, "PROPN": 0, "PUNCT": 0, "SCONJ": 0, "SYM": 0, "VERB": 0,
                              "X": 0}
        # POS TAG sentence part (nominal phrase etc.)
        self.adjp = 0  # Adjective phrase
        self.avg_adjp = 0  # Average amount of adjective phrases per sentence
        self.advp = 0  # Adverb phrase
        self.avg_advp = 0  # Average amount of adverb phrases per sentence
        self.np = 0  # Noun phrase
        self.avg_np = 0  # Average amount of noun phrases per sentence
        self.pp = 0  # Prepositional phrase
        self.avg_pp = 0  # Average amount of prepositional phrases per sentence
        self.s = 0  # Simple declarative clause
        self.avg_s = 0  # Average amount of declarative clause per sentence
        self.frag = 0  # Fragment
        self.avg_frag = 0  # Average amount of fragments per sentence
        self.sbar = 0  # Subordinate clause
        self.avg_sbar = 0  # Average amount of subordinate clauses per sentence
        self.sbarq = 0  # Direct question introduced by wh-element
        self.avg_sbarq = 0  # Average amount of questions introduced by wh-element per sentence
        self.sinv = 0  # Declarative sentence with subject-aux inversion
        self.avg_sinv = 0  # Average amount of declarative sentences with subject-aux inversion per sentence
        self.sq = 0  # Questions without wh-element and yes/no questions
        self.avg_sq = 0  # Average amount of questions without wh-element and yes/no questions per sentence
        self.vp = 0  # Verb phrase
        self.avg_vp = 0  # Average amount of verb phrases per sentence
        self.whadvp = 0  # Wh-adverb phrase
        self.avg_whadvp = 0  # Average amount of wh-adverb phrases per sentence
        self.whnp = 0  # Wh-noun phrase
        self.avg_whnp = 0  # Average amount of wh-noun phrases per sentence
        self.whpp = 0  # Wh-prepositional phrase
        self.avg_whpp = 0  # Average amount of wh-prepositional phrases per sentence
        self.avg_phrases = 0  # Average amount of phrases per sentence
    def inc_statement(self):
        """ Increments the amount of statements
        """
        self.statement += 1
    def inc_question(self):
        """ Increments the amount of questions
        """
        self.question += 1
    def inc_mass(self):
        """ Increments the amount of mass
        """
        self.mass += 1
    def get_amount_adj(self):
        """ Returns the amount of adjectives
        :return: Amount of adjectives
        """
        return self.universal_pos["ADJ"]
    def get_amount_adposition(self):
        """ Returns the amount of adpositions
        :return: Amount of adpositions
        """
        return self.universal_pos["ADP"]
    def get_amount_noun(self):
        """ Returns the amount of nouns (common and proper nouns combined)
        :return: Amount of nouns
        """
        return self.universal_pos["NOUN"] + self.universal_pos["PROPN"]
    def get_amount_pronoun(self):
        """ Returns the amount of pronouns
        :return: Amount of pronouns
        """
        return self.universal_pos["PRON"]
    def get_amount_verb(self):
        """ Returns the amount of verbs (main verbs and auxiliaries combined)
        :return: Amount of verbs
        """
        return self.universal_pos["VERB"] + self.universal_pos["AUX"]
    def get_amount_main_verb(self):
        """ Returns the amount of main verbs
        :return: Amount of main verbs
        """
        return self.universal_pos["VERB"]
    def get_amount_auxiliary(self):
        """ Returns the amount of auxiliary verbs
        :return: Amount of auxiliary verbs
        """
        return self.universal_pos["AUX"]
    def get_amount_adverb(self):
        """ Returns the amount of adverbs
        :return: Amount of adverbs
        """
        return self.universal_pos["ADV"]
    def get_amount_coordinate_conj(self):
        """ Returns the amount of coordinate conjunctions
        :return: Amount of coordinate conjunctions
        """
        return self.universal_pos["CCONJ"]
    def get_amount_determiner(self):
        """ Returns the amount of determiners
        :return: Amount of determiners
        """
        return self.universal_pos["DET"]
    def get_amount_interjection(self):
        """ Returns the amount of interjections
        :return: Amount of interjections
        """
        return self.universal_pos["INTJ"]
    def get_amount_num(self):
        """ Returns the amount of numbers
        :return: Amount of numbers
        """
        return self.universal_pos["NUM"]
    def get_amount_particle(self):
        """ Returns the amount of particles
        :return: Amount of particles
        """
        return self.universal_pos["PART"]
    def get_amount_subord_conjuction(self):
        """ Returns the amount of subordinate conjunctions
        :return: Amount of subordinate conjunctions
        """
        return self.universal_pos["SCONJ"]
    def get_amount_foreign_word(self):
        """ Returns the amount of foreign words
        :return: Amount of foreign words
        """
        return self.dict["FW"]
    def get_amount_content_word(self):
        """ Returns the amount of content words (open word classes)
        :return: Amount of content words
        """
        amount_content_words = self.universal_pos["ADJ"] + self.universal_pos["ADV"] + self.universal_pos["INTJ"] + \
            self.universal_pos["NOUN"] + self.universal_pos["PROPN"] + self.universal_pos["VERB"]
        return amount_content_words
    def get_amount_function_word(self):
        """ Returns the amount of function words (closed word classes)
        :return: Amount of function words
        """
        amount_function_words = self.universal_pos["ADP"] + self.universal_pos["AUX"] + self.universal_pos["CCONJ"] + \
            self.universal_pos["DET"] + self.universal_pos["NUM"] + self.universal_pos["PART"] + \
            self.universal_pos["PRON"] + self.universal_pos["SCONJ"]
        return amount_function_words
    def get_amount_filtered(self):
        """ Calculates the number of all filtered tokens
        (punctuation/symbols/unknown minus foreign words and masses, which do count)
        :return: Number of all filtered tokens
        """
        return self.universal_pos["PUNCT"] + self.universal_pos["SYM"] + self.universal_pos["X"] - self.dict["FW"] - \
            self.mass
    def store_pos(self, pos):
        """ Increment the specific pos tag (xpos)
        :param pos: specific pos tag (xpos)
        """
        if pos in self.dict:
            self.dict[pos] += 1
        else:
            print("Is not in dictionary: " + pos)  # For tests
    def store_upos(self, upos):
        """ Increment the specific pos tag (upos)
        :param upos: specific pos tag (upos)
        """
        if upos in self.universal_pos:
            self.universal_pos[upos] += 1
        else:
            print("Is not in dictionary: " + upos)  # For tests
    def get_phrases(self, cli, sentence):
        """ From a given sentence/line this function counts all types of phrases and stores them
        :param cli: the client of corenlp to parse the sentence to get all tokens of the grammatical structure.
        :param sentence: The sentence/line which has to be checked
        """
        annotated = cli.annotate(sentence)
        # the bracketed parse tree is flattened to one line; every "(TAG " occurrence
        # marks one constituent of that phrase type
        parsed = annotated['sentences'][0]['parse'].replace("\r\n", " ")
        self.adjp += parsed.count("(ADJP ")
        self.advp += parsed.count("(ADVP ")
        self.np += parsed.count("(NP ")
        self.pp += parsed.count("(PP ")
        self.s += parsed.count("(S ")
        self.frag += parsed.count("(FRAG ")
        self.sbar += parsed.count("(SBAR ")
        self.sbarq += parsed.count("(SBARQ ")
        self.sinv += parsed.count("(SINV ")
        self.sq += parsed.count("(SQ ")
        self.vp += parsed.count("(VP ")
        self.whadvp += parsed.count("(WHADVP ")
        self.whnp += parsed.count("(WHNP ")
        self.whpp += parsed.count("(WHPP ")
    def calculate_avg_sentence_parts(self, amount_sentences):
        """ Calculates the average of phrases and specific phrases per sentence/line
        :param amount_sentences: amount of sentences/lines
        """
        # guard against division by zero when no sentences were found
        divisor = 1
        if amount_sentences:
            divisor = amount_sentences
        self.avg_adjp = self.adjp / divisor
        self.avg_advp = self.advp / divisor
        self.avg_np = self.np / divisor
        self.avg_pp = self.pp / divisor
        self.avg_s = self.s / divisor
        self.avg_frag = self.frag / divisor
        self.avg_sbar = self.sbar / divisor
        self.avg_sbarq = self.sbarq / divisor
        self.avg_sinv = self.sinv / divisor
        self.avg_sq = self.sq / divisor
        self.avg_vp = self.vp / divisor
        self.avg_whadvp = self.whadvp / divisor
        self.avg_whnp = self.whnp / divisor
        self.avg_whpp = self.whpp / divisor
        self.avg_phrases = self.get_amount_phrases() / divisor
    def get_amount_phrases(self):
        """ Calculates and returns the amount phrases
        :return: Amount of phrases
        """
        return self.adjp + self.advp + self.np + self.pp + self.s + self.frag + self.sbar + self.sbarq + self.sinv + \
            self.sq + self.vp + self.whadvp + self.whnp + self.whpp
    def get_features(self):
        """ Stores the values of all important attributes of this class (Counter) to an array and returns it.
        Absolute counts are interleaved with their ratio (to all sentence types for
        statement/question, to all phrases for the phrase counters).
        :return: Array with all the necessary attribute-values.
        """
        features = []
        amount_phrases = self.get_amount_phrases()
        amount_sentence_types = self.statement + self.question
        for element in self.__dict__.items():
            if element[0] not in ["dict", "universal_pos", "mass"]:
                features.append(round(element[1], 6))  # no ratio
                if element[0] in ["statement", "question"]:  # ratio
                    features.append(round(ratio(element[1], amount_sentence_types), 6))
                elif "avg" not in element[0]:
                    features.append(round(ratio(element[1], amount_phrases), 6))
        return features
| 53,380 | 46.789615 | 121 | py |
mooc_knowledge_gain | mooc_knowledge_gain-main/feature_extraction/files.py | import os
import sys
import csv
import fitz
import processor
import client
import embedding
import re
import scipy
import numpy
import torch
from operator import itemgetter
from SortedCollection import SortedCollection
def load_stop_words(path):
    """ Loads the stopwords from a text file (one word per line) to use them later.
    :param path: the path where the file exists
    :return: list of stopwords without trailing newlines
    """
    try:
        # 'with' guarantees the handle is closed again (the original leaked it)
        with open(path, 'r') as stops:
            return [line.replace("\n", "") for line in stops]
    except IOError:
        print("Couldn't load stopwords.txt")
        sys.exit(1)
def load_embd_model(name):
    """ Loads the sentence-embedding model with the given name. The program stops
    when no model with that name exists.
    :param name: name of model
    :return: specific model
    """
    try:
        model = embedding.get_model(name)
    except IOError:
        print("Couldn't load sentence-embedding model")
        sys.exit(1)
    return model
def get_embedding_array(name, length):
    """ Creates the predictor names of an embedding: "<name>_1" up to "<name>_<length>".
    :param name: base name that is used for the strings
    :param length: length of the embedding (number of predictors)
    :return: array with the enumerated names
    """
    # list comprehension replaces the manual while-loop counter of the original
    return [name + "_" + str(i) for i in range(1, length + 1)]
def store_embeddings_to_features(features, embeddings_sli, embeddings_tra):
    """ Takes the average embeddings of slides and transcripts and includes them into the array of features for a
    specific video. The knowledge-gain value and class label stay the last two entries of every row.
    :param features: features of videos (list of videos, each a list of per-person rows)
    :param embeddings_sli: average embedding of slides, one entry per row
    :param embeddings_tra: average embedding of transcript, one entry per row
    :return: features and average embeddings as one array
    """
    embd_pos = 0  # rows of all videos share one running embedding index
    for i, video_rows in enumerate(features):
        for y, row in enumerate(video_rows):
            # detach the trailing knowledge-gain value and label so the embedding
            # predictors can be inserted before them
            knowledge_gain = row[-1]
            knowledge_gain_value = row[-2]
            new_list = row[:-2]
            # extend() replaces the element-wise append loops of the original
            new_list.extend(embeddings_sli[embd_pos])
            new_list.extend(embeddings_tra[embd_pos])
            embd_pos += 1
            new_list.append(knowledge_gain_value)
            new_list.append(knowledge_gain)
            features[i][y] = new_list
    return features
def load_csv(path):
    """ Opens a csv-file for reading and hands the open handle to the caller.
    The program is stopped when the file cannot be opened.
    :param path: the path of the file
    :return: the open file object (caller is responsible for closing it)
    """
    try:
        return open(path, 'r', newline='', encoding="ISO-8859-1")
    except IOError:
        print("Couldn't load csv-file")
        sys.exit(1)
def get_z_values(test):
    """ Loads the knowledge gain of the test values (csv-file) and converts the values into z-scores to get a
    standard normal distribution. This distribution is used to classify the knowledge gain into one of three
    knowledge gain classes.
    :param test: open csv-file whose last column holds the knowledge gain per row
    :return: numpy array with the z-score of every knowledge gain value
    """
    test.seek(1)  # NOTE(review): seeks to byte 1, not line 1; harmless because the header row is skipped below
    reader = csv.reader(test, delimiter=',')
    values = []
    first = True  # ignores header
    for row in reader:
        if first:
            first = False
        else:
            values.append(float(row[len(row) - 1]))  # last entry contains the knowledge gain
    print("Convert Knowledge Gain values into nominal ones:")
    data = numpy.asarray(values)
    mean = data.mean()
    std = data.std()  # population std (ddof=0), identical to scipy.stats.zscore's default
    print("Mean:" + str(mean))
    print("Standard Deviation:" + str(std))
    # scipy.mean was removed from recent SciPy releases and scipy.stats is never
    # imported in this file, so the z-scores are computed directly with numpy
    converted = (data - mean) / std
    print("Converted.")
    return converted
def process_test(test, name, z_scores, slides_result, transcript_result, embedding_features):
    """ Generates all rows of a video for the result csv-file. The method checks how many persons watched the video.
    Every person represents a row and contains their knowledge gain
    :param test: csv-file with knowledge gain of persons
    :param name: name of the video
    :param z_scores: z_scores of all knowledge gains
    :param slides_result: features of the slides
    :param transcript_result: features of the transcript
    :param embedding_features: features of the embeddings for the video
    :return: rows that represent all information of a video
    """
    name = name.replace("video", "")  # test file has not video in the name of videos
    rows = []
    found = False  # shows if correct video values were already found
    pos = -1  # shouldn't check header so begin with -1 (-1 will automatically goes to 0)
    test.seek(1)  # go to start of file again (so every video can be processed)
    reader = csv.reader(test, delimiter=',')
    for row in reader:  # Iterates through all rows
        if name in row:  # checks if row is about the specific video
            # generates row with features
            person_id = [row[1]]
            # get visual features
            visual_features = row[2: len(row) - 3]
            knowledge_gain = [row[len(row) - 1]]
            # converts knowledge gain to nominal value; z-score thresholds at +/-0.5
            if -0.5 <= z_scores[pos] <= 0.5:
                knowledge_gain_level = ["Moderate"]
            elif z_scores[pos] < -0.5:
                knowledge_gain_level = ["Low"]
            else:
                knowledge_gain_level = ["High"]
            # NOTE(review): `found` is never set to True here, so the early-exit
            # branch below is dead code and the whole file is always scanned --
            # presumably `found = True` was intended in this branch
            rows.append([name] + person_id + slides_result + transcript_result + embedding_features
                        + visual_features + knowledge_gain + knowledge_gain_level)
        elif found:
            break
        pos += 1
    return rows
def create_csv(rows):
    """ Stores all the extracted features into a csv file
    Writes five files into ./Features: the full feature set, the text-only features,
    the multimedia-only features, and the two reduced embedding tables (slides / transcript).
    Expected row layout: [video_id, person_id, text features, visual features,
    slide embedding dims, transcript embedding dims, knowledge_gain, knowledge_gain_level].
    :param rows: rows that contains the information of the videos (every row has information about one video)
    """
    features = open('./Features/all_features.csv', 'w', newline='')
    text_features = open('./Features/text_features.csv', 'w', newline='')
    multimedia_features = open('./Features/multimedia_features.csv', 'w', newline='')
    avg_slide_embedding = open('./Features/slide_embedding.csv', 'w', newline='')
    avg_transcript_embedding = open('./Features/transcript_embedding.csv', 'w', newline='')
    text_features_writer = csv.writer(text_features, delimiter=',')
    multimedia_features_writer = csv.writer(multimedia_features, delimiter=',')
    features_writer = csv.writer(features, delimiter=',')
    slide_writer = csv.writer(avg_slide_embedding, delimiter=',')
    transcript_writer = csv.writer(avg_transcript_embedding, delimiter=',')
    # Column names of the slide-based text features (suffix _sli)
    slides_features = ["amount_tok_sli",
                       "amount_uni_tok_sli", "ratio_uni_tok_sli", "amount_uni_lemma_sli", "ratio_uni_lemma_sli",
                       "sum_tok_len_sli", "min_tok_len_sli",
                       "avg_tok_len_sli", "max_tok_len_sli", "avg_freq_tok_sli",
                       "avg_trigram_sli", "avg_tetragram_sli", "min_line_len", "avg_line_len",
                       "max_line_len", "min_line_chars", "avg_line_chars", "max_line_chars",
                       "amount_syl_sli", "amount_one_syl_sli", "amount_two_syl_sli",
                       "amount_psyl_sli", "amount_hard_sli", "avg_syl_sli", "ratio_one_syl_sli",
                       "ratio_two_syl_sli", "ratio_psyl_sli", "ratio_hard_sli",
                       "min_age_sli", "avg_age_sli", "max_age_sli",
                       "amount_slides", "sum_lines", "min_lines", "avg_lines", "max_lines",
                       "min_words_slide", "avg_words_slide", "max_words_slide", "flesch_ease_sli", "flesch_kin_sli",
                       "gunning_fog_sli", "smog_sli", "ari_sli", "coleman_sli",
                       "read_time_sli", "amount_adj_sli", "avg_adj_sli",
                       "ratio_adj_sli", "amount_adpos_sli", "avg_adpos_sli", "ratio_adpos_sli",
                       "amount_noun_sli", "avg_noun_sli",
                       "ratio_noun_sli", "amount_pronoun_sli", "avg_pronoun_sli",
                       "ratio_pronoun_sli", "ratio_pronoun_noun_sli", "amount_verb_sli", "avg_verb_sli",
                       "ratio_verb_sli",
                       "amount_main_verb_sli", "avg_main_verb_sli", "ratio_main_verb_sli", "amount_aux_sli",
                       "avg_aux_sli", "ratio_aux_sli",
                       "amount_adverb_sli", "avg_adverb_sli", "ratio_adverb_sli", "amount_coord_conj_sli",
                       "avg_coord_conj_sli", "ratio_coord_conj_sli", "amount_determiner_sli",
                       "avg_determiner_sli", "ratio_determiner_sli",
                       "amount_interj_sli", "avg_interj_sli", "ratio_interj_sli", "amount_num_sli",
                       "avg_num_sli", "ratio_num_sli",
                       "amount_particle_sli", "avg_particle_sli", "ratio_particle_sli", "amount_subord_conj_sli",
                       "avg_subord_conj_sli", "ratio_subord_conj_sli", "amount_foreign_sli",
                       "avg_foreign_sli", "ratio_foreign_sli",
                       "amount_content_word_sli", "avg_content_word_sli", "ratio_content_word_sli",
                       "amount_function_word_sli", "avg_function_word_sli", "ratio_function_word_sli",
                       "amount_filtered_sli", "avg_filtered_sli", "ratio_filtered_sli",
                       "amount_statement_sli", "ratio_statement_sli",
                       "amount_question_sli", "ratio_question_sli", "ADJP_sli", "ratio_ADJP_sli", "avg_ADJP_sli",
                       "ADVP_sli",
                       "ratio_ADVP_sli", "avg_ADVP_sli",
                       "NP_sli", "ratio_NP_sli", "avg_NP_sli", "PP_sli", "ratio_PP_sli", "avg_PP_sli",
                       "S_sli", "ratio_S_sli", "avg_S_sli", "FRAG_sli", "ratio_FRAG_sli", "avg_FRAG_sli",
                       "SBAR_sli", "ratio_SBAR_sli", "avg_SBAR_sli", "SBARQ_sli", "ratio_SBARQ_sli", "avg_SBARQ_sli",
                       "SINV_sli", "ratio_SINV_sli", "avg_SINV_sli", "SQ_sli", "ratio_SQ_sli", "avg_SQ_sli",
                       "VP_sli", "ratio_VP_sli", "avg_VP_sli", "WHADVP_sli", "ratio_WHADVP_sli", "avg_WHADVP_sli",
                       "WHNP_sli", "ratio_WHNP_sli", "avg_WHNP_sli", "WHPP_sli", "ratio_WHPP_sli", "avg_WHPP_sli",
                       "avg_phrases_sli",
                       "sim_pres_sli", "ratio_sim_pres_sli", "pres_prog_sli", "ratio_pres_prog_sli",
                       "pres_perf_sli", "ratio_pres_perf_sli", "pres_perf_prog_sli", "ratio_pres_perf_prog_sli",
                       "sim_pas_sli", "ratio_sim_pas_sli", "pas_prog_sli", "ratio_pas_prog_sli",
                       "pas_perf_sli", "ratio_pas_perf_sli", "pas_perf_prog_sli", "ratio_pas_perf_prog_sli",
                       "will_sli", "ratio_will_sli", "fu_prog_sli", "ratio_fu_prog_sli", "fu_perf_sli",
                       "ratio_fu_perf_sli",
                       "fu_perf_prog_sli", "ratio_fu_perf_prog_sli", "cond_sim_sli", "ratio_cond_sim_sli",
                       "cond_prog_sli", "ratio_cond_prog_sli", "cond_perf_sli", "ratio_cond_perf_sli",
                       "cond_perf_prog_sli", "ratio_cond_perf_prog_sli",
                       "gerund_sli", "ratio_gerund_sli", "perf_part_sli", "ratio_perf_part_sli",
                       "inf_sli", "ratio_inf_sli", "perf_inf_sli", "ratio_perf_inf_sli",
                       "active_sli", "ratio_active_sli", "passive_sli", "ratio_passive_sli"]
    # Column names of the transcript-based text features (suffix _tra)
    transcript_features = ["amount_sentences", "amount_tok_tra",
                           "amount_uni_tok_tra", "ratio_uni_tok_tra", "amount_uni_lemma_tra", "ratio_uni_lemma_tra",
                           "sum_tok_len_tra", "min_tok_len_tra",
                           "avg_tok_len_tra", "max_tok_len_tra", "avg_freq_tok_tra", "avg_trigram_tra",
                           "avg_tetragram_tra", "min_sen_len", "avg_sen_len",
                           "max_sen_len", "min_sen_chars", "avg_sen_chars", "max_sen_chars",
                           "amount_syl_tra", "amount_one_syl_tra", "amount_two_syl_tra",
                           "amount_psyl_tra", "amount_hard_tra", "avg_syl_tra", "ratio_one_syl_tra",
                           "ratio_two_syl_tra", "ratio_psyl_tra", "ratio_hard_tra",
                           "min_age_tra", "avg_age_tra", "max_age_tra", "flesch_ease_tra", "flesch_kin_tra",
                           "gunning_fog_tra", "smog_tra", "ari_tra", "coleman_tra", "read_time_tra",
                           "speak_time",
                           "speak_difference", "amount_subtitles", "amount_adj_tra", "avg_adj_tra", "ratio_adj_tra",
                           "amount_adpos_tra", "avg_adpos_tra", "ratio_adpos_tra", "amount_noun_tra",
                           "avg_noun_tra", "ratio_noun_tra",
                           "amount_pronoun_tra", "avg_pronoun_tra", "ratio_pronoun_tra", "ratio_pronoun_noun_tra",
                           "amount_verb_tra", "avg_verb_tra", "ratio_verb_tra",
                           "amount_main_verb_tra", "avg_main_verb_tra", "ratio_main_verb_tra", "amount_aux_tra",
                           "avg_aux_tra",
                           "ratio_aux_tra", "amount_adverb_tra", "avg_adverb_tra", "ratio_adverb_tra",
                           "amount_coord_conj_tra", "avg_coord_conj_tra", "ratio_coord_conj_tra",
                           "amount_determiner_tra", "avg_determiner_tra",
                           "ratio_determiner_tra", "amount_interj_tra", "avg_interj_tra",
                           "ratio_interj_tra",
                           "amount_num_tra", "avg_num_tra", "ratio_num_tra", "amount_particle_tra",
                           "avg_particle_tra", "ratio_particle_tra",
                           "amount_subord_conj_tra", "avg_subord_conj_tra", "ratio_subord_conj_tra",
                           "amount_foreign_tra", "avg_foreign_tra", "ratio_foreign_tra",
                           "amount_content_word_tra", "avg_content_word_tra", "ratio_content_word_tra",
                           "amount_function_word_tra", "avg_function_word_tra", "ratio_function_word_tra",
                           "amount_filtered_tra", "avg_filtered_tra", "ratio_filtered_tra",
                           "amount_statement_tra", "ratio_statement_tra", "amount_question_tra", "ratio_question_tra",
                           "ADJP_tra", "ratio_ADJP_tra", "avg_ADJP_tra", "ADVP_tra", "ratio_ADVP_tra", "avg_ADVP_tra",
                           "NP_tra", "ratio_NP_tra", "avg_NP_tra", "PP_tra", "ratio_PP_tra", "avg_PP_tra",
                           "S_tra", "ratio_S_tra", "avg_S_tra", "FRAG_tra", "ratio_FRAG_tra", "avg_FRAG_tra",
                           "SBAR_tra", "ratio_SBAR_tra", "avg_SBAR_tra", "SBARQ_tra", "ratio_SBARQ_tra",
                           "avg_SBARQ_tra", "SINV_tra", "ratio_SINV_tra", "avg_SINV_tra",
                           "SQ_tra", "ratio_SQ_tra", "avg_SQ_tra", "VP_tra", "ratio_VP_tra", "avg_VP_tra",
                           "WHADVP_tra", "ratio_WHADVP_tra", "avg_WHADVP_tra", "WHNP_tra", "ratio_WHNP_tra",
                           "avg_WHNP_tra", "WHPP_tra", "ratio_WHPP_tra", "avg_WHPP_tra", "avg_phrases_tra",
                           "sim_pres_tra", "ratio_sim_pres_tra", "pres_prog_tra", "ratio_pres_prog_tra",
                           "pres_perf_tra", "ratio_pres_perf_tra", "pres_perf_prog_tra", "ratio_pres_perf_prog_tra",
                           "sim_pas_tra", "ratio_sim_pas_tra", "pas_prog_tra", "ratio_pas_prog_tra",
                           "pas_perf_tra", "ratio_pas_perf_tra", "pas_perf_prog_tra", "ratio_pas_perf_prog_tra",
                           "will_tra", "ratio_will_tra", "fu_prog_tra", "ratio_fu_prog_tra", "fu_perf_tra",
                           "ratio_fu_perf_tra",
                           "fu_perf_prog_tra", "ratio_fu_perf_prog_tra", "cond_sim_tra", "ratio_cond_sim_tra",
                           "cond_prog_tra", "ratio_cond_prog_tra", "cond_perf_tra", "ratio_cond_perf_tra",
                           "cond_perf_prog_tra", "ratio_cond_perf_prog_tra", "gerund_tra", "ratio_gerund_tra",
                           "perf_part_tra", "ratio_perf_part_tra", "inf_tra", "ratio_inf_tra",
                           "perf_inf_tra", "ratio_perf_inf_tra", "active_tra",
                           "ratio_active_tra", "passive_tra", "ratio_passive_tra"]
    # Column names of the embedding-similarity features
    embedding_features = ["similarity_sli", "similarity_tra", "diff_similarity", "similarity_vectors"]
    # Column names of the multimedia/visual (audio + slide layout + rating) features
    visual_features = ["Clear_Language", "Vocal_Diversity", "Filler_Words", "Speed_of_Presentation",
                       "Coverage_of_the_Content",
                       "Level_of_Detail", "Highlight", "Summary", "Text_Design", "Image_Design", "Formula_Design",
                       "Table_Design",
                       "Structure", "Entry_Level", "Overall_Rating", "loudness_avg", "mod_loudness_avg",
                       "rms_energy_avg",
                       "f0_avg", "jitter_avg", "delta_jitter_avg", "shimmer_avg", "harmonicity_avg", "log_HNR_avg",
                       "PVQ_avg", "speech_rate", "articulation_rate", "average_syllable_duration", "txt_ratio_avg",
                       "txt_ratio_var", "img_ratio_avg", "img_ratio_var", "highlight", "level_of_detailing_avg",
                       "level_of_detailing_var",
                       "coverage_of_slide_content_avg", "coverage_of_slide_content_var"]
    # 16 header names each for the dimensionality-reduced embeddings
    avg_embedding_slides = get_embedding_array("avg_embd_slides_dim", 16)
    avg_embedding_transcript = get_embedding_array("avg_embd_transcript_dim", 16)
    # write the header row of each output file
    features_writer.writerow(["Video_ID"] + ["Person_ID"] + slides_features + transcript_features + embedding_features
                             + visual_features + ["Knowledge_Gain", "Knowledge_Gain_Level"])
    text_features_writer.writerow(
        ["Video_ID"] + ["Person_ID"] + slides_features + transcript_features + embedding_features
        + ["Knowledge_Gain", "Knowledge_Gain_Level"])
    multimedia_features_writer.writerow(["Video_ID"] + ["Person_ID"] + visual_features + ["Knowledge_Gain",
                                                                                         "Knowledge_Gain_Level"])
    slide_writer.writerow(
        ["Video_ID"] + ["Person_ID"] + avg_embedding_slides + ["Knowledge_Gain", "Knowledge_Gain_Level"])
    transcript_writer.writerow(
        ["Video_ID"] + ["Person_ID"] + avg_embedding_transcript + ["Knowledge_Gain", "Knowledge_Gain_Level"])
    # write values inside csv-files
    for line in rows:
        for row in line:
            i = 0
            # index just past the text features (ids + slide + transcript + embedding columns)
            stop = 2 + len(slides_features) + len(transcript_features) + len(embedding_features)
            feature_values = []
            text_feature_values = []
            multimedia_feature_values = []
            slides_embd = []
            transcript_embd = []
            # store video_id and user_id
            while i < 2:
                feature_values.append(row[i])
                text_feature_values.append(row[i])
                multimedia_feature_values.append(row[i])
                slides_embd.append(row[i])
                transcript_embd.append(row[i])
                i += 1
            # store text-features
            while i < stop:
                feature_values.append(row[i])
                text_feature_values.append(row[i])
                i += 1
            # store multimedia-features
            stop += len(visual_features)
            while i < stop:
                feature_values.append(row[i])
                multimedia_feature_values.append(row[i])
                i += 1
            stop += len(avg_embedding_slides)
            # store slides-embedding
            while i < stop:
                slides_embd.append(row[i])
                i += 1
            stop += len(avg_embedding_transcript)
            # store transcript-embedding
            while i < stop:
                transcript_embd.append(row[i])
                i += 1
            # store knowledge-gain (row[-2] is the raw gain, row[-1] the nominal level)
            feature_values.append(row[-2])
            feature_values.append(row[-1])
            text_feature_values.append(row[-2])
            text_feature_values.append(row[-1])
            multimedia_feature_values.append(row[-2])
            multimedia_feature_values.append(row[-1])
            slides_embd.append(row[-2])
            slides_embd.append(row[-1])
            transcript_embd.append(row[-2])
            transcript_embd.append(row[-1])
            # store to files
            features_writer.writerow(feature_values)
            text_features_writer.writerow(text_feature_values)
            multimedia_features_writer.writerow(multimedia_feature_values)
            slide_writer.writerow(slides_embd)
            transcript_writer.writerow(transcript_embd)
    # close files
    features.close()
    text_features.close()
    multimedia_features.close()
    avg_slide_embedding.close()
    avg_transcript_embedding.close()
def remove_files(files):
    """ Deletes the given files from disk. Used to clear out the results of a
    previous run before a new calculation starts.
    :param files: iterable of paths of the old files
    """
    for file_path in files:
        os.remove(file_path)
def load_files(path):
    """ Recursively collects all files below a directory.
    :param path: the directory in which to search for files
    :return: list of file paths as strings, using '/' as separator on every OS
    """
    path_files = []
    for root, dirs, files in os.walk(path):
        for file_name in files:
            # os.path.join uses the platform separator; normalize to '/' so
            # Windows/Linux/Mac all produce the same representation.
            # (Also avoids shadowing the `path` parameter, which the previous
            # version overwrote inside the loop.)
            full_path = os.path.join(root, file_name)
            path_files.append(full_path.replace('\\', '/'))
    return path_files
def process_files(files, sta, cli):
    """ Process all files of the videos to generate the features. The pdf files and the srt files must have the same
    name to know that they belong to the same video. Otherwise the program can't recognize it and stops the
    calculation.
    :param files: tuple of (slides-pdf path, transcript-srt path) pairs, one per video
    :param sta: stanza object to get word specific features
    :param cli: client to generate sentence trees
    """
    rows = []
    embeddings_sli = []  # contains average embeddings for slides
    embeddings_tra = []  # contains average embeddings for transcripts
    properties = load_csv('./wordlists/freq_syll_words.csv')
    age = load_csv('./wordlists/AoA_51715_words.csv')
    test = load_csv('./Data/Test/test.csv')
    stopwords = load_stop_words('./wordlists/stopwords.txt')
    z_scores = get_z_values(test)
    model = load_embd_model('roberta-large-nli-stsb-mean-tokens')
    for slides, transcript in files:
        try:
            s = open(slides, 'rb')
            t = open(transcript, 'rb')
            s_name = os.path.basename(s.name[:-4])  # strip the 4-char extension (".pdf"/".srt")
            t_name = os.path.basename(t.name[:-4])
            if s_name != t_name:  # Check if slides and transcript have the same name
                print("Names of slides and transcript must be the same.")
                client.stop_client(cli)
                properties.close()
                test.close()
                sys.exit(1)  # hard abort: mismatched pairs make the whole run invalid
            # get features and stores them
            features, embd_sli, embd_tra = (process_video(s, t, s_name, sta, cli, properties, age,
                                                          test, z_scores, model, stopwords))
            rows.append(features)
            # one copy of the video's average embedding per participant row, so the
            # embedding lists stay aligned with the per-person feature rows
            for i in range(len(features)):
                embeddings_sli.append(embd_sli)
                embeddings_tra.append(embd_tra)
            s.close()
            t.close()
            # clean gpu cache
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
        except IOError:
            # NOTE(review): if the error occurs after open() succeeded, s/t are never closed here
            print("Can't open slides or transcript for a video. Video will be ignored.")
            pass
    properties.close()
    test.close()
    client.stop_client(cli)
    # reduce dimension of embeddings to have a better representation
    embeddings_sli = embedding.reduce_dimension(embeddings_sli)
    embeddings_tra = embedding.reduce_dimension(embeddings_tra)
    create_csv(store_embeddings_to_features(rows, embeddings_sli, embeddings_tra))
    print("Stored features as csv-files in ./Features")
def process_video(slides, transcript, name, sta, cli, properties, age, test, z_scores, model, stopwords):
    """ Process a specific video. The files for slides and transcript are used to get the features about this video.
    Also all important objects are passed to realize the calculation
    :param slides: files for slides
    :param transcript: files for transcript
    :param name: name of the video
    :param sta: stanza to calculate word specific features
    :param cli: client to calculate sentence trees
    :param properties: csv-table with amount of syllables and frequency for words
    :param age: csv-table that includes the age of acquisition for words
    :param test: csv-file with the knowledge gains
    :param z_scores: values of the knowledge gains as z-score to calculate the nominal classes
    :param model: the sentence embedding model to calculate the embeddings
    :param stopwords: stopwords that has to be filtered for the frequency
    :return: tuple of (per-person feature rows for this video, average slide embedding,
             average transcript embedding)
    """
    print("Process slide: " + name + ".pdf")
    slides_result, slides_lines = process_slides(slides, sta, cli, properties, age, stopwords)
    print("Finished process of slide: " + name + ".pdf")
    print("Process transcript: " + name + ".srt")
    transcript_result, transcript_sentences = process_transcript(transcript, sta, cli, properties, age, stopwords)
    # embedding features combine the slide lines and transcript sentences of this video
    embd_features, embd_sli, embd_tra = embedding.process_video_embeddings(slides_lines, transcript_sentences, model)
    print("Finished process of transcript: " + name + ".srt")
    return process_test(test, name, z_scores, slides_result, transcript_result, embd_features), embd_sli, embd_tra
def process_slides(slides, sta, cli, properties, age, stopwords):
    """ Computes the text features for the slides of one video.
    :param slides: the slides of a video
    :param sta: stanza to calculate word specific features
    :param cli: client to calculate sentence trees
    :param properties: csv-table with amount of syllables and frequency for words
    :param age: csv-table that includes the age of acquisition for words
    :param stopwords: stopwords that has to be filtered for the frequency
    :return: tuple of (slide features, slide lines)
    """
    calc = processor.Calculator()
    slide_lines = slides.readlines()
    features, sentences = calc.process_lines(
        slide_lines, cli, sta, processor.Counter(), properties, age, stopwords)
    return features, sentences
def process_transcript(transcript, sta, cli, properties, age, stopwords):
    """ Computes the text features for the transcript of one video.
    :param transcript: the transcript of a video
    :param sta: stanza to calculate word specific features
    :param cli: client to calculate sentence trees
    :param properties: csv-table with amount of syllables and frequency for words
    :param age: csv-table that includes the age of acquisition for words
    :param stopwords: stopwords that has to be filtered for the frequency
    :return: tuple of (transcript features, transcript sentences)
    """
    calc = processor.Calculator()
    raw_text = transcript.read()
    features, sentences = calc.process_sentences(
        raw_text, cli, sta, processor.Counter(), properties, age, stopwords)
    return features, sentences
def write_pdf(location, pages):
    """ Stores the textual aspects of a pdf into a txt-file.
    :param location: path of the utf-8 txt-file to write
    :param pages: array of pages; each page is an array of line tuples whose
                  index 4 holds the line text
    """
    # `with` guarantees the file is closed even if a write fails;
    # enumerate replaces the manual page counter of the previous version
    with open(location, "w", encoding="utf-8") as f:
        for number, page in enumerate(pages):
            # Every page gets the beginning line "Starting next page:<n>" to clarify that a new page started
            f.write("Starting next page:" + str(number) + "\n")
            for line in page:
                """
                Replaces special characters with whitespace or nothing to get a better formatted string/line.
                Also replace -\xad‐ to - because so it is the same as the text in the pdf.
                Add \n to the string to see it later as one line.
                """
                f.write(line[4].replace("\xa0", "").replace("\r", "").replace("\t ", " ").replace("\t", " ")
                        .replace("-\xad‐", "‐") + "\n")
def write_embeddings(location, embeddings):
    """ Method to store embeddings as txt file (not used but can be useful)
    :param location: the location to store them
    :param embeddings: the embeddings to store; each entry is (sentence, vector)
    """
    f = open(location, "w")
    for entry in embeddings:
        # entry[0] is the plain sentence: wrap it in quotes so it is easy to see
        # what the embedding represents; each vector component follows,
        # separated by a single whitespace
        sentence, vector = entry[0], entry[1]
        f.write('"' + sentence + '"')
        for component in vector:
            f.write(" " + str(component.item()))
        f.write('\n')
    f.close()
def positions(lines):
    """ Stores every line in a new array and checks if the order is correct or the line should be placed earlier.
    Lines on (almost) the same vertical position are reordered left-to-right.
    :param lines: lines to check; tuples where index 0 and 3 are coordinates
                  (presumably x0 and y1 of the word box — TODO confirm against the pdf extraction)
    :return: array of lines with new order
    """
    result = []
    for line in lines:
        if len(result) > 1:  # array has more then 1 element
            # check backwards if previous lines are more on the right side then the current one
            for i in range(len(result) - 1, -1, -1):
                current = result[i]
                diff = current[3] - line[3]
                """ Checks if previous line has similar y coordinate and a higher x coordinate. If this is true the
                order will be replaced to the position where this is no more true
                """
                if (0 >= diff >= -5) and (current[0] > line[0]):
                    if i == 0:  # reached end. Replaces it to the beginning
                        result.insert(0, line)
                        break
                    continue  # keep scanning further back
                else:
                    if i < (len(result) - 1):  # Replaces it to the correct position
                        result.insert(i + 1, line)
                        break
                    result.append(line)  # last element already ordered: append normally
                    break
        elif len(result) == 1:  # 1 element in array
            current = result[0]
            diff = current[3] - line[3]
            if (0 >= diff >= -5) and (current[0] > line[0]):
                result.insert(0, line)
            else:
                result.append(line)
        else:  # empty array
            result.append(line)
    return result
def contains_letters(text):
    """ Check if text is longer than 1 character and contains at least one ASCII letter.
    This is a help function to decide whether a line is useful or useless: a single
    character or only special characters / numbers have no meaning.
    :param text: text to check the criteria
    :return: True if text is longer than 1 and contains a letter, otherwise False
    """
    # bool() normalizes the re.Match/None result so the function always returns a
    # proper boolean instead of a match object; callers only rely on truthiness,
    # so this stays backward-compatible.
    return len(text) > 1 and bool(re.search("[a-zA-Z]", text))
def process_pdf(file):
    """" Converts a PDF file to a txt file to get the text.
    Uses PyMuPDF (fitz) to extract words per page, merges them into lines and
    writes the result to ./Data/Slides-Processed/<name>.txt.
    :param file: PDF file which should be converted
    """
    doc = fitz.open(file)
    # print(file)
    i = 0  # page counter (only incremented; pages list index matches it)
    pages = []
    for page in doc:
        block_page = page.getTextWords()
        # sort words by (block_no, line_no, word_no) so merge_line sees them in reading order
        sorted_blocks = SortedCollection(key=itemgetter(5, 6, 7))
        for block in block_page:
            sorted_blocks.insert(block)
        lines = merge_line(sorted_blocks)
        # sort merged lines by (y1, x0) — top-to-bottom, then left-to-right
        sorted_lines = SortedCollection(key=itemgetter(3, 0))
        for line in lines:
            # print(line)
            # Checks if line is bigger then 1 and has a letter.
            if contains_letters(line[4]):
                sorted_lines.insert(line)
        # print()
        # fix ordering of lines that share (almost) the same vertical position
        sorted_lines = positions(sorted_lines)
        i += 1
        pages.append(sorted_lines)
    write_pdf("./Data/Slides-Processed/" + os.path.basename(file).replace(".pdf", ".txt"), pages)
def change_values(word1, word2, text, string):
    """ Merges the text of two word objects in place.
    :param word1: first word (mutable sequence); receives the merged text
    :param word2: second word
    :param text: index of the text field inside the word objects
    :param string: separator inserted between both texts
    :return: word1 with its text field updated
    """
    merged_text = string.join((word1[text], word2[text]))
    word1[text] = merged_text
    return word1
def merge_line(words):
    """ Merge words-objects (lines) to one object (line) if they have the same block_no, a different line_no and a
    a maximum difference for y0 and y1 of 2. The object that starts first is the beginning of the merged object.
    Word tuples are indexed as: [0]=x0, [1]=y0, [3]=y1, [4]=text, [5]=block_no, [6]=line_no, [7]=word_no
    (inferred from the accesses below — confirm against the pdf word extraction).
    :param words: words-object (lines) to manipulate
    :return: merged objects list
    """
    merged = []
    for word in words:
        if not merged:
            merged.append(word)
        else:
            # work on a mutable copy of the last merged entry
            prev = list(merged[len(merged) - 1])
            # get values of objects
            block_no1, line_no1, word_no1 = prev[5], prev[6], prev[7]
            block_no2, line_no2, word_no2 = word[5], word[6], word[7]
            # checks if both words-objects are in the same block and line. If it is true merge the prev with the next.
            if block_no1 == block_no2 and line_no1 == line_no2:
                prev = change_values(prev, word, 4, " ")
                merged[len(merged) - 1] = prev
            # Checks if objects have same block_no and different line_no to look deeper for merging
            elif block_no1 == block_no2 and line_no1 != line_no2:
                # Checks if y0 and y1 coordinates are similar between the objects (merge criteria)
                if (abs(prev[1] - word[1]) <= 2) and (abs(prev[3] - word[3]) <= 2):
                    diff = prev[0] - word[0]
                    """ checks if the x0 coordinate of the previous one is higher. If it is higher the word will be
                    append to it. Otherwise the word will be prepend to it"""
                    if diff > 0:
                        word = change_values(list(word), prev, 4, "\t")
                        merged[len(merged) - 1] = word
                    else:
                        prev = change_values(prev, word, 4, "\t")
                        merged[len(merged) - 1] = prev
                else:  # no merge
                    merged.append(word)
            else:  # no merge
                merged.append(word)
    return merged
| 34,651 | 50.642325 | 119 | py |
mooc_knowledge_gain | mooc_knowledge_gain-main/feature_extraction/CompoundWordSplitter.py | # from Github : https://github.com/GokulVSD/FOGIndex
import enchant,sys
# requires PyEnchant library
# to be able to support Python 2 & 3
if sys.version_info[0] > 2:
unicode = str
def __concat(object1, object2):
    """ Wraps plain strings into single-element lists and returns the
    concatenation of both arguments as one list. """
    if isinstance(object1, (str, unicode)):
        object1 = [object1]
    if isinstance(object2, (str, unicode)):
        object2 = [object2]
    return object1 + object2
def __capitalize_first_char(word):
    """ Upper-cases only the first character, leaving the rest of the word
    untouched (unlike str.capitalize, which lower-cases the remainder). """
    if not word:
        # guard: indexing word[0] would raise IndexError on an empty string
        return word
    return word[0].upper() + word[1:]
def __split(word, language='en_US'):
    """ Tries to split a word into two dictionary words using pyenchant.
    NOTE(review): returns a *list* of parts when a split is found but the plain
    *string* when none is found — callers must handle both types.
    """
    dictionary = enchant.Dict(language)
    max_index = len(word)
    if max_index < 3:
        # too short to split into two meaningful parts
        return word
    # enumerate starts at 2, so the left part is always at least 2 characters;
    # the break below also keeps the right part at least 2 characters long
    for index, char in enumerate(word, 2):
        left_word = word[0:index]
        right_word = word[index:]
        if index == max_index - 1:
            break
        if dictionary.check(left_word) and dictionary.check(right_word):
            return [compound for compound in __concat(left_word, right_word)]
    return word
def split(compound_word, language='en_US'):
    """ Splits a (possibly hyphenated) compound word into its dictionary parts.
    Hyphens are removed first, then the fused word is handed to the splitter.
    :param compound_word: the word to split
    :param language: enchant dictionary used to validate the parts
    :return: list of parts, or a single-element list when no split was found
    """
    fused = ''.join(compound_word.split('-'))
    result = __split(fused, language)
    if result == compound_word:
        return [result]
    return result
| 1,256 | 18.338462 | 77 | py |
GoogleScraper | GoogleScraper-master/setup.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
from setuptools import setup
# Extract the version number from GoogleScraper/version.py without importing the
# package (importing at install time could pull in its runtime dependencies).
version = re.search(
    r"^__version__\s*=\s*'(.*)'",  # raw string: '\s' in a plain literal is an invalid escape sequence (SyntaxWarning on modern Python)
    open('GoogleScraper/version.py').read(),
    re.M).group(1)

# One requirement per non-empty line of requirements.txt
requirements = [r for r in open('requirements.txt', 'r').read().split('\n') if r]

# https://dustingram.com/articles/2018/03/16/markdown-descriptions-on-pypi
setup(name='GoogleScraper',
      version=version,
      description='A module to scrape and extract links, titles and descriptions from various search engines. Supports google,bing,yandex and many more.',
      long_description=open('README.md').read(),
      long_description_content_type="text/markdown",
      author='Nikolai Tschacher',
      author_email='nikolai@tschacher.ch',
      url='http://incolumitas.com',
      py_modules=['usage'],
      packages=['GoogleScraper'],
      entry_points={'console_scripts': ['GoogleScraper = GoogleScraper.core:main']},
      package_dir={'examples': 'examples'},
      install_requires=requirements
      )
| 1,016 | 32.9 | 154 | py |
GoogleScraper | GoogleScraper-master/run.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Convenience wrapper for running GoogleScrapper directly from source tree."""
from GoogleScraper.core import main
# Standard script entry point: only invoke the CLI when executed directly,
# not when this wrapper is imported as a module.
if __name__ == '__main__':
    main()
| 204 | 19.5 | 79 | py |
GoogleScraper | GoogleScraper-master/conf.py | from recommonmark.parser import CommonMarkParser
# Sphinx configuration: register the CommonMark parser so Markdown files can be
# used as documentation sources alongside reStructuredText.
source_parsers = {
    '.md': CommonMarkParser,
}
# Both .rst and .md files are treated as Sphinx source documents
source_suffix = ['.rst', '.md']
| 133 | 15.75 | 48 | py |
GoogleScraper | GoogleScraper-master/GoogleScraper/search_engine_parameters.py | """
Contains all parameters and sources/information about the parameters of the supported search engines.
All values set to None, are NOT INCLUDED in the GET request! Everything else (also the empty string), is included in the request
"""
"""Google search params
Some good stuff:
http://www.blueglass.com/blog/google-search-url-parameters-query-string-anatomy/
http://www.rankpanel.com/blog/google-search-parameters/
http://lifehacker.com/5933248/avoid-getting-redirected-to-country-specific-versions-of-google
All search requests must include the parameters site, client, q, and output. All parameter values
must be URL-encoded (see “Appendix B: URL Encoding” on page 94), except where otherwise noted.
"""
google_search_params = {
'q': '', # the search query string
'oq': None, # Shows the original query.
'num': '', # the number of results per page
'numgm': None,
# Number of KeyMatch results to return with the results. A value between 0 to 50 can be specified for this option.
'start': '0',
# Specifies the index number of the first entry in the result set that is to be returned.
# page number = (start / num) + 1
# The maximum number of results available for a query is 1,000, i.e., the value of the start parameter added to
# the value of the num parameter cannot exceed 1,000.
'rc': None, # Request an accurate result count for up to 1M documents.
'site': None,
# Limits search results to the contents of the specified collection. If a user submits a search query without
# the site parameter, the entire search index is queried.
'sort': None, # Specifies a sorting method. Results can be sorted by date.
'client': 'firefox-a',
# Required parameter. If this parameter does not have a valid value, other parameters in the query string
# do not work as expected. Set to 'firefox-a' in mozilla firefox
# A string that indicates a valid front end and the policies defined for it, including KeyMatches, related
# queries, filters, remove URLs, and OneBox Modules. Notice that the rendering of the front end is
# determined by the proxystylesheet parameter. Example: client=myfrontend
'output': None,
# required parameter. Selects the format of the search results. 'xml_no_dtd XML' : XML results or custom
# HTML, 'xml': XML results with Google DTD reference. When you use this value, omit proxystylesheet.
'partialfields': None,
# Restricts the search results to documents with meta tags whose values contain the specified words or
# phrases. Meta tag names or values must be double URL-encoded
'requiredfields': None,
# Restricts the search results to documents that contain the exact meta tag names or name-value pairs.
# See “Meta Tags” on page 32 for more information.
'pws': '0', # personalization turned off
'proxycustom': None,
# Specifies custom XML tags to be included in the XML results. The default XSLT stylesheet uses these
# values for this parameter: <HOME/>, <ADVANCED/>. The proxycustom parameter can be used in custom
# XSLT applications. See “Custom HTML” on page 44 for more information.
# This parameter is disabled if the search request does not contain the proxystylesheet tag. If custom
# XML is specified, search results are not returned with the search request.
'proxyreload': None,
# Instructs the Google Search Appliance when to refresh the XSL stylesheet cache. A value of 1 indicates
# that the Google Search Appliance should update the XSL stylesheet cache to refresh the stylesheet
# currently being requested. This parameter is optional. By default, the XSL stylesheet cache is updated
# approximately every 15 minutes.
'proxystylesheet': None,
# If the value of the output parameter is xml_no_dtd, the output format is modified by the
# proxystylesheet value as follows:
# 'Omitted': Results are in XML format.
# 'Front End Name': Results are in Custom HTML format. The XSL stylesheet associated
# with the specified Front End is used to transform the output.
'cd': None, # Passes down the keyword rank clicked.
'filter': 0, # Include omitted results if set to 0
'complete': None, # Turn auto-suggest and Google Instant on (=1) or off (=0)
'nfpr': None, # Turn off auto-correction of spelling on=1, off=0
'ncr': None,
# No country redirect: Allows you to set the Google country engine you would like to use despite your
# current geographic location.
'safe': 'off', # Turns the adult content filter on or off
'rls': None,
#Source of query with version of the client and language set. With firefox set to 'org.mozilla:en-US:official'
'sa': None,
# User search behavior parameter sa=N: User searched, sa=X: User clicked on related searches in the SERP
'source': None, # Google navigational parameter specifying where you came from, univ: universal search
'sourceid': None, # When searching with chrome, is set to 'chrome'
'tlen': None,
# Specifies the number of bytes that would be used to return the search results title. If titles contain
# characters that need more bytes per character, for example in utf-8, this parameter can be used to
# specify a higher number of bytes to get more characters for titles in the search results.
'ud': None,
# Specifies whether results include ud tags. A ud tag contains internationalized domain name (IDN)
# encoding for a result URL. IDN encoding is a mechanism for including non-ASCII characters. When a ud
# tag is present, the search appliance uses its value to display the result URL, including non-ASCII
# characters.The value of the ud parameter can be zero (0) or one (1):
# • A value of 0 excludes ud tags from the results.
# • A value of 1 includes ud tags in the results.
# As an example, if the result URLs contain files whose names are in Chinese characters and the ud
# parameter is set to 1, the Chinese characters appear. If the ud parameter is set to 0, the Chinese
# characters are escaped.
'tbm': None, # Used when you select any of the “special” searches, like image search or video search
'tbs': None,
# Also undocumented as `tbm`, allows you to specialize the time frame of the results you want to obtain.
# Examples: Any time: tbs=qdr:a, Last second: tbs=qdr:s, Last minute: tbs=qdr:n, Last day: tbs=qdr:d,
# Time range: tbs=cdr:1,cd_min:3/2/1984,cd_max:6/5/1987
# But the tbs parameter is also used to specify content:
# Examples: Sites with images: tbs=img:1, Results by reading level, Basic level: tbs=rl:1,rls:0,
# Results that are translated from another language: tbs=clir:1,
# For full documentation, see http://stenevang.wordpress.com/2013/02/22/google-search-url-request-parameters/
'lr': None,
# Restricts searches to pages in the specified language. If there are no results in the specified language, the
# search appliance displays results in all languages .
# lang_xx where xx is the country code such as en, de, fr, ca, ...
'hl': None, # Language settings passed down by your browser
'cr': None, # The region the results should come from
'gr': None,
# Just as gl shows you how results look in a specified country, gr limits the results to a certain region
'gcs': None, # Limits results to a certain city, you can also use latitude and longitude
'gpc': None, # Limits results to a certain zip code
'gm': None, # Limits results to a certain metropolitan region
'gl': None, # as if the search was conducted in a specified location. Can be unreliable. for example: gl=countryUS
'ie': 'UTF-8', # Sets the character encoding that is used to interpret the query string.
'oe': 'UTF-8', # Sets the character encoding that is used to encode the results.
'ip': None,
# When queries are made using the HTTP protocol, the ip parameter contains the IP address of the user
#who submitted the search query. You do not supply this parameter with the search request. The ip
#parameter is returned in the XML search results. For example:
'sitesearch': None,
# Limits search results to documents in the specified domain, host, or web directory. Has no effect if the q
# parameter is empty. This parameter has the same effect as the site special query term.
# Unlike the as_sitesearch parameter, the sitesearch parameter is not affected by the as_dt
# parameter. The sitesearch and as_sitesearch parameters are handled differently in the XML results.
# The sitesearch parameter’s value is not appended to the search query in the results. The original
# query term is not modified when you use the sitesearch parameter. The specified value for this
# parameter must contain fewer than 125 characters.
'access': 'a', # Specifies whether to search public content (p), secure content (s), or both (a).
'biw': None, # Browser inner width in pixel
'bih': None, # Browser inner height in pixel
'as_dt': None, # If 'i' is supplied: Include only results in the web directory specified by as_sitesearch
# if 'e' is given: Exclude all results in the web directory specified by as_sitesearch
'as_epq': None,
# Adds the specified phrase to the search query in parameter q. This parameter has the same effect as
# using the phrase special query term (see “Phrase Search” on page 24).
'as_eq': None,
# Excludes the specified terms from the search results. This parameter has the same effect as using the
# exclusion (-) special query term (see “Exclusion” on page 22).
'as_filetype': None,
# Specifies a file format to include or exclude in the search results. Modified by the as_ft parameter.
'as_ft': None,
# Modifies the as_filetype parameter to specify filetype inclusion and exclusion options. The values for as
# ft are: 'i': filetype and 'e': -filetype
'as_lq': None,
# Specifies a URL, and causes search results to show pages that link to the that URL. This parameter has
#the same effect as the link special query term (see “Back Links” on page 20). No other query terms can
#be used when using this parameter.
'as_occt': None,
# Specifies where the search engine is to look for the query terms on the page: anywhere on the page, in
#the title, or in the URL.
'as_oq': None,
# Combines the specified terms to the search query in parameter q, with an OR operation. This parameter
# has the same effect as the OR special query term (see “Boolean OR Search” on page 20).
'as_q': None, # Adds the specified query terms to the query terms in parameter q.
'as_sitesearch': None,
# Limits search results to documents in the specified domain, host or web directory, or excludes results
#from the specified location, depending on the value of as_dt. This parameter has the same effect as the
#site or -site special query terms. It has no effect if the q parameter is empty.
'entqr': None, # This parameter sets the query expansion policy according to the following valid values:
# 0: None
# 1: Standard Uses only the search appliance’s synonym file.
# 2: Local Uses all displayed and activated synonym files.
# 3: Full Uses both standard and local synonym files.
}
# Per-engine query parameter templates. Only Google (above) has customized
# parameters so far; the remaining engines use empty templates. The labels
# were previously bare string literals (no-op statements); plain comments
# are the correct vehicle for annotating module-level assignments.

# Yandex search params.
yandex_search_params = {
}

# Bing search params.
bing_search_params = {
}

# Yahoo search params.
yahoo_search_params = {
}

# Baidu search params.
baidu_search_params = {
}

# Duckduckgo search params.
duckduckgo_search_params = {
}
# ;The search params that control the Google search engine
# [GOOGLE_SEARCH_PARAMS]
#
# ; Shows the original query.
# oq: None
#
# ; the number of results per page
# num: 10
#
# ; Number of KeyMatch results to return with the results. A value between 0 to 50 can be specified for this option.
# numgm: None
#
# ; Specifies the index number of the first entry in the result set that is to be returned.
# page number = (start / num) + 1
# ; The maximum number of results available for a query is 1000 i.e. the value of the start parameter
# added to the value of the num parameter cannot exceed 1000.
# start: 0
#
# ; Request an accurate result count for up to 1M documents.
# rc: None
#
# ; Limits search results to the contents of the specified collection. If a user submits a search query without
# the site parameter the entire search index is queried.
# site: None
#
# ; Specifies a sorting method. Results can be sorted by date.
# sort: None
#
# ; Required parameter. If this parameter does not have a valid value other parameters in the query string
# ; do not work as expected. Set to firefox-a in mozilla firefox
# client: firefox-a
#
# output: None
# # required parameter. Selects the format of the search results. xml_no_dtd XML : XML results or custom HTML
# xml: XML results with Google DTD reference. When you use this value omit proxystylesheet.
# partialfields: None
# # Restricts the search results to documents with meta tags whose values contain the specified words or phrases.
# Meta tag names or values must be double URL-encoded
# requiredfields: None
# #Restricts the search results to documents that contain the exact meta tag names or name-value pairs.
# #See “Meta Tags” on page 32 for more information.
#
# ; personalization turned off
# pws: 0
#
# ; Specifies custom XML tags to be included in the XML results. The default XSLT stylesheet uses these
# ; values for this parameter: <HOME/> <ADVANCED/>. The proxycustom parameter can be used in custom
# ; XSLT applications. See “Custom HTML” on page 44 for more information.
# ; This parameter is disabled if the search request does not contain the proxystylesheet tag. If custom
# ; XML is specified search results are not returned with the search request.
# proxycustom: None
#
# ; Instructs the Google Search Appliance when to refresh the XSL stylesheet cache. A value of 1 indicates
# ; that the Google Search Appliance should update the XSL stylesheet cache to refresh the stylesheet
# ; currently being requested. This parameter is optional. By default the XSL stylesheet cache is updated
# ; approximately every 15 minutes.
# proxyreload: None
#
#
# ;If the value of the output parameter is xml_no_dtd the output format is modified by the
# ; proxystylesheet value as follows:
# ; Omitted: Results are in XML format.
# ; Front End Name: Results are in Custom HTML format. The XSL stylesheet associated
# ; with the specified Front End is used to transform the output.
# proxystylesheet: None
#
# ; Passes down the keyword rank clicked.
# cd: None
#
# ; Include omitted results if set to 0
# filter: 0
#
# ; Turn auto-suggest and Google Instant on (=1) or off (=0)
# complete: None
#
# ;Turn off auto-correction of spelling on=1 off=0
# nfpr: None
#
# ; No country redirect: Allows you to set the Google country engine you would like to use despite your
# current geographic location.
# ncr: None
#
# ; Turns the adult content filter on or off
# safe: off
#
# ; Source of query with version of the client and language set. With firefox set to org.mozilla:en-US:official
# rls: None
#
# ; User search behavior parameter sa=N: User searched sa=X: User clicked on related searches in the SERP
# sa: None
#
# ;Google navigational parameter specifying where you came from univ: universal search
# source: None
#
# ; When searching with chrome is set to chrome
# sourceid: None
#
# ;Specifies the number of bytes that would be used to return the search results title. If titles contain
# ; characters that need more bytes per character for example in utf-8 this parameter can be used to
# ; specify a higher number of bytes to get more characters for titles in the search results.
# tlen: None
#
# ;Specifies whether results include ud tags. A ud tag contains internationalized domain name (IDN)
# ; encoding for a result URL. IDN encoding is a mechanism for including non-ASCII characters. When a ud
# ; tag is present the search appliance uses its value to display the result URL including non-ASCII
# ; characters.The value of the ud parameter can be zero (0) or one (1):
# ; • A value of 0 excludes ud tags from the results.
# ; • A value of 1 includes ud tags in the results.
# ; As an example if the result URLs contain files whose names are in Chinese characters and the ud
# ; parameter is set to 1 the Chinese characters appear. If the ud parameter is set to 0 the Chinese
# ; characters are escaped.
# ud: None
#
# ; Used when you select any of the “special” searches like image search or video search
# tbm: None
#
# ; Also undocumented as `tbm` allows you to specialize the time frame of the results you want to obtain.
# ; Examples: Any time: tbs=qdr:a Last second: tbs=qdr:s Last minute: tbs=qdr:n Last day: tbs=qdr:d
# Time range: tbs=cdr:1cd_min:3/2/1984cd_max:6/5/1987
# ; But the tbs parameter is also used to specify content:
# ; Examples: Sites with images: tbs=img:1 Results by reading level Basic level: tbs=rl:1rls:0 Results that are
# translated from another language: tbs=clir:1
# ; For full documentation see http://stenevang.wordpress.com/2013/02/22/google-search-url-request-parameters/
# tbs: None
#
# ; Restricts searches to pages in the specified language. If there are no results in the specified language the
# search appliance displays results in all languages .
# ; lang_xx where xx is the country code such as en de fr ca ...
# lr: None
#
# ; Language settings passed down by your browser
# hl: None
#
# ; The region the results should come from
# cr: None
#
# ; Just as gl shows you how results look in a specified country gr limits the results to a certain region
# gr: None
#
# ; Limits results to a certain city you can also use latitude and longitude
# gcs: None
#
# ; Limits results to a certain zip code
# gpc: None
#
# ; Limits results to a certain metropolitan region
# gm: None
#
# ; as if the search was conducted in a specified location. Can be unreliable. for example: gl=countryUS
# gl: None
#
# ; Sets the character encoding that is used to interpret the query string.
# ie: UTF-8
#
# ; Sets the character encoding that is used to encode the results.
# oe: UTF-8
#
# ; When queries are made using the HTTP protocol the ip parameter contains the IP address of the user
# ; who submitted the search query. You do not supply this parameter with the search request. The ip
# ; parameter is returned in the XML search results. For example:
# ip: None
#
# ; Limits search results to documents in the specified domain host or web directory. Has no effect if the q
# ; parameter is empty. This parameter has the same effect as the site special query term.
# ; Unlike the as_sitesearch parameter the sitesearch parameter is not affected by the as_dt
# ; parameter. The sitesearch and as_sitesearch parameters are handled differently in the XML results.
# ; The sitesearch parameter’s value is not appended to the search query in the results. The original
# ; query term is not modified when you use the sitesearch parameter. The specified value for this
# ; parameter must contain fewer than 125 characters.
# sitesearch: None
#
# ; Specifies whether to search public content (p) secure content (s) or both (a).
# access: a
#
# ; Browser inner width in pixel
# biw: None
#
# ; Browser inner height in pixel
# bih: None
#
# ; If i is supplied: Include only results in the web directory specified by as_sitesearch
# as_dt: None
#
# ; if e is given: Exclude all results in the web directory specified by as_sitesearch
# as_epq: None
| 19,628 | 48.195489 | 128 | py |
GoogleScraper | GoogleScraper-master/GoogleScraper/__main__.py | # -*- coding: utf-8 -*-
"""This file is executed by the toolchain and used as an entry point."""
# NOTE: the docstring above must precede all other statements; previously it
# was placed after the import, which made it a no-op string expression rather
# than the module docstring.
from .core import main

# Call the main function of GoogleScraper.
main(return_results=False)
| 195 | 15.333333 | 65 | py |
GoogleScraper | GoogleScraper-master/GoogleScraper/output_converter.py | # -*- coding: utf-8 -*-
import csv
import sys
import json
import pprint
import logging
from GoogleScraper.database import Link, SERP
"""Stores SERP results in the appropriate output format.
Streamline process, one serp object at the time, because GoogleScraper works incrementally.
Furthermore we cannot accumulate all results and then process them, because it would be
impossible to launch lang scrape jobs with millions of keywords.
"""
# Default output target: plain stdout until init_outfile() configures json/csv.
output_format = 'stdout'
outfile = sys.stdout
# All csv column names: the union of the Link and SERP table columns, minus
# the database ids which carry no user-facing information.
# NOTE(review): relies on `.keys()` of sqlalchemy's column collection `_data`
# supporting `+` (i.e. returning lists) — confirm against the pinned
# sqlalchemy version.
csv_fieldnames = sorted(set(Link.__table__.columns._data.keys() + SERP.__table__.columns._data.keys()) - {'id', 'serp_id'})
logger = logging.getLogger(__name__)
class JsonStreamWriter():
    """Incrementally writes objects into one big JSON array on disk.

    Objects are dumped as soon as they arrive, so the whole result set never
    has to be held in memory; end() closes the array and the file.
    """

    def __init__(self, filename):
        # Emit the opening bracket of the outer array right away.
        self.file = open(filename, 'wt')
        self.file.write('[')
        self.last_object = None  # falsy until the first object was written

    def write(self, obj):
        """Dump one object, comma-separated from the previous one."""
        separator = ',' if self.last_object else ''
        self.file.write(separator)
        json.dump(obj, self.file, indent=2, sort_keys=True)
        self.last_object = id(obj)

    def end(self):
        """Close the JSON array and the underlying file."""
        self.file.write(']')
        self.file.close()
class CsvStreamWriter():
    """Writes consecutive SERP objects to a csv output file.

    Every link of a SERP becomes one csv row carrying both the link columns
    and the columns of its parent SERP.
    """

    def __init__(self, filename):
        # every row in the csv output file should contain all fields
        # that are in the table definition. Except the id, they have the
        # same name in both tables
        self.file = open(filename, 'wt')
        self.dict_writer = csv.DictWriter(self.file, fieldnames=csv_fieldnames, delimiter=',')
        self.dict_writer.writeheader()

    def write(self, data, serp):
        """Write one row per entry in data['results'], merged with serp's columns."""
        # one row per link
        for row in data['results']:
            d = row2dict(serp)
            d.update(row)
            # Restrict to the known csv columns, otherwise DictWriter raises.
            # (the previous conditional `v if type(v) is str else v` was a
            # no-op and has been removed.)
            d = {k: v for k, v in d.items() if k in csv_fieldnames}
            self.dict_writer.writerow(d)

    def end(self):
        """Close the underlying csv file."""
        self.file.close()
def init_outfile(config, force_reload=False):
    """Initialize the module level output target.

    Picks json/csv/stdout depending on the extension of the configured
    output filename and installs the matching stream writer.

    Args:
        config: The configuration dictionary.
        force_reload: Recreate the output target even if one already exists.
    """
    global outfile, output_format

    # keep the already configured target unless a reload is forced
    if outfile and not force_reload:
        return

    output_file = config.get('output_filename', '')

    if output_file.endswith('.json'):
        output_format = 'json'
    elif output_file.endswith('.csv'):
        output_format = 'csv'

    # the output files. Either CSV or JSON or STDOUT
    # It's little bit tricky to write the JSON output file, since we need to
    # create the array of the most outer results ourselves because we write
    # results as soon as we get them (it's impossible to hold the whole search in memory).
    if output_format == 'json':
        outfile = JsonStreamWriter(output_file)
    elif output_format == 'csv':
        outfile = CsvStreamWriter(output_file)
    elif output_format == 'stdout':
        outfile = sys.stdout
def store_serp_result(serp, config):
    """Store the parsed SERP page.

    Serializes one serp object to the currently configured output target
    (stdout, json or csv). May be called from a SearchEngineScrape or from
    the caching functionality.

    Args:
        serp: A serp object.
        config: The configuration dictionary.
    """
    global outfile, output_format

    if not outfile:
        return

    data = row2dict(serp)
    data['results'] = [row2dict(link) for link in serp.links]

    if output_format == 'json':
        # The problem here is, that we need to stream write the json data.
        outfile.write(data)
    elif output_format == 'csv':
        outfile.write(data, serp)
    elif output_format == 'stdout':
        mode = config.get('print_results')
        if mode == 'summarize':
            print(serp)
        elif mode == 'all':
            pprint.pprint(data)
def row2dict(obj):
    """Convert a sql alchemy row object to a dictionary of stringified values."""
    return {column.name: str(getattr(obj, column.name)) for column in obj.__table__.columns}
def close_outfile():
    """Finalize and close the global outfile for file-based formats (json/csv)."""
    global outfile
    if output_format in {'json', 'csv'}:
        outfile.end()
| 4,293 | 29.239437 | 123 | py |
GoogleScraper | GoogleScraper-master/GoogleScraper/core.py | # -*- coding: utf-8 -*-
from __future__ import print_function
import threading
import datetime
import sys
import hashlib
import os
import queue
from GoogleScraper.log import setup_logger
from GoogleScraper.commandline import get_command_line
from GoogleScraper.database import ScraperSearch, SERP, Link, get_session, fixtures
from GoogleScraper.proxies import parse_proxy_file, get_proxies_from_mysql_db, add_proxies_to_db
from GoogleScraper.caching import CacheManager
from GoogleScraper.config import get_config
from GoogleScraper.scrape_jobs import default_scrape_jobs_for_keywords
from GoogleScraper.scraping import ScrapeWorkerFactory
from GoogleScraper.output_converter import init_outfile
from GoogleScraper.async_mode import AsyncScrapeScheduler
import logging
from GoogleScraper.utils import get_base_path
import GoogleScraper.config
logger = logging.getLogger(__name__)
class WrongConfigurationError(Exception):
    """Raised when the supplied scraping configuration is invalid (e.g. missing
    keyword file, unsupported search type, bad num_results_per_page)."""
    pass
def id_for_keywords(keywords):
    """Determine a unique id for the keywords.

    Helps to continue the last scrape and to identify the last
    scrape object.

    Args:
        keywords: All the keywords in the scrape process.

    Returns:
        The unique md5 hex digest of all keywords.
    """
    # md5 over the concatenation of all utf-8 encoded keywords is identical
    # to feeding them to update() one by one.
    return hashlib.md5(''.join(keywords).encode()).hexdigest()
def scrape_with_config(config):
    """Runs GoogleScraper with the dict in config.

    Args:
        config: A configuration dictionary that updates the global configuration.

    Returns:
        The result of the main() function. Is a scraper search object.
        In case you want to access the session, import it like this:
        ```from GoogleScraper database import session```

    Raises:
        ValueError: If config is not a dictionary.
    """
    if isinstance(config, dict):
        return main(return_results=True, parse_cmd_line=False, config_from_dict=config)

    raise ValueError(
        'The config parameter needs to be a configuration dictionary. Given parameter has type: {}'.format(
            type(config)))
# taken from https://github.com/scrapy/utils/console.py
def start_python_console(namespace=None, noipython=False, banner=''):
    """Start Python console bound to the given namespace. If IPython is
    available, an IPython console will be started instead, unless `noipython`
    is True. Also, tab completion will be used on Unix systems.
    """
    if namespace is None:
        namespace = {}
    try:
        try:  # use IPython if available
            if noipython:
                # deliberately jump to the plain-console fallback below
                raise ImportError()
            try:
                # modern IPython package layout
                from IPython.terminal.embed import InteractiveShellEmbed
                from IPython.terminal.ipapp import load_default_config
            except ImportError:
                # legacy (pre-1.0) IPython package layout
                from IPython.frontend.terminal.embed import InteractiveShellEmbed
                from IPython.frontend.terminal.ipapp import load_default_config
            config = load_default_config()
            shell = InteractiveShellEmbed(
                banner1=banner, user_ns=namespace, config=config)
            shell()
        except ImportError:
            # fall back to the stdlib interactive console
            import code
            try:  # readline module is only available on unix systems
                import readline
            except ImportError:
                pass
            else:
                # importing rlcompleter registers the default completer
                import rlcompleter
                readline.parse_and_bind("tab:complete")
            code.interact(banner=banner, local=namespace)
    except SystemExit:  # raised when using exit() in python code.interact
        pass
class ShowProgressQueue(threading.Thread):
    """Prints scraping progress as keywords are completed.

    Worker threads push one event per scraped keyword onto the shared queue;
    this thread counts them and rewrites a single status line until every
    keyword is processed or a 'done' sentinel arrives.
    """

    def __init__(self, config, queue, num_keywords):
        """Create a ShowProgressQueue thread instance.

        Args:
            config: The configuration dictionary (currently unused here).
            queue: A queue.Queue instance to share among the worker threads.
            num_keywords: The number of total keywords that need to be scraped.
        """
        super().__init__()
        self.queue = queue
        self.num_keywords = num_keywords
        self.num_already_processed = 0
        self.progress_fmt = '\033[92m{}/{} keywords processed.\033[0m'

    def run(self):
        """Consume progress events until all keywords are counted or 'done' is seen."""
        while self.num_already_processed < self.num_keywords:
            event = self.queue.get()
            if event == 'done':
                break
            self.num_already_processed += 1
            status = self.progress_fmt.format(self.num_already_processed, self.num_keywords)
            # '\r' keeps the progress on one terminal line
            print(status, end='\r')
            self.queue.task_done()
def main(return_results=False, parse_cmd_line=True, config_from_dict=None):
    """Runs the GoogleScraper application as determined by the various configuration points.

    The main() function encompasses the core functionality of GoogleScraper. But it
    shouldn't be the main() functions job to check the validity of the provided
    configuration.

    Args:
        return_results: When GoogleScrape is used from within another program, don't print results to stdout,
            store them in a database instead.
        parse_cmd_line: Whether to get options from the command line or not.
        config_from_dict: Configuration that is passed when GoogleScraper is called as library.

    Returns:
        A database session to the results when return_results is True.
        A status code can be returned.
    """
    external_config_file_path = cmd_line_args = None

    if parse_cmd_line:
        cmd_line_args = get_command_line()

        if cmd_line_args.get('config_file', None):
            external_config_file_path = os.path.abspath(cmd_line_args.get('config_file'))

    config = get_config(cmd_line_args, external_config_file_path, config_from_dict)

    if isinstance(config['log_level'], int):
        config['log_level'] = logging.getLevelName(config['log_level'])

    setup_logger(level=config.get('log_level').upper(), format=config.get('log_format'), logfile=config.get('log_file'))

    # --- informational / maintenance flags exit early ---------------------

    if config.get('view_config', False):
        print(open(os.path.join(get_base_path(), 'scrape_config.py')).read())
        return

    if config.get('version'):
        from GoogleScraper.version import __version__
        print(__version__)
        return

    if config.get('clean', False):
        try:
            os.remove('google_scraper.db')
            if sys.platform == 'linux':
                os.system('rm {}/*'.format(config.get('cachedir')))
        except:
            # best-effort cleanup: missing files/cache dirs are fine
            pass
        return

    search_engine_name = config.get('check_detection', None)

    if search_engine_name:
        from GoogleScraper.selenium_mode import check_detection
        code, status = check_detection(config, search_engine_name)
        logger.debug(status)
        print(code)
        return

    init_outfile(config, force_reload=True)

    # --- collect and validate the scraping configuration ------------------

    kwfile = config.get('keyword_file', '')
    if kwfile:
        kwfile = os.path.abspath(kwfile)

    keyword = config.get('keyword')
    keywords = set(config.get('keywords', []))
    proxy_file = config.get('proxy_file', '')
    proxy_db = config.get('mysql_proxy_db', '')

    # when no search engine is specified, use google
    search_engines = config.get('search_engines', ['google',])
    if not isinstance(search_engines, list):
        if search_engines == '*':
            search_engines = config.get('supported_search_engines')
        else:
            search_engines = search_engines.split(',')

    assert isinstance(search_engines, list), 'Search engines must be a list like data type!'
    search_engines = list(map(lambda x: x.lower(), set(search_engines)))

    for engine in search_engines:
        assert engine in config.get('supported_search_engines'), 'Search engine "{}" not supported.'.format(engine)

    num_search_engines = len(search_engines)
    num_workers = int(config.get('num_workers'))
    scrape_method = config.get('scrape_method')
    pages = int(config.get('num_pages_for_keyword', 1))
    method = config.get('scrape_method', 'http')

    if config.get('shell', False):
        namespace = {}
        session_cls = get_session(config, scoped=False)
        namespace['session'] = session_cls()
        namespace['ScraperSearch'] = ScraperSearch
        namespace['SERP'] = SERP
        namespace['Link'] = Link
        namespace['Proxy'] = GoogleScraper.database.Proxy
        print('Available objects:')
        print('session - A sqlalchemy session of the results database')
        print('ScraperSearch - Search/Scrape job instances')
        print('SERP - A search engine results page')
        print('Link - A single link belonging to a SERP')
        print('Proxy - Proxies stored for scraping projects.')
        start_python_console(namespace)
        return

    if not (keyword or keywords) and not kwfile:
        # Just print the help.
        get_command_line(True)
        print('No keywords to scrape for. Please provide either an keyword file (Option: --keyword-file) or specify and '
              'keyword with --keyword.')
        return

    cache_manager = CacheManager(config)

    if config.get('fix_cache_names'):
        cache_manager.fix_broken_cache_names()
        logger.info('renaming done. restart for normal use.')
        return

    keywords = [keyword, ] if keyword else keywords
    scrape_jobs = {}

    if kwfile:
        if not os.path.exists(kwfile):
            raise WrongConfigurationError('The keyword file {} does not exist.'.format(kwfile))
        else:
            if kwfile.endswith('.py'):
                # we need to import the variable "scrape_jobs" from the module.
                sys.path.append(os.path.dirname(kwfile))
                try:
                    # bugfix: rstrip('.py') strips the *characters* '.', 'p'
                    # and 'y' (so 'happy.py' became 'ha'); slice off the
                    # 3-character '.py' suffix instead.
                    modname = os.path.split(kwfile)[-1][:-3]
                    scrape_jobs = getattr(__import__(modname, fromlist=['scrape_jobs']), 'scrape_jobs')
                except ImportError as e:
                    logger.warning(e)
            else:
                # Clean the keywords of duplicates right in the beginning
                # But make sure to keep the order
                keywords = [line.strip() for line in open(kwfile, 'r', encoding='utf-8').read().split('\n') if line.strip()]
                # this is the fastest in Python 3.6 and 3.7: https://stackoverflow.com/questions/7961363/removing-duplicates-in-lists
                keywords = list(dict.fromkeys(keywords))

    if not scrape_jobs:
        scrape_jobs = default_scrape_jobs_for_keywords(keywords, search_engines, scrape_method, pages)

    scrape_jobs = list(scrape_jobs)

    if config.get('clean_cache_files', False):
        cache_manager.clean_cachefiles()
        return

    if config.get('check_oto', False):
        cache_manager._caching_is_one_to_one(keyword)

    if config.get('num_results_per_page') > 100:
        raise WrongConfigurationError('Not more that 100 results per page available for searches.')

    if config.get('num_results_per_page') < 10:
        raise WrongConfigurationError('num_results_per_page must be 10,20,30,40,50 or 100 with Google and in the range(10,100) with other search engines.')

    # --- proxies ----------------------------------------------------------

    proxies = []

    if proxy_db:
        proxies = get_proxies_from_mysql_db(proxy_db)
    elif proxy_file:
        proxies = parse_proxy_file(proxy_file)

    if config.get('use_own_ip'):
        # None in the proxy list stands for "scrape with our own ip"
        proxies.append(None)

    if not proxies:
        raise Exception('No proxies available and using own IP is prohibited by configuration. Turning down.')

    valid_search_types = ('normal', 'video', 'news', 'image')
    if config.get('search_type') not in valid_search_types:
        raise WrongConfigurationError('Invalid search type! Select one of {}'.format(repr(valid_search_types)))

    if config.get('simulate', False):
        # dry run: only report what would have been scraped
        print('*' * 60 + 'SIMULATION' + '*' * 60)
        logger.info('If GoogleScraper would have been run without the --simulate flag, it would have:')
        logger.info('Scraped for {} keywords, with {} results a page, in total {} pages for each keyword'.format(
            len(keywords), int(config.get('num_results_per_page', 0)),
            int(config.get('num_pages_for_keyword'))))
        if None in proxies:
            logger.info('Also using own ip address to scrape.')
        else:
            logger.info('Not scraping with own ip address.')

        logger.info('Used {} unique ip addresses in total'.format(len(proxies)))
        if proxies:
            logger.info('The following proxies are used: \n\t\t{}'.format(
                '\n\t\t'.join([proxy.host + ':' + proxy.port for proxy in proxies if proxy])))

        logger.info('By using {} mode with {} worker instances'.format(config.get('scrape_method'),
                                                                       int(config.get('num_workers'))))
        return

    # get a scoped sqlalchemy session
    session_cls = get_session(config, scoped=False)
    session = session_cls()

    # add fixtures
    fixtures(config, session)

    # add proxies to the database
    add_proxies_to_db(proxies, session)

    # ask the user to continue the last scrape. We detect a continuation of a
    # previously established scrape, if the keyword-file is the same and unmodified since
    # the beginning of the last scrape.
    scraper_search = None
    if kwfile and config.get('continue_last_scrape', False):
        searches = session.query(ScraperSearch). \
            filter(ScraperSearch.keyword_file == kwfile). \
            order_by(ScraperSearch.started_searching). \
            all()

        if searches:
            last_search = searches[-1]
            last_modified = datetime.datetime.utcfromtimestamp(os.path.getmtime(last_search.keyword_file))

            # if the last modification is older then the starting of the search
            if last_modified < last_search.started_searching:
                scraper_search = last_search
                logger.info('Continuing last scrape.')

    if not scraper_search:
        scraper_search = ScraperSearch(
            keyword_file=kwfile,
            number_search_engines_used=num_search_engines,
            number_proxies_used=len(proxies),
            number_search_queries=len(keywords),
            started_searching=datetime.datetime.utcnow(),
            used_search_engines=','.join(search_engines)
        )

    # First of all, lets see how many requests remain to issue after searching the cache.
    if config.get('do_caching'):
        scrape_jobs = cache_manager.parse_all_cached_files(scrape_jobs, session, scraper_search)

    if scrape_jobs:

        # Create a lock to synchronize database access in the sqlalchemy session
        db_lock = threading.Lock()

        # create a lock to cache results
        cache_lock = threading.Lock()

        # A lock to prevent multiple threads from solving captcha, used in selenium instances.
        captcha_lock = threading.Lock()

        logger.info('Going to scrape {num_keywords} keywords with {num_proxies} proxies by using {num_threads} threads.'.format(
            num_keywords=len(list(scrape_jobs)),
            num_proxies=len(proxies),
            num_threads=num_search_engines))

        progress_thread = None

        # Let the games begin
        if method in ('selenium', 'http'):

            # Show the progress of the scraping
            q = queue.Queue()
            progress_thread = ShowProgressQueue(config, q, len(scrape_jobs))
            progress_thread.start()

            # one worker pool entry per (search engine, proxy, worker slot)
            workers = queue.Queue()
            num_worker = 0
            for search_engine in search_engines:

                for proxy in proxies:

                    for worker in range(num_workers):
                        num_worker += 1
                        workers.put(
                            ScrapeWorkerFactory(
                                config,
                                cache_manager=cache_manager,
                                mode=method,
                                proxy=proxy,
                                search_engine=search_engine,
                                session=session,
                                db_lock=db_lock,
                                cache_lock=cache_lock,
                                scraper_search=scraper_search,
                                captcha_lock=captcha_lock,
                                progress_queue=q,
                                browser_num=num_worker
                            )
                        )

            # here we look for suitable workers
            # for all jobs created.
            for job in scrape_jobs:
                while True:
                    worker = workers.get()
                    workers.put(worker)
                    if worker.is_suitabe(job):
                        worker.add_job(job)
                        break

            threads = []

            while not workers.empty():
                worker = workers.get()
                thread = worker.get_worker()
                if thread:
                    threads.append(thread)

            for t in threads:
                t.start()

            for t in threads:
                t.join()

            # after threads are done, stop the progress queue.
            q.put('done')
            progress_thread.join()

        elif method == 'http-async':
            scheduler = AsyncScrapeScheduler(config, scrape_jobs, cache_manager=cache_manager, session=session,
                                             scraper_search=scraper_search, db_lock=db_lock)
            scheduler.run()

        else:
            raise Exception('No such scrape_method {}'.format(config.get('scrape_method')))

    from GoogleScraper.output_converter import close_outfile
    close_outfile()

    scraper_search.stopped_searching = datetime.datetime.utcnow()
    session.add(scraper_search)
    session.commit()

    if return_results:
        return scraper_search
| 18,163 | 37 | 155 | py |
GoogleScraper | GoogleScraper-master/GoogleScraper/scraping.py | # -*- coding: utf-8 -*-
import datetime
import random
import time
import os
import abc
import math
from GoogleScraper.proxies import Proxy
from GoogleScraper.database import db_Proxy
from GoogleScraper.output_converter import store_serp_result
from GoogleScraper.parsing import get_parser_by_search_engine, parse_serp
import logging
logger = logging.getLogger(__name__)
SEARCH_MODES = ('http', 'selenium', 'http-async')
class GoogleSearchError(Exception):
    """Base error for failures while scraping a search engine."""
    pass


class InvalidNumberResultsException(GoogleSearchError):
    """Raised for an invalid/unsupported number of search results."""
    pass


class MaliciousRequestDetected(GoogleSearchError):
    """Raised when the search engine flags our request as malicious/automated."""
    pass


class SeleniumMisconfigurationError(Exception):
    """Raised when the selenium scraping mode is configured incorrectly."""
    pass


class SeleniumSearchError(Exception):
    """Raised when a selenium based search fails."""
    pass


class StopScrapingException(Exception):
    """Raised to abort the whole scraping process (see module docstring below)."""
    pass
"""
GoogleScraper should be as robust as possible.
There are several conditions that may stop the scraping process. In such a case,
a StopScrapingException is raised with the reason.
Important events:
- All proxies are detected and we cannot request further keywords => Stop.
- No internet connection => Stop.
- If the proxy is detected by the search engine we try to get another proxy from the pool and we
call switch_proxy() => continue.
- If the proxy is detected by the search engine and there is no other proxy in the pool, we wait
{search_engine}_proxy_detected_timeout seconds => continue.
+ If the proxy is detected again after the waiting time, we discard the proxy for the whole scrape.
"""
def get_base_search_url_by_search_engine(config, search_engine_name, search_mode):
    """Retrieves the search engine base url for a specific search_engine.

    This function cascades: base urls are overwritten by search_engine urls
    in the specific mode sections. On the other side, if a search engine has
    no special url in its corresponding mode, the default one from the
    SCRAPING config section is loaded. If an ip file is configured for the
    engine, a random ip from that file is used instead.

    Args:
        config: Configuration mapping, read via .get().
        search_engine_name: The name of the search engine.
        search_mode: The search mode that is used. selenium or http or http-async.

    Returns:
        The base search url (or a random ip read from the engine's ip file).
    """
    assert search_mode in SEARCH_MODES, 'search mode "{}" is not available'.format(search_mode)
    # Mode-specific url takes precedence over the engine-wide url.
    specific_base_url = config.get('{}_{}_search_url'.format(search_mode, search_engine_name), None)
    if not specific_base_url:
        specific_base_url = config.get('{}_search_url'.format(search_engine_name), None)
    # Optionally pick a random raw ip for this engine from a user supplied file.
    ipfile = config.get('{}_ip_file'.format(search_engine_name), '')
    if ipfile and os.path.exists(ipfile):
        with open(ipfile, 'rt') as handle:
            # Filter blank lines: the original could pick the empty string
            # produced by a trailing newline and return '' as base url.
            ips = [line.strip() for line in handle if line.strip()]
        if ips:
            return random.choice(ips)
    return specific_base_url
class SearchEngineScrape(metaclass=abc.ABCMeta):
    """Abstract base class that represents a search engine scrape.
    Each subclass that derives from SearchEngineScrape needs to
    implement some common functionality like setting a proxy,
    returning the found results, caching results and pushing scraped
    data to a storage like a database or an output file.
    The derivation is divided in two hierarchies: First we divide child
    classes in different Transport mechanisms. Scraping can happen over
    different communication channels like Raw HTTP, scraping with the
    selenium framework or using the an asynchronous HTTP client.
    The next layer is the concrete implementation of the search functionality
    of the specific search engines. This is not done in a extra derivation
    hierarchy (otherwise there would be a lot of base classes for each
    search engine and thus quite some boilerplate overhead),
    instead we determine our search engine over the internal state
    (An attribute name self.search_engine) and handle the different search
    engines in the search function.
    Each mode must behave similarly: It can only scape one search engine at the same time,
    but it may search for multiple search keywords. The initial start number may be
    set by the configuration. The number of pages that should be scraped for each
    keyword is also configurable.
    It may be possible to apply all the above rules dynamically for each
    search query. This means that the search page offset, the number of
    consecutive search pages may be provided for all keywords uniquely instead
    that they are the same for all keywords. But this requires also a
    sophisticated input format and more tricky engineering.
    """
    # Markers that indicate an engine flagged the request as automated.
    # Presumably 'inurl' is matched against the response url and 'inhtml'
    # against the page body by the transport subclasses -- TODO confirm.
    # An empty dict means no known detection needle for that engine.
    malicious_request_needles = {
        'google': {
            'inurl': '/sorry/',
            'inhtml': 'detected unusual traffic'
        },
        'bing': {},
        'yahoo': {},
        'baidu': {},
        'yandex': {},
        'ask': {},
        'blekko': {},
        'duckduckgo': {}
    }
    def __init__(self, config, cache_manager=None, jobs=None, scraper_search=None, session=None, db_lock=None, cache_lock=None,
                 start_page_pos=1, search_engine=None, search_type=None, proxy=None, progress_queue=None):
        """Instantiate an SearchEngineScrape object.
        Args:
            config: Configuration mapping; read via .get() throughout.
            cache_manager: Caches scraped html (see cache_results()).
            jobs: Mapping of keyword -> list of page numbers to scrape.
            scraper_search: Database object the resulting SERPs are attached to.
            session: SQLAlchemy (scoped) session used for persistence.
            db_lock: Lock guarding database writes.
            cache_lock: Lock guarding cache writes.
            start_page_pos: First result page to request (clamped to >= 1).
            search_engine: Required name of the engine to scrape (lowercased).
            search_type: Overrides config['search_type'] (default 'normal').
            proxy: Optional Proxy namedtuple to route requests through.
            progress_queue: Queue that receives one item per finished search.
        """
        # Set the config dictionary
        self.config = config
        # Set the cache manager
        self.cache_manager = cache_manager
        jobs = jobs or {}
        self.search_engine_name = search_engine
        assert self.search_engine_name, 'You need to specify an search_engine'
        self.search_engine_name = self.search_engine_name.lower()
        if not search_type:
            self.search_type = self.config.get('search_type', 'normal')
        else:
            self.search_type = search_type
        self.jobs = jobs
        # the keywords that couldn't be scraped by this worker
        self.missed_keywords = set()
        # the number of queries to scrape
        self.num_keywords = len(self.jobs)
        # The actual keyword that is to be scraped next
        self.query = ''
        # The default pages per keywords
        self.pages_per_keyword = [1, ]
        # The number that shows how many searches have been done by the worker
        self.search_number = 1
        # The parser that should be used to parse the search engine results
        self.parser = get_parser_by_search_engine(self.search_engine_name)(config=self.config)
        # The number of results per page
        self.num_results_per_page = int(self.config.get('num_results_per_page', 10))
        # The page where to start scraping. By default the starting page is 1.
        if start_page_pos:
            self.start_page_pos = 1 if start_page_pos < 1 else start_page_pos
        else:
            self.start_page_pos = int(self.config.get('search_offset', 1))
        # The page where we are right now
        self.page_number = self.start_page_pos
        # Install the proxy if one was provided
        self.proxy = proxy
        if isinstance(proxy, Proxy):
            self.set_proxy()
            self.requested_by = self.proxy.host + ':' + self.proxy.port
        else:
            self.requested_by = 'localhost'
        # the scraper_search object
        self.scraper_search = scraper_search
        # the scrape mode
        # to be set by subclasses
        self.scrape_method = ''
        # Whether the instance is ready to run
        self.startable = True
        # set the database lock
        self.db_lock = db_lock
        # init the cache lock
        self.cache_lock = cache_lock
        # a queue to put an element in whenever a new keyword is scraped.
        # to visualize the progress
        self.progress_queue = progress_queue
        # set the session
        self.session = session
        # the current request time
        self.requested_at = None
        # The name of the scraper
        self.scraper_name = '{}-{}'.format(self.__class__.__name__, self.search_engine_name)
        # How long to sleep (in seconds) after every n-th request
        # Engine-specific ranges override the global 'sleeping_ranges' config.
        self.sleeping_ranges = dict()
        self.sleeping_ranges = self.config.get(
            '{search_engine}_sleeping_ranges'.format(search_engine=self.search_engine_name),
            self.config.get('sleeping_ranges'))
        assert sum(self.sleeping_ranges.keys()) == 100, 'The sum of the keys of sleeping_ranges must be 100!'
        # compute sleeping ranges
        self.sleeping_times = self._create_random_sleeping_intervals(self.num_keywords)
        logger.debug('Sleeping ranges: {}'.format(self.sleeping_times))
        # the default timeout
        self.timeout = 5
        # the status of the thread after finishing or failing
        self.status = 'successful'
        self.html = ''
    @abc.abstractmethod
    def search(self, *args, **kwargs):
        """Send the search request(s) over the transport."""
    @abc.abstractmethod
    def set_proxy(self):
        """Install a proxy on the communication channel."""
    @abc.abstractmethod
    def switch_proxy(self, proxy):
        """Switch the proxy on the communication channel."""
    @abc.abstractmethod
    def proxy_check(self, proxy):
        """Check whether the assigned proxy works correctly and react"""
    @abc.abstractmethod
    def handle_request_denied(self, status_code):
        """Generic behaviour when search engines detect our scraping.
        Args:
            status_code: The status code of the http response.
        """
        self.status = 'Malicious request detected: {}'.format(status_code)
    def store(self):
        """Store the parsed data in the sqlalchemy scoped session.
        Returns True when the stored SERP has results, else False.
        """
        assert self.session, 'No database session.'
        if self.html:
            self.parser.parse(self.html)
        else:
            # NOTE(review): nothing was fetched -- parser is set to None and
            # parse_serp() must handle that case; confirm downstream.
            self.parser = None
        with self.db_lock:
            serp = parse_serp(self.config, parser=self.parser, scraper=self, query=self.query)
            self.scraper_search.serps.append(serp)
            self.session.add(serp)
            self.session.commit()
            store_serp_result(serp, self.config)
            if serp.num_results:
                return True
            else:
                return False
    def next_page(self):
        """Increment the page. The next search request will request the next page."""
        self.start_page_pos += 1
    def keyword_info(self):
        """Print a short summary where we are in the scrape and what's the next keyword."""
        logger.info(
            '[{thread_name}][{ip}]]Keyword: "{keyword}" with {num_pages} pages, slept {delay} seconds before '
            'scraping. {done}/{all} already scraped.'.format(
                thread_name=self.scraper_name,
                ip=self.requested_by,
                keyword=self.query,
                num_pages=self.pages_per_keyword,
                delay=self.current_delay,
                done=self.search_number,
                all=self.num_keywords
            ))
    def instance_creation_info(self, scraper_name):
        """Debug message whenever a scraping worker is created"""
        # NOTE(review): self.base_search_url is not assigned in this base
        # class' __init__; it is presumably set by transport subclasses.
        logger.info('[+] {}[{}][search-type:{}][{}] using search engine "{}". Num keywords={}, num pages for keyword={}'.format(
            scraper_name, self.requested_by, self.search_type, self.base_search_url, self.search_engine_name,
            len(self.jobs),
            self.pages_per_keyword))
    def cache_results(self):
        """Caches the html for the current request."""
        self.cache_results(self.parser, self.query, self.search_engine_name, self.scrape_method, self.page_number,
                                         db_lock=self.db_lock)
    def _create_random_sleeping_intervals(self, number_of_searches):
        """Sleep a given amount of time as a function of the number of searches done.
        Args:
            number_of_searches: How many searches the worker has to process.
        Returns:
            A list of tuples (intervals) of sleep ranges for everey search number.
        """
        n = sum(self.sleeping_ranges.keys())
        assert n == 100
        assert number_of_searches >= 0
        # if there are more searches than 100, multiply with the factor
        x = math.ceil(number_of_searches/n)
        sleeping_times = []
        # each key is a percentage weight; value is a (lower, upper) range
        for key, value in self.sleeping_ranges.items():
            for i in range(key):
                sleeping_times.append(random.randrange(*value))
        sleeping_times = sleeping_times*x
        # randomly shuffle the whole thing
        random.shuffle(sleeping_times)
        return sleeping_times
    def detection_prevention_sleep(self):
        # Sleep a random, pre-computed amount before the next request.
        self.current_delay = 0
        if self.config.get('do_sleep', True):
            # NOTE(review): sleeping_times was sized from num_keywords in
            # __init__, while search_number increments once per search/page;
            # with several pages per keyword this index could run past the
            # list -- confirm upstream guarantees.
            self.current_delay = self.sleeping_times[self.search_number]
            time.sleep(self.current_delay)
        if self.config.get('do_sleep', True):
            try:
                sleep_range = self.config.get('fixed_sleeping_ranges', {})[self.search_number]
                self.current_delay = random.randrange(*sleep_range)
                time.sleep(self.current_delay)
            except KeyError as ke:
                # normal case
                pass
    def after_search(self):
        """Store the results and parse em.
        Notify the progress queue if necessary.
        """
        self.search_number += 1
        if not self.store():
            logger.debug('No results to store for keyword: "{}" in search engine: {}'.format(self.query,
                                                                                             self.search_engine_name))
        if self.progress_queue:
            self.progress_queue.put(1)
        self.cache_results()
    def before_search(self):
        """Things that need to happen before entering the search loop."""
        # check proxies first before anything
        if self.config.get('check_proxies', True) and self.proxy:
            # NOTE(review): the abstract proxy_check(self, proxy) declares a
            # proxy argument, but it is called here without one -- subclass
            # implementations presumably drop the parameter; confirm.
            if not self.proxy_check():
                self.startable = False
    def update_proxy_status(self, status, ipinfo=None, online=True):
        """Sets the proxy status with the results of ipinfo.io
        Args:
            status: A string the describes the status of the proxy.
            ipinfo: The json results from ipinfo.io
            online: Whether the proxy is usable or not.
        """
        ipinfo = ipinfo or {}
        with self.db_lock:
            proxy = self.session.query(db_Proxy).filter(self.proxy.host == db_Proxy.ip).first()
            if proxy:
                # copy every ipinfo.io field onto the database row
                for key in ipinfo.keys():
                    setattr(proxy, key, ipinfo[key])
                proxy.checked_at = datetime.datetime.utcnow()
                proxy.status = status
                proxy.online = online
                self.session.add(proxy)
                self.session.commit()
from GoogleScraper.http_mode import HttpScrape
from GoogleScraper.selenium_mode import get_selenium_scraper_by_search_engine_name
class ScrapeWorkerFactory():
def __init__(self, config, cache_manager=None, mode=None, proxy=None, search_engine=None, session=None, db_lock=None,
cache_lock=None, scraper_search=None, captcha_lock=None, progress_queue=None, browser_num=1):
self.config = config
self.cache_manager = cache_manager
self.mode = mode
self.proxy = proxy
self.search_engine = search_engine
self.session = session
self.db_lock = db_lock
self.cache_lock = cache_lock
self.scraper_search = scraper_search
self.captcha_lock = captcha_lock
self.progress_queue = progress_queue
self.browser_num = browser_num
self.jobs = dict()
def is_suitabe(self, job):
return job['scrape_method'] == self.mode and job['search_engine'] == self.search_engine
def add_job(self, job):
query = job['query']
page_number = job['page_number']
if query not in self.jobs:
self.jobs[query] = []
self.jobs[query].append(page_number)
def get_worker(self):
if self.jobs:
if self.mode == 'selenium':
return get_selenium_scraper_by_search_engine_name(
self.config,
self.search_engine,
cache_manager=self.cache_manager,
search_engine=self.search_engine,
jobs=self.jobs,
session=self.session,
scraper_search=self.scraper_search,
cache_lock=self.cache_lock,
db_lock=self.db_lock,
proxy=self.proxy,
progress_queue=self.progress_queue,
captcha_lock=self.captcha_lock,
browser_num=self.browser_num,
)
elif self.mode == 'http':
return HttpScrape(
self.config,
cache_manager=self.cache_manager,
search_engine=self.search_engine,
jobs=self.jobs,
session=self.session,
scraper_search=self.scraper_search,
cache_lock=self.cache_lock,
db_lock=self.db_lock,
proxy=self.proxy,
progress_queue=self.progress_queue,
)
return None
| 17,276 | 33.211881 | 128 | py |
GoogleScraper | GoogleScraper-master/GoogleScraper/proxies.py | # -*- coding: utf-8 -*-
from collections import namedtuple
import os
import pymysql
import re
from GoogleScraper import database
import logging
Proxy = namedtuple('Proxy', 'proto, host, port, username, password')
logger = logging.getLogger(__name__)
def parse_proxy_file(fname):
    """Parses a proxy file
    The format should be like the following:
    socks5 23.212.45.13:1080 username:password
    socks4 23.212.45.13:80 username:password
    http 23.212.45.13:80
    If username and password aren't provided, GoogleScraper assumes
    that the proxy doesn't need auth credentials.
    Args:
        fname: The file name where to look for proxies.
    Returns:
        The parsed proxies.
    Raises:
        ValueError if no file with the path fname could be found.
    """
    path = os.path.join(os.getcwd(), fname)
    if not os.path.exists(path):
        raise ValueError('No such file/directory')
    proxies = []
    with open(path, 'r') as pf:
        for line in pf:
            line = line.strip()
            # Skip blank lines as well as '#'/'//' comments. The original
            # implementation crashed on blank/trailing-newline lines.
            if not line or line.startswith('#') or line.startswith('//'):
                continue
            tokens = line.split(' ')
            try:
                proto = tokens[0]
                host, port = tokens[1].split(':')
            except (IndexError, ValueError):
                # narrowed from a bare except: only malformed lines land here
                raise Exception(
                    'Invalid proxy file. Should have the following format: {}'.format(parse_proxy_file.__doc__))
            if len(tokens) == 3:
                username, password = tokens[2].split(':')
                proxies.append(Proxy(proto=proto, host=host, port=port, username=username, password=password))
            else:
                proxies.append(Proxy(proto=proto, host=host, port=port, username='', password=''))
    return proxies
def get_proxies(host, user, password, database, port=3306, unix_socket=None):
    """Connect to a mysql database with pymysql and fetch proxies for the scrape job.

    Args:
        host: The mysql database host.
        user: The mysql user.
        password: The database password.
        port: The mysql port, 3306 by default.
        unix_socket: Optional path to the mysql socket file for non-standard
            installations.

    Returns:
        A list of Proxy tuples read from the 'proxies' table.

    Raises:
        Re-raises (after logging) any exception from the database layer.
    """
    try:
        connection = pymysql.connect(host=host, port=port, user=user, passwd=password, unix_socket=unix_socket)
        connection.select_db(database)
        cursor = connection.cursor(pymysql.cursors.DictCursor)
        # Adapt this query to match your own proxy table layout.
        cursor.execute('SELECT host, port, username, password, protocol FROM proxies')
        return [
            Proxy(proto=row['protocol'], host=row['host'], port=row['port'],
                  username=row['username'], password=row['password'])
            for row in cursor.fetchall()
        ]
    except Exception as err:
        logger.error(err)
        raise
def get_proxies_from_mysql_db(s):
    """Parse a mysql connection string of the form

    mysql://<username>:<password>@<host>/<dbname>

    and return every proxy found in that database's 'proxies' table.
    """
    match = re.search(r'(?P<dbms>\w*?)://(?P<user>\w*?):(?P<pwd>.*?)@(?P<host>\w*?)/(?P<db>\w*)', s)
    return get_proxies(match.group('host'), match.group('user'),
                       match.group('pwd'), match.group('db'))
def add_proxies_to_db(proxies, session):
    """Persist the given proxies in the database.

    An existing row with the same ip is updated in place (other fields are
    overwritten); nothing is deleted and the proxy status is not checked.

    Args:
        proxies: A list of Proxy tuples (falsy entries are skipped).
        session: A database session to work with.
    """
    for proxy in proxies:
        if not proxy:
            continue
        record = session.query(database.Proxy).filter(proxy.host == database.Proxy.ip).first()
        if record is None:
            record = database.Proxy(ip=proxy.host)
        record.port = proxy.port
        record.username = proxy.username
        record.password = proxy.password
        record.proto = proxy.proto
        session.add(record)
        session.commit()
GoogleScraper | GoogleScraper-master/GoogleScraper/socks.py | """
SocksiPy - Python SOCKS module.
Version 1.5.0
Copyright 2006 Dan-Haim. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of Dan Haim nor the names of his contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This module provides a standard socket-like interface for Python
for tunneling connections through SOCKS proxies.
===============================================================================
Minor modifications made by Christopher Gilbert (http://motomastyle.com/)
for use in PyLoris (http://pyloris.sourceforge.net/)
Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
mainly to merge bug fixes found in Sourceforge
Modifications made by Anorov (https://github.com/Anorov)
-Forked and renamed to PySocks
-Fixed issue with HTTP proxy failure checking (same bug that was in the old ___recvall() method)
-Included SocksiPyHandler (sockshandler.py), to be used as a urllib2 handler,
courtesy of e000 (https://github.com/e000): https://gist.github.com/869791#file_socksipyhandler.py
-Re-styled code to make it readable
-Aliased PROXY_TYPE_SOCKS5 -> SOCKS5 etc.
-Improved exception handling and output
-Removed irritating use of sequence indexes, replaced with tuple unpacked variables
-Fixed up Python 3 bytestring handling - chr(0x03).encode() -> b"\x03"
-Other general fixes
-Added clarification that the HTTP proxy connection method only supports CONNECT-style tunneling HTTP proxies
-Various small bug fixes
"""
__version__ = "1.5.1"
import socket
import struct
from errno import EOPNOTSUPP, EINVAL, EAGAIN
from io import BytesIO, SEEK_CUR
from collections import Callable
# Proxy protocol identifiers; the short aliases (SOCKS4/SOCKS5/HTTP) are the
# preferred modern names, the PROXY_TYPE_* names are kept for SocksiPy
# compatibility.
PROXY_TYPE_SOCKS4 = SOCKS4 = 1
PROXY_TYPE_SOCKS5 = SOCKS5 = 2
PROXY_TYPE_HTTP = HTTP = 3
PRINTABLE_PROXY_TYPES = {SOCKS4: "SOCKS4", SOCKS5: "SOCKS5", HTTP: "HTTP"}
# Keep a reference to the untouched socket class before any monkey-patching
# (wrap_module replaces module-level socket.socket with socksocket).
_orgsocket = _orig_socket = socket.socket
class ProxyError(IOError):
    """Base error type for all proxy failures.

    socket_err contains original socket.error exception.
    """

    def __init__(self, msg, socket_err=None):
        self.socket_err = socket_err
        # Append the underlying socket error, when given, to the message.
        if socket_err:
            msg = msg + ": {0}".format(socket_err)
        self.msg = msg

    def __str__(self):
        return self.msg
class GeneralProxyError(ProxyError): pass
class ProxyConnectionError(ProxyError): pass
class SOCKS5AuthError(ProxyError): pass
class SOCKS5Error(ProxyError): pass
class SOCKS4Error(ProxyError): pass
class HTTPError(ProxyError): pass
# Human readable messages for SOCKS4 reply codes.
SOCKS4_ERRORS = { 0x5B: "Request rejected or failed",
                  0x5C: "Request rejected because SOCKS server cannot connect to identd on the client",
                  0x5D: "Request rejected because the client program and identd report different user-ids"
                }
# Human readable messages for SOCKS5 reply (REP) codes, per RFC 1928.
SOCKS5_ERRORS = { 0x01: "General SOCKS server failure",
                  0x02: "Connection not allowed by ruleset",
                  0x03: "Network unreachable",
                  0x04: "Host unreachable",
                  0x05: "Connection refused",
                  0x06: "TTL expired",
                  0x07: "Command not supported, or protocol error",
                  0x08: "Address type not supported"
                }
# Default server port per proxy type, used when the caller gives none.
DEFAULT_PORTS = { SOCKS4: 1080,
                  SOCKS5: 1080,
                  HTTP: 8080
                }
def set_default_proxy(proxy_type=None, addr=None, port=None, rdns=True, username=None, password=None):
    """
    set_default_proxy(proxy_type, addr[, port[, rdns[, username, password]]])
    Sets a default proxy which all further socksocket objects will use,
    unless explicitly changed. All parameters are as for socket.set_proxy().
    """
    # NOTE(review): addr must be a str here; leaving the default addr=None
    # raises AttributeError on .encode() -- confirm callers always pass addr.
    socksocket.default_proxy = (proxy_type, addr.encode(), port, rdns,
                                username.encode() if username else None,
                                password.encode() if password else None)
setdefaultproxy = set_default_proxy  # legacy SocksiPy alias
def get_default_proxy():
    """
    Returns the default proxy, set by set_default_proxy.
    """
    return socksocket.default_proxy
getdefaultproxy = get_default_proxy  # legacy SocksiPy alias
def wrap_module(module):
    """Monkey-patch *module* so its ``socket.socket`` becomes socksocket.

    A default proxy must have been configured via set_default_proxy()
    beforehand. Only modules that imported ``socket`` directly into their
    namespace are affected; most of the Python Standard Library qualifies.
    """
    if not socksocket.default_proxy:
        raise GeneralProxyError("No default proxy specified")
    module.socket.socket = socksocket
wrapmodule = wrap_module
def create_connection(dest_pair, proxy_type=None, proxy_addr=None,
                      proxy_port=None, proxy_username=None,
                      proxy_password=None, timeout=None):
    """create_connection(dest_pair, *[, timeout], **proxy_args) -> socket object

    Behaves like socket.create_connection(), except that the returned
    socksocket is routed through the given proxy first.

    dest_pair - 2-tuple of (IP/hostname, port).
    **proxy_args - Same args passed to socksocket.set_proxy().
    timeout - Optional socket timeout value, in seconds.
    """
    proxied_sock = socksocket()
    if isinstance(timeout, (int, float)):
        proxied_sock.settimeout(timeout)
    proxied_sock.set_proxy(proxy_type, proxy_addr, proxy_port,
                           proxy_username, proxy_password)
    proxied_sock.connect(dest_pair)
    return proxied_sock
class _BaseSocket(socket.socket):
    """Allows Python 2's "delegated" methods such as send() to be overridden
    """
    def __init__(self, *pos, **kw):
        _orig_socket.__init__(self, *pos, **kw)
        # Save the delegated instance methods, then delete them so that
        # normal class-level overriding works on this instance.
        self._savedmethods = dict()
        for name in self._savenames:
            self._savedmethods[name] = getattr(self, name)
            delattr(self, name)  # Allows normal overriding mechanism to work
    # Names of delegated socket methods to save/remove; populated at import
    # time by the module-level loop directly below this class.
    _savenames = list()
def _makemethod(name):
    # Build a forwarding method that calls the saved per-instance method.
    return lambda self, *pos, **kw: self._savedmethods[name](*pos, **kw)
# Patch _BaseSocket (import-time, Python 2 only in practice): for each
# delegated socket method that is not a plain function on the class,
# register it for saving and install a forwarder in its place.
for name in ("sendto", "send", "recvfrom", "recv"):
    method = getattr(_BaseSocket, name, None)
    # Determine if the method is not defined the usual way
    # as a function in the class.
    # Python 2 uses __slots__, so there are descriptors for each method,
    # but they are not functions.
    if not isinstance(method, Callable):
        _BaseSocket._savenames.append(name)
        setattr(_BaseSocket, name, _makemethod(name))
class socksocket(_BaseSocket):
"""socksocket([family[, type[, proto]]]) -> socket object
Open a SOCKS enabled socket. The parameters are the same as
those of the standard socket init. In order for SOCKS to work,
you must specify family=AF_INET and proto=0.
The "type" argument must be either SOCK_STREAM or SOCK_DGRAM.
"""
default_proxy = None
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None):
if type not in {socket.SOCK_STREAM, socket.SOCK_DGRAM}:
msg = "Socket type must be stream or datagram, not {!r}"
raise ValueError(msg.format(type))
_BaseSocket.__init__(self, family, type, proto, _sock)
self._proxyconn = None # TCP connection to keep UDP relay alive
if self.default_proxy:
self.proxy = self.default_proxy
else:
self.proxy = (None, None, None, None, None, None)
self.proxy_sockname = None
self.proxy_peername = None
def _readall(self, file, count):
"""
Receive EXACTLY the number of bytes requested from the file object.
Blocks until the required number of bytes have been received.
"""
data = b""
while len(data) < count:
d = file.read(count - len(data))
if not d:
raise GeneralProxyError("Connection closed unexpectedly")
data += d
return data
def set_proxy(self, proxy_type=None, addr=None, port=None, rdns=True, username=None, password=None):
"""set_proxy(proxy_type, addr[, port[, rdns[, username[, password]]]])
Sets the proxy to be used.
proxy_type - The type of the proxy to be used. Three types
are supported: PROXY_TYPE_SOCKS4 (including socks4a),
PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
addr - The address of the server (IP or DNS).
port - The port of the server. Defaults to 1080 for SOCKS
servers and 8080 for HTTP proxy servers.
rdns - Should DNS queries be performed on the remote side
(rather than the local side). The default is True.
Note: This has no effect with SOCKS4 servers.
username - Username to authenticate with to the server.
The default is no authentication.
password - Password to authenticate with to the server.
Only relevant when username is also provided.
"""
self.proxy = (proxy_type, addr.encode(), port, rdns,
username.encode() if username else None,
password.encode() if password else None)
setproxy = set_proxy
def bind(self, *pos, **kw):
"""
Implements proxy connection for UDP sockets,
which happens during the bind() phase.
"""
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
if not proxy_type or self.type != socket.SOCK_DGRAM:
return _orig_socket.bind(self, *pos, **kw)
if self._proxyconn:
raise socket.error(EINVAL, "Socket already bound to an address")
if proxy_type != SOCKS5:
msg = "UDP only supported by SOCKS5 proxy type"
raise socket.error(EOPNOTSUPP, msg)
_BaseSocket.bind(self, *pos, **kw)
# Need to specify actual local port because
# some relays drop packets if a port of zero is specified.
# Avoid specifying host address in case of NAT though.
_, port = self.getsockname()
dst = ("0", port)
self._proxyconn = _orig_socket()
proxy = self._proxy_addr()
self._proxyconn.connect(proxy)
UDP_ASSOCIATE = b"\x03"
_, relay = self._SOCKS5_request(self._proxyconn, UDP_ASSOCIATE, dst)
# The relay is most likely on the same host as the SOCKS proxy,
# but some proxies return a private IP address (10.x.y.z)
host, _ = proxy
_, port = relay
_BaseSocket.connect(self, (host, port))
self.proxy_sockname = ("0.0.0.0", 0) # Unknown
def sendto(self, bytes, *args):
if self.type != socket.SOCK_DGRAM:
return _BaseSocket.sendto(self, bytes, *args)
if not self._proxyconn:
self.bind(("", 0))
address = args[-1]
flags = args[:-1]
header = BytesIO()
RSV = b"\x00\x00"
header.write(RSV)
STANDALONE = b"\x00"
header.write(STANDALONE)
self._write_SOCKS5_address(address, header)
sent = _BaseSocket.send(self, header.getvalue() + bytes, *flags)
return sent - header.tell()
def send(self, bytes, flags=0):
if self.type == socket.SOCK_DGRAM:
return self.sendto(bytes, flags, self.proxy_peername)
else:
return _BaseSocket.send(self, bytes, flags)
def recvfrom(self, bufsize, flags=0):
if self.type != socket.SOCK_DGRAM:
return _BaseSocket.recvfrom(self, bufsize, flags)
if not self._proxyconn:
self.bind(("", 0))
buf = BytesIO(_BaseSocket.recv(self, bufsize, flags))
buf.seek(+2, SEEK_CUR)
frag = buf.read(1)
if ord(frag):
raise NotImplementedError("Received UDP packet fragment")
fromhost, fromport = self._read_SOCKS5_address(buf)
peerhost, peerport = self.proxy_peername
filterhost = socket.inet_pton(self.family, peerhost).strip(b"\x00")
filterhost = filterhost and fromhost != peerhost
if filterhost or peerport not in (0, fromport):
raise socket.error(EAGAIN, "Packet filtered")
return (buf.read(), (fromhost, fromport))
def recv(self, *pos, **kw):
bytes, _ = self.recvfrom(*pos, **kw)
return bytes
def close(self):
if self._proxyconn:
self._proxyconn.close()
return _BaseSocket.close(self)
def get_proxy_sockname(self):
"""
Returns the bound IP address and port number at the proxy.
"""
return self.proxy_sockname
getproxysockname = get_proxy_sockname
def get_proxy_peername(self):
"""
Returns the IP and port number of the proxy.
"""
return _BaseSocket.getpeername(self)
getproxypeername = get_proxy_peername
def get_peername(self):
"""
Returns the IP address and port number of the destination
machine (note: get_proxy_peername returns the proxy)
"""
return self.proxy_peername
getpeername = get_peername
def _negotiate_SOCKS5(self, *dest_addr):
"""
Negotiates a stream connection through a SOCKS5 server.
"""
CONNECT = b"\x01"
self.proxy_peername, self.proxy_sockname = self._SOCKS5_request(self,
CONNECT, dest_addr)
def _SOCKS5_request(self, conn, cmd, dst):
"""
Send SOCKS5 request with given command (CMD field) and
address (DST field). Returns resolved DST address that was used.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
writer = conn.makefile("wb")
reader = conn.makefile("rb", 0) # buffering=0 renamed in Python 3
try:
# First we'll send the authentication packages we support.
if username and password:
# The username/password details were supplied to the
# set_proxy method so we support the USERNAME/PASSWORD
# authentication (in addition to the standard none).
writer.write(b"\x05\x02\x00\x02")
else:
# No username/password were entered, therefore we
# only support connections with no authentication.
writer.write(b"\x05\x01\x00")
# We'll receive the server's response to determine which
# method was selected
writer.flush()
chosen_auth = self._readall(reader, 2)
if chosen_auth[0:1] != b"\x05":
# Note: string[i:i+1] is used because indexing of a bytestring
# via bytestring[i] yields an integer in Python 3
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
# Check the chosen authentication method
if chosen_auth[1:2] == b"\x02":
# Okay, we need to perform a basic username/password
# authentication.
writer.write(b"\x01" + chr(len(username)).encode()
+ username
+ chr(len(password)).encode()
+ password)
writer.flush()
auth_status = self._readall(reader, 2)
if auth_status[0:1] != b"\x01":
# Bad response
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
if auth_status[1:2] != b"\x00":
# Authentication failed
raise SOCKS5AuthError("SOCKS5 authentication failed")
# Otherwise, authentication succeeded
# No authentication is required if 0x00
elif chosen_auth[1:2] != b"\x00":
# Reaching here is always bad
if chosen_auth[1:2] == b"\xFF":
raise SOCKS5AuthError("All offered SOCKS5 authentication methods were rejected")
else:
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
# Now we can request the actual connection
writer.write(b"\x05" + cmd + b"\x00")
resolved = self._write_SOCKS5_address(dst, writer)
writer.flush()
# Get the response
resp = self._readall(reader, 3)
if resp[0:1] != b"\x05":
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
status = ord(resp[1:2])
if status != 0x00:
# Connection failed: server returned an error
error = SOCKS5_ERRORS.get(status, "Unknown error")
raise SOCKS5Error("{0:#04x}: {1}".format(status, error))
# Get the bound address/port
bnd = self._read_SOCKS5_address(reader)
return (resolved, bnd)
finally:
reader.close()
writer.close()
def _write_SOCKS5_address(self, addr, file):
"""
Return the host and port packed for the SOCKS5 protocol,
and the resolved address as a tuple object.
"""
host, port = addr
proxy_type, _, _, rdns, username, password = self.proxy
# If the given destination address is an IP address, we'll
# use the IPv4 address request even if remote resolving was specified.
try:
addr_bytes = socket.inet_aton(host)
file.write(b"\x01" + addr_bytes)
host = socket.inet_ntoa(addr_bytes)
except socket.error:
# Well it's not an IP number, so it's probably a DNS name.
if rdns:
# Resolve remotely
file.write(b"\x03" + chr(len(host)).encode() + host.encode())
else:
# Resolve locally
addr_bytes = socket.inet_aton(socket.gethostbyname(host))
file.write(b"\x01" + addr_bytes)
host = socket.inet_ntoa(addr_bytes)
file.write(struct.pack(">H", port))
return host, port
def _read_SOCKS5_address(self, file):
    """Read a SOCKS5 (address, port) pair from *file* and return it.

    Understands ATYP 0x01 (IPv4, returned in dotted-quad form) and
    ATYP 0x03 (length-prefixed domain name, returned as raw bytes).

    Raises:
        GeneralProxyError: if the server sent an unsupported address type.
    """
    address_type = self._readall(file, 1)
    if address_type == b"\x01":
        # IPv4: four raw bytes follow.
        address = socket.inet_ntoa(self._readall(file, 4))
    elif address_type == b"\x03":
        # Domain name: a single length byte, then the name itself.
        name_length = ord(self._readall(file, 1))
        address = self._readall(file, name_length)
    else:
        raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
    # Port is a big-endian unsigned short.
    port = struct.unpack(">H", self._readall(file, 2))[0]
    return address, port
def _negotiate_SOCKS4(self, dest_addr, dest_port):
    """
    Negotiates a connection through a SOCKS4 server.

    Sends a SOCKS4 CONNECT request for (dest_addr, dest_port) over this
    socket and validates the 8-byte reply. On success, sets
    self.proxy_sockname and self.proxy_peername.

    Raises:
        GeneralProxyError: on a malformed server reply.
        SOCKS4Error: when the server refuses the connection.
    """
    proxy_type, addr, port, rdns, username, password = self.proxy
    writer = self.makefile("wb")
    reader = self.makefile("rb", 0)  # buffering=0 renamed in Python 3
    try:
        # Check if the destination address provided is an IP address
        remote_resolve = False
        try:
            addr_bytes = socket.inet_aton(dest_addr)
        except socket.error:
            # It's a DNS name. Check where it should be resolved.
            if rdns:
                # SOCKS4a marker address 0.0.0.1: tells the server to
                # resolve the name we append after the userid.
                addr_bytes = b"\x00\x00\x00\x01"
                remote_resolve = True
            else:
                addr_bytes = socket.inet_aton(socket.gethostbyname(dest_addr))
        # Construct the request packet: VN=0x04, CD=0x01 (CONNECT),
        # destination port as big-endian unsigned short.
        writer.write(struct.pack(">BBH", 0x04, 0x01, dest_port))
        writer.write(addr_bytes)
        # The username parameter is considered userid for SOCKS4
        if username:
            writer.write(username)
        writer.write(b"\x00")  # NUL terminator for the userid field
        # DNS name if remote resolving is required
        # NOTE: This is actually an extension to the SOCKS4 protocol
        # called SOCKS4A and may not be supported in all cases.
        if remote_resolve:
            writer.write(dest_addr.encode() + b"\x00")
        writer.flush()
        # Get the response from the server (fixed 8-byte reply).
        resp = self._readall(reader, 8)
        if resp[0:1] != b"\x00":
            # Bad data: reply version byte must be 0x00.
            raise GeneralProxyError("SOCKS4 proxy server sent invalid data")
        status = ord(resp[1:2])
        if status != 0x5A:
            # Connection failed: server returned an error
            error = SOCKS4_ERRORS.get(status, "Unknown error")
            raise SOCKS4Error("{0:#04x}: {1}".format(status, error))
        # Get the bound address/port from the reply (bytes 4-7 / 2-3).
        self.proxy_sockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
        if remote_resolve:
            self.proxy_peername = socket.inet_ntoa(addr_bytes), dest_port
        else:
            self.proxy_peername = dest_addr, dest_port
    finally:
        # Always release the buffered file wrappers around the socket.
        reader.close()
        writer.close()
def _negotiate_HTTP(self, dest_addr, dest_port):
    """
    Negotiates a connection through an HTTP server.

    NOTE: This currently only supports HTTP CONNECT-style proxies.

    Sends a CONNECT request for (dest_addr, dest_port) and checks only the
    first line of the proxy's response. On success, sets proxy_sockname to
    a dummy value and proxy_peername to the (resolved) destination.

    Raises:
        GeneralProxyError: on a closed connection or a malformed response.
        HTTPError: when the proxy answers with a non-200 status.
    """
    proxy_type, addr, port, rdns, username, password = self.proxy
    # If we need to resolve locally, we do this now
    addr = dest_addr if rdns else socket.gethostbyname(dest_addr)
    self.sendall(b"CONNECT " + addr.encode() + b":" + str(dest_port).encode() +
                 b" HTTP/1.1\r\n" + b"Host: " + dest_addr.encode() + b"\r\n\r\n")
    # We just need the first line to check if the connection was successful
    fobj = self.makefile()
    status_line = fobj.readline()
    fobj.close()
    if not status_line:
        raise GeneralProxyError("Connection closed unexpectedly")
    try:
        # Expected shape: "HTTP/1.1 200 Connection established"
        proto, status_code, status_msg = status_line.split(" ", 2)
    except ValueError:
        raise GeneralProxyError("HTTP proxy server sent invalid response")
    if not proto.startswith("HTTP/"):
        raise GeneralProxyError("Proxy server does not appear to be an HTTP proxy")
    try:
        status_code = int(status_code)
    except ValueError:
        raise HTTPError("HTTP proxy server did not return a valid HTTP status")
    if status_code != 200:
        error = "{0}: {1}".format(status_code, status_msg)
        if status_code in (400, 403, 405):
            # It's likely that the HTTP proxy server does not support the CONNECT tunneling method
            error += ("\n[*] Note: The HTTP proxy server may not be supported by PySocks"
                      " (must be a CONNECT tunnel proxy)")
        raise HTTPError(error)
    # HTTP proxies do not report a bound address, so use a placeholder.
    self.proxy_sockname = (b"0.0.0.0", 0)
    self.proxy_peername = addr, dest_port
# Dispatch table mapping each proxy-type constant to its (unbound)
# negotiation routine; connect() looks one up and calls it as
# negotiate(self, dest_addr, dest_port).
_proxy_negotiators = {
    SOCKS4: _negotiate_SOCKS4,
    SOCKS5: _negotiate_SOCKS5,
    HTTP: _negotiate_HTTP
}
def connect(self, dest_pair):
    """
    Connects to the specified destination through a proxy.
    Uses the same API as socket's connect().
    To select the proxy server, use set_proxy().

    dest_pair - 2-tuple of (IP/hostname, port).

    Raises:
        GeneralProxyError: on an invalid dest_pair or a socket error
            during negotiation.
        ProxyConnectionError: when the proxy server itself is unreachable.
        ProxyError subclasses: on protocol errors while negotiating.
    """
    dest_addr, dest_port = dest_pair
    if self.type == socket.SOCK_DGRAM:
        # UDP: no TCP negotiation here; bind once (if not already
        # associated) and just remember the resolved peer.
        if not self._proxyconn:
            self.bind(("", 0))
        dest_addr = socket.gethostbyname(dest_addr)
        self.proxy_peername = (dest_addr, dest_port)
        return
    proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
    # Do a minimal input check first
    if (not isinstance(dest_pair, (list, tuple))
            or len(dest_pair) != 2
            or not dest_addr
            or not isinstance(dest_port, int)):
        raise GeneralProxyError("Invalid destination-connection (host, port) pair")
    if proxy_type is None:
        # Treat like regular socket object
        _BaseSocket.connect(self, (dest_addr, dest_port))
        return
    proxy_addr = self._proxy_addr()
    try:
        # Initial connection to proxy server
        _BaseSocket.connect(self, proxy_addr)
    except socket.error as error:
        # Error while connecting to proxy
        self.close()
        proxy_addr, proxy_port = proxy_addr
        proxy_server = "{0}:{1}".format(proxy_addr.decode(), proxy_port)
        printable_type = PRINTABLE_PROXY_TYPES[proxy_type]
        msg = "Error connecting to {0} proxy {1}".format(printable_type,
                                                         proxy_server)
        raise ProxyConnectionError(msg, error)
    else:
        # Connected to proxy server, now negotiate
        try:
            # Calls negotiate_{SOCKS4, SOCKS5, HTTP}
            negotiate = self._proxy_negotiators[proxy_type]
            negotiate(self, dest_addr, dest_port)
        except socket.error as error:
            # Wrap socket errors
            self.close()
            raise GeneralProxyError("Socket error", error)
        except ProxyError:
            # Protocol error while negotiating with proxy
            self.close()
            raise
def _proxy_addr(self):
    """
    Return proxy address to connect to as tuple object.

    Falls back to the default port for the configured proxy type when no
    explicit port was set (DEFAULT_PORTS is only consulted in that case).

    Raises:
        GeneralProxyError: if no port is set and the proxy type has no
            known default port.
    """
    proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
    proxy_port = proxy_port or DEFAULT_PORTS.get(proxy_type)
    if not proxy_port:
        raise GeneralProxyError("Invalid proxy type")
    return proxy_addr, proxy_port
# GoogleScraper-master/GoogleScraper/utils.py
# -*- coding: utf-8 -*-
from itertools import zip_longest
import re
import requests
import os
def grouper(iterable, n, fillvalue=None):
    """Collect data into fixed-length chunks or blocks.

    >>> grouper('ABCDEFG', 3, 'x')
    [['A', 'B', 'C'], ['D', 'E', 'F'], ['G', 'x', 'x']]

    Args:
        iterable: An iterable
        n: How long should the tokens be
        fillvalue: What to pad non complete tokens with

    Returns:
        The tokens as list.
    """
    # The classic grouper trick: n references to ONE iterator, so
    # zip_longest pulls n consecutive items per group.
    iterators = [iter(iterable)] * n
    chunks = []
    for group in zip_longest(*iterators, fillvalue=fillvalue):
        # Drop the None padding (only relevant when fillvalue is None).
        chunks.append([item for item in group if item is not None])
    return chunks
def chunk_it(seq, num):
    """Make num chunks from elements in seq.

    >>> keywords = ['what', 'is', 'going', 'on', 'exactly']
    >>> chunk_it(keywords, 6)
    [['what'], ['is'], ['going'], ['on'], ['exactly']]
    >>> chunk_it(keywords, 2)
    [['what', 'is'], ['going', 'on', 'exactly']]

    Args:
        seq: A collection to be chunked.
        num: The number of chunks to yield.

    Returns:
        A list of at most num chunks. An empty seq yields an empty list.
    """
    if not seq:
        # Guard: without this, the division below raises ZeroDivisionError
        # (num would be clamped to len(seq) == 0).
        return []
    # Clamp num to [1, len(seq)] so we never divide by zero and never
    # produce more chunks than there are elements.
    num = max(1, min(num, len(seq)))
    avg = len(seq) / float(num)
    out = []
    last = 0.0
    while last < len(seq):
        out.append(seq[int(last):int(last + avg)])
        last += avg
    return out
def random_words(n=100, wordlength=range(10, 15)):
    """Read a random english wiki article and extract some words.

    Args:
        n: The number of words to return. Returns all found ones, if n is
            more than we were able to find.
        wordlength: A range that forces the words to have a specific length.

    Returns:
        A list of at most n unique random words.
    """
    # range() excludes its stop value, so the regex upper bound must be
    # stop - 1 to honour the documented length constraint.
    valid_words = re.compile(r'[a-zA-Z]{{{},{}}}'.format(wordlength.start, wordlength.stop - 1))
    found = list(set(valid_words.findall(requests.get('http://en.wikipedia.org/wiki/Special:Random').text)))
    # Slicing never raises IndexError, so no try/except is needed: a short
    # list simply returns everything that was found.
    return found[:n]
def get_some_words(n=100):
    """Get some words, from the system dictionary if available.

    Falls back to random_words() (a network request) when
    /usr/share/dict/words does not exist.

    Args:
        n: Maximum number of words to return.

    Returns:
        A list of at most n words.
    """
    dict_path = '/usr/share/dict/words'
    if os.path.exists(dict_path):
        # Use a context manager so the file handle is always closed
        # (the original leaked it via open(...).read()).
        with open(dict_path) as f:
            words = f.read().splitlines()
        if n < len(words):
            words = words[:n]
    else:
        words = random_words(n=n)
    return words
def get_base_path():
    """Return the directory this source file lives in (symlinks resolved)."""
    module_file = os.path.realpath(__file__)
    return os.path.dirname(module_file)
if __name__ == '__main__':
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
# GoogleScraper-master/GoogleScraper/user_agents.py
# -*- coding: utf-8 -*-
import random
# Several different User-Agents to diversify the requests.
# Keep the User-Agents updated. Last update: 13th November 2014
# Get them here: http://techblog.willshouse.com/2012/01/03/most-common-user-agents/
# Pool of real-world browser User-Agent strings (desktop and mobile) used to
# diversify HTTP requests; see the update note and source link above.
user_agents = ['Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:64.0) Gecko/20100101 Firefox/64.0',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
               'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0.2 Safari/605.1.15',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
               'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:64.0) Gecko/20100101 Firefox/64.0',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/17.17134',
               'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
               'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:64.0) Gecko/20100101 Firefox/64.0',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:64.0) Gecko/20100101 Firefox/64.0',
               'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
               'Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
               'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',
               'Mozilla/5.0 (X11; Linux x86_64; rv:64.0) Gecko/20100101 Firefox/64.0',
               'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0.2 Safari/605.1.15',
               'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:64.0) Gecko/20100101 Firefox/64.0',
               'Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:64.0) Gecko/20100101 Firefox/64.0',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0',
               'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:60.0) Gecko/20100101 Firefox/60.0',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Safari/605.1.15',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 OPR/57.0.3098.116',
               'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 YaBrowser/18.11.1.805 Yowser/2.5 Safari/537.36',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299',
               'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
               'Mozilla/5.0 (iPad; CPU OS 12_1_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1',
               'Mozilla/5.0 (Windows NT 6.1; rv:60.0) Gecko/20100101 Firefox/60.0',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0.1 Safari/605.1.15',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
               'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36',
               'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 YaBrowser/18.11.1.805 Yowser/2.5 Safari/537.36',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0.3 Safari/605.1.15',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1.2 Safari/605.1.15',
               'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/71.0.3578.98 Chrome/71.0.3578.98 Safari/537.36',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0.3 Safari/605.1.15',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0.2 Safari/605.1.15',
               'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; Touch; rv:11.0) like Gecko',
               'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 OPR/57.0.3098.106',
               'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:64.0) Gecko/20100101 Firefox/64.0',
               'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 OPR/57.0.3098.116',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:65.0) Gecko/20100101 Firefox/65.0',
               'Mozilla/5.0 (Windows NT 6.1; rv:64.0) Gecko/20100101 Firefox/64.0',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:60.0) Gecko/20100101 Firefox/60.0',
               'Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:64.0) Gecko/20100101 Firefox/64.0',
               'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1.2 Safari/605.1.15',
               'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:63.0) Gecko/20100101 Firefox/63.0',
               'Mozilla/5.0 (X11; CrOS x86_64 11151.59.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.94 Safari/537.36',
               'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:64.0) Gecko/20100101 Firefox/64.0',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:64.0) Gecko/20100101 Firefox/64.0',
               'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0',
               'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko',
               'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0',
               'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko']
# Smaller pool of desktop-only User-Agent strings, used when
# random_user_agent(only_desktop=True) is requested.
desktop_user_agents = [
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.86 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_1) AppleWebKit/601.2.7 (KHTML, like Gecko) Version/9.0.1 Safari/601.2.7',
    'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.86 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.80 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.80 Safari/537.36',
]
def random_user_agent(only_desktop=False):
    """Return a User-Agent string chosen uniformly at random.

    Args:
        only_desktop: If True, pick only from the desktop-only pool
            (desktop_user_agents); otherwise use the full user_agents pool.

    Returns:
        A User-Agent string.
    """
    if only_desktop:
        return random.choice(desktop_user_agents)
    return random.choice(user_agents)
# GoogleScraper-master/GoogleScraper/parsing-new-version.py
# -*- coding: utf-8 -*-
import sys
import os
import re
import lxml.html
from lxml.html.clean import Cleaner
from urllib.parse import unquote
import pprint
import logging
from cssselect import HTMLTranslator
logger = logging.getLogger(__name__)
class InvalidSearchTypeException(Exception):
    """Raised when a Parser has no CSS selectors for the requested search type."""
class UnknowUrlException(Exception):
    """Error type for unrecognized URLs (not raised in this module; presumably used by callers)."""
class NoParserForSearchEngineException(Exception):
    """Error type for search engines without a Parser subclass (not raised in this module; presumably used by a parser factory)."""
class Parser():
    """Parses SERP pages.

    Each search engine results page (SERP) has a similar layout:
    The main search results are usually in a html container element (#main, .results, #leftSide).
    There might be separate columns for other search results (like ads for example). Then each
    result contains basically a link, a snippet and a description (usually some text on the
    target site). It's really astonishing how similar other search engines are to Google.

    Each child class (that can actually parse a concrete search engine results page) needs
    to specify css selectors for the different search types (Like normal search, news search, video search, ...).

    Attributes:
        search_results: The results after parsing.
    """

    # this selector specifies the element that notifies the user whether the search
    # had any results.
    no_results_selector = []

    # if subclasses specify a value for this attribute and the attribute
    # targets an element in the serp page, then there weren't any results
    # for the original query.
    effective_query_selector = []

    # the selector that gets the number of results (guessed) as shown by the search engine.
    num_results_search_selectors = []

    # some search engines show on which page we currently are. If supported, this selector will get this value.
    page_number_selectors = []

    # The supported search types. For instance, Google supports Video Search, Image Search, News search
    search_types = []

    # Each subclass of Parser may declare an arbitrary amount of attributes that
    # follow a naming convention like this:
    # *_search_selectors
    # where the asterisk may be replaced with arbitrary identifier names.
    # Any of these attributes represent css selectors for a specific search type.
    # If you didn't specify the search type in the search_types list, this attribute
    # will not be evaluated and no data will be parsed.

    def __init__(self, search_type='normal', html='', query=''):
        """Create new Parser instance and parse all information.

        Args:
            html: The raw html from the search engine search. If not provided, you can parse
                the data later by calling parse(html) directly.
            searchtype: The search type. By default "normal"

        Raises:
            Assertion error if the subclassed
            specific parser cannot handle the settings.
        """
        self.searchtype = search_type
        assert self.searchtype in self.search_types, 'search type "{}" is not supported in {}'.format(
            self.searchtype,
            self.__class__.__name__
        )
        self.query = query
        self.html = html
        self.dom = None
        self.search_results = {}
        self.num_results_for_query = ''
        self.num_results = 0
        self.effective_query = ''
        self.page_number = -1
        self.no_results = False
        # to be set by the implementing sub classes
        self.search_engine = ''
        # short alias because we use it so extensively
        self.css_to_xpath = HTMLTranslator().css_to_xpath
        if self.html:
            self.parse()

    def parse(self, html=None):
        """Public function to start parsing the search engine results.

        Args:
            html: The raw html data to extract the SERP entries from.
        """
        if html:
            self.html = html
        # lets do the actual parsing
        self._parse()
        # Apply subclass specific behaviour after parsing has happened
        # This is needed because different parsers need to clean/modify
        # the parsed data uniquely.
        self.after_parsing()

    def _parse_lxml(self, cleaner=None):
        """Parse self.html into self.dom, optionally cleaning the parsed tree.

        BUGFIX: the document must be parsed *before* an optional cleaner is
        applied. Previously the cleaner ran on the stale (possibly None)
        self.dom and its output was immediately overwritten by the freshly
        parsed document, so cleaning never had any effect.
        """
        try:
            parser = lxml.html.HTMLParser(encoding='utf-8')
            self.dom = lxml.html.document_fromstring(self.html, parser=parser)
            if cleaner:
                self.dom = cleaner.clean_html(self.dom)
            self.dom.resolve_base_href()
        except Exception as e:
            # maybe wrong encoding
            logger.error(e)

    def _parse(self, cleaner=None):
        """Internal parse the dom according to the provided css selectors.

        Raises: InvalidSearchTypeException if no css selectors for the searchtype could be found.
        """
        self.num_results = 0
        self._parse_lxml(cleaner)
        # try to parse the number of results.
        attr_name = self.searchtype + '_search_selectors'
        selector_dict = getattr(self, attr_name, None)
        # get the appropriate css selectors for the num_results for the keyword
        num_results_selector = getattr(self, 'num_results_search_selectors', None)
        self.num_results_for_query = self.first_match(num_results_selector, self.dom)
        if not self.num_results_for_query:
            logger.debug('{}: Cannot parse num_results from serp page with selectors {}'.format(
                self.__class__.__name__, num_results_selector))
        # get the current page we are at. Sometimes search engines don't show this.
        try:
            self.page_number = int(self.first_match(self.page_number_selectors, self.dom))
        except ValueError:
            self.page_number = -1
        # let's see if the search query had no direct hits (engine substituted a query)
        self.effective_query = self.first_match(self.effective_query_selector, self.dom)
        if self.effective_query:
            logger.debug('{}: There was no search hit for the search query. Search engine used {} instead.'.format(
                self.__class__.__name__, self.effective_query))
        else:
            self.effective_query = ''
        # the element that notifies the user about no results.
        self.no_results_text = self.first_match(self.no_results_selector, self.dom)
        # get the stuff that is of interest in SERP pages.
        if not selector_dict and not isinstance(selector_dict, dict):
            raise InvalidSearchTypeException('There is no such attribute: {}. No selectors found'.format(attr_name))
        for result_type, selector_class in selector_dict.items():
            self.search_results[result_type] = []
            for selector_specific, selectors in selector_class.items():
                if 'result_container' in selectors and selectors['result_container']:
                    css = '{container} {result_container}'.format(**selectors)
                else:
                    css = selectors['container']
                results = self.dom.xpath(
                    self.css_to_xpath(css)
                )
                to_extract = set(selectors.keys()) - {'container', 'result_container'}
                selectors_to_use = {key: selectors[key] for key in to_extract if key in selectors.keys()}
                for index, result in enumerate(results):
                    # Let's add primitive support for CSS3 pseudo selectors
                    # We just need two of them
                    # ::text
                    # ::attr(attribute)
                    # You say we should use xpath expressions instead?
                    # Maybe you're right, but they are complicated when it comes to classes,
                    # have a look here: http://doc.scrapy.org/en/latest/topics/selectors.html
                    serp_result = {}
                    # keys are for example 'link', 'snippet', 'visible-url', ...
                    # selector is the selector to grab these items
                    for key, selector in selectors_to_use.items():
                        serp_result[key] = self.advanced_css(selector, result)
                    serp_result['rank'] = index + 1
                    # Avoid duplicates: serp_result elements where 'link' and 'title' are identical.
                    # BUGFIX: use .get() so result types whose selector set has no 'title'
                    # (e.g. image results) no longer raise KeyError when two entries share a link.
                    duplicates = [e for e in self.search_results[result_type]
                                  if e.get('link') == serp_result.get('link')
                                  and e.get('title') == serp_result.get('title')]
                    if not duplicates:
                        self.search_results[result_type].append(serp_result)
                        self.num_results += 1

    def advanced_css(self, selector, element):
        """Evaluate the :text and ::attr(attr-name) additionally.

        Args:
            selector: A css selector.
            element: The element on which to apply the selector.

        Returns:
            The targeted element, or None when nothing matched.
        """
        value = None
        if selector.endswith('::text'):
            try:
                value = element.xpath(self.css_to_xpath(selector.split('::')[0]))[0].text_content()
            except IndexError:
                pass
        else:
            match = re.search(r'::attr\((?P<attr>.*)\)$', selector)
            if match:
                attr = match.group('attr')
                try:
                    value = element.xpath(self.css_to_xpath(selector.split('::')[0]))[0].get(attr)
                except IndexError:
                    pass
            else:
                try:
                    value = element.xpath(self.css_to_xpath(selector))[0].text_content()
                except IndexError:
                    pass
        return value

    def first_match(self, selectors, element):
        """Get the first match.

        Args:
            selectors: The selectors to test for a match.
            element: The element on which to apply the selectors.

        Returns:
            The very first match or False if all selectors didn't match anything.
        """
        assert isinstance(selectors, list), 'selectors must be of type list!'
        for selector in selectors:
            if selector:
                try:
                    match = self.advanced_css(selector, element=element)
                    if match:
                        return match
                except IndexError:
                    pass
        return False

    def after_parsing(self):
        """Subclass specific behaviour after parsing happened.

        Override in subclass to add search engine specific behaviour.
        Commonly used to clean the results.
        """

    def __str__(self):
        """Return a nicely formatted overview of the results."""
        return pprint.pformat(self.search_results)

    @property
    def cleaned_html(self):
        # Try to parse the provided HTML string using lxml
        # strip all unnecessary information to save space
        cleaner = Cleaner()
        cleaner.scripts = True
        cleaner.javascript = True
        cleaner.comments = True
        cleaner.style = True
        self.dom = cleaner.clean_html(self.dom)
        # NOTE: assert is stripped under -O; it only guards against calling
        # this property before parse() has populated self.dom.
        assert len(self.dom), 'The html needs to be parsed to get the cleaned html'
        return lxml.html.tostring(self.dom)

    def iter_serp_items(self):
        """Yields the key and index of any item in the serp results that has a link value"""
        for key, value in self.search_results.items():
            if isinstance(value, list):
                for i, item in enumerate(value):
                    # .get() instead of ['link'] mirrors the duplicate-check fix:
                    # identical behaviour when the key exists, no KeyError otherwise.
                    if isinstance(item, dict) and item.get('link'):
                        yield (key, i)
"""
Here follow the different classes that provide CSS selectors
for different types of SERP pages of several common search engines.
Just look at them and add your own selectors in a new class if you
want the Scraper to support them.
You can easily just add new selectors to a search engine. Just follow
the attribute naming convention and the parser will recognize them:
If you provide a dict with a name like finance_search_selectors,
then you're adding a new search type with the name finance.
Each class needs a attribute called num_results_search_selectors, that
extracts the number of searches that were found by the keyword.
Please note:
The actual selectors are wrapped in a dictionary to clarify with which IP
they were requested. The key to the wrapper div allows to specify distinct
criteria to whatever settings you used when you requested the page. So you
might add your own selectors for different User-Agents, distinct HTTP headers, what-
ever you may imagine. This allows the most dynamic parsing behaviour and makes
it very easy to grab all data the site has to offer.
"""
class GoogleParser(Parser):
    """Parses SERP pages of the Google search engine."""

    search_engine = 'google'

    search_types = ['normal', 'image']

    effective_query_selector = ['#topstuff .med > b::text', '.med > a > b::text']

    no_results_selector = []

    num_results_search_selectors = ['#resultStats']

    page_number_selectors = ['#navcnt td.cur::text']

    # Selectors keyed first by result type ('results', 'ads_main', ...),
    # then by the request profile (IP location / request style) they were
    # captured with — see the module docstring for the wrapping convention.
    normal_search_selectors = {
        'results': {
            'us_ip': {
                'container': '#center_col',
                'result_container': 'div.g ',
                'link': 'h3.r > a:first-child::attr(href)',
                'snippet': 'div.s span.st::text',
                'title': 'h3.r > a:first-child::text',
                'visible_link': 'cite::text'
            },
            'de_ip': {
                'container': '#center_col',
                'result_container': 'li.g ',
                'link': 'h3.r > a:first-child::attr(href)',
                'snippet': 'div.s span.st::text',
                'title': 'h3.r > a:first-child::text',
                'visible_link': 'cite::text'
            },
            'de_ip_news_items': {
                'container': 'li.card-section',
                'link': 'a._Dk::attr(href)',
                'snippet': 'span._dwd::text',
                'title': 'a._Dk::text',
                'visible_link': 'cite::text'
            },
        },
        'ads_main': {
            'us_ip': {
                'container': '#center_col',
                'result_container': 'li.ads-ad',
                'link': 'h3.r > a:first-child::attr(href)',
                'snippet': 'div.s span.st::text',
                'title': 'h3.r > a:first-child::text',
                'visible_link': '.ads-visurl cite::text',
            },
            'de_ip': {
                'container': '#center_col',
                'result_container': '.ads-ad',
                'link': 'h3 > a:first-child::attr(href)',
                'snippet': '.ads-creative::text',
                'title': 'h3 > a:first-child::text',
                'visible_link': '.ads-visurl cite::text',
            }
        },
        # those css selectors are probably not worth much
        'maps_local': {
            'de_ip': {
                'container': '#center_col',
                'result_container': '.ccBEnf > div',
                'link': 'link::attr(href)',
                'snippet': 'div.rl-qs-crs-t::text',
                'title': 'div[role="heading"] span::text',
                'rating': 'span.BTtC6e::text',
                'num_reviews': '.rllt__details::text',
            }
        },
        'ads_aside': {
        }
    }

    # NOTE(review): the 'de_ip' image selectors provide no 'title' key, so
    # image serp_result dicts carry only 'link' (and 'rank').
    image_search_selectors = {
        'results': {
            'de_ip': {
                'container': 'li#isr_mc',
                'result_container': 'div.rg_di',
                'link': 'a.rg_l::attr(href)'
            },
            'de_ip_raw': {
                'container': '.images_table',
                'result_container': 'tr td',
                'link': 'a::attr(href)',
                'visible_link': 'cite::text',
            }
        }
    }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def after_parsing(self):
        """Clean the urls.

        A typical scraped results looks like the following:

        '/url?q=http://www.youtube.com/user/Apple&sa=U&ei=\
        lntiVN7JDsTfPZCMgKAO&ved=0CFQQFjAO&usg=AFQjCNGkX65O-hKLmyq1FX9HQqbb9iYn9A'

        Clean with a short regex.
        """
        super().after_parsing()
        if self.searchtype == 'normal':
            # Decide whether this page had any hits at all.
            if self.num_results > 0:
                self.no_results = False
            elif self.num_results <= 0:
                self.no_results = True
            if 'No results found for' in self.html or 'did not match any documents' in self.html:
                self.no_results = True
            # finally try in the snippets: if the (unquoted) query text
            # appears in any snippet, there were results after all.
            if self.no_results is True:
                for key, i in self.iter_serp_items():
                    if 'snippet' in self.search_results[key][i] and self.query:
                        if self.query.replace('"', '') in self.search_results[key][i]['snippet']:
                            self.no_results = False
        # Extract the real target url out of Google's redirect wrapper.
        clean_regexes = {
            'normal': r'/url\?q=(?P<url>.*?)&sa=U&ei=',
            'image': r'imgres\?imgurl=(?P<url>.*?)&'
        }
        for key, i in self.iter_serp_items():
            result = re.search(
                clean_regexes[self.searchtype],
                self.search_results[key][i]['link']
            )
            if result:
                self.search_results[key][i]['link'] = unquote(result.group('url'))
class YandexParser(Parser):
    """Parses SERP pages of the Yandex search engine."""

    search_engine = 'yandex'

    search_types = ['normal', 'image']

    no_results_selector = ['.message .misspell__message::text']

    effective_query_selector = ['.misspell__message .misspell__link']

    # @TODO: In december 2015, I saw that yandex only shows the number of search results in the search input field
    # with javascript. One can scrape it in plain http mode, but the values are hidden in some javascript and not
    # accessible with normal xpath/css selectors. A normal text search is done.
    num_results_search_selectors = ['.serp-list .serp-adv__found::text', '.input__found_visibility_visible font font::text']

    page_number_selectors = ['.pager__group .button_checked_yes span::text']

    normal_search_selectors = {
        'results': {
            'de_ip': {
                'container': '.serp-list',
                'result_container': '.serp-item',
                'link': 'a.link::attr(href)',
                'snippet': 'div.text-container::text',
                'title': 'div.organic__url-text::text',
                'visible_link': '.typo_type_greenurl::text'
            }
        }
    }

    # 'de_ip' grabs the onmousedown JS payload (which embeds the image url),
    # 'de_ip_raw' the plain href — both are unwrapped in after_parsing().
    image_search_selectors = {
        'results': {
            'de_ip': {
                'container': '.page-layout__content-wrapper',
                'result_container': '.serp-item__preview',
                'link': '.serp-item__preview .serp-item__link::attr(onmousedown)'
            },
            'de_ip_raw': {
                'container': '.page-layout__content-wrapper',
                'result_container': '.serp-item__preview',
                'link': '.serp-item__preview .serp-item__link::attr(href)'
            }
        }
    }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def after_parsing(self):
        """Clean the urls.

        Normally Yandex image search store the image url in the onmousedown attribute in a json object. Its
        pretty messsy. This method grabs the link with a quick regex.

        c.hit({"dtype":"iweb","path":"8.228.471.241.184.141","pos":69,"reqid":\
        "1418919408668565-676535248248925882431999-ws35-986-IMG-p2"}, \
        {"href":"http://www.thewallpapers.org/wallpapers/3/382/thumb/600_winter-snow-nature002.jpg"});

        Sometimes the img url is also stored in the href attribute (when requesting with raw http packets).
        href="/images/search?text=snow&img_url=\
        http%3A%2F%2Fwww.proza.ru%2Fpics%2F2009%2F12%2F07%2F1290.jpg&pos=2&rpt=simage&pin=1">
        """
        super().after_parsing()
        if self.searchtype == 'normal':
            self.no_results = False
            if self.no_results_text:
                # Yandex's "nothing was found for your query" message (must
                # stay in Russian — it is matched against the live page text).
                self.no_results = 'По вашему запросу ничего не нашлось' in self.no_results_text
            if self.num_results == 0:
                self.no_results = True
            # very hackish, probably prone to all kinds of errors.
            if not self.num_results_for_query:
                substr = 'function() { var title = "%s —' % self.query
                try:
                    i = self.html.index(substr)
                    if i:
                        self.num_results_for_query = re.search(r'— (.)*?"', self.html[i:i+len(self.query) + 150]).group()
                except Exception as e:
                    logger.debug(str(e))
        if self.searchtype == 'image':
            for key, i in self.iter_serp_items():
                # Try the JSON-ish onmousedown payload first, then the
                # img_url query parameter of the raw href.
                for regex in (
                    r'\{"href"\s*:\s*"(?P<url>.*?)"\}',
                    r'img_url=(?P<url>.*?)&'
                ):
                    result = re.search(regex, self.search_results[key][i]['link'])
                    if result:
                        self.search_results[key][i]['link'] = result.group('url')
                        break
class BingParser(Parser):
    """Parses SERP pages of the Bing search engine."""

    search_engine = 'bing'

    search_types = ['normal', 'image']

    no_results_selector = ['#b_results > .b_ans::text']

    num_results_search_selectors = ['.sb_count']

    effective_query_selector = ['#sp_requery a > strong', '#sp_requery + #sp_recourse a::attr(href)']

    page_number_selectors = ['.sb_pagS::text']

    # Selector sets keyed by result type, then by layout variant
    # (e.g. 'us_ip'/'de_ip' = layouts observed from US/German IPs).
    normal_search_selectors = {
        'results': {
            'us_ip': {
                'container': '#b_results',
                'result_container': '.b_algo',
                'link': 'h2 > a::attr(href)',
                'snippet': '.b_caption > p::text',
                'title': 'h2::text',
                'visible_link': 'cite::text'
            },
            'de_ip': {
                'container': '#b_results',
                'result_container': '.b_algo',
                'link': 'h2 > a::attr(href)',
                'snippet': '.b_caption > p::text',
                'title': 'h2::text',
                'visible_link': 'cite::text'
            },
            'de_ip_news_items': {
                'container': 'ul.b_vList li',
                'link': ' h5 a::attr(href)',
                'snippet': 'p::text',
                'title': ' h5 a::text',
                'visible_link': 'cite::text'
            },
        },
        # advertisement results shown in the main column
        'ads_main': {
            'us_ip': {
                'container': '#b_results .b_ad',
                'result_container': '.sb_add',
                'link': 'h2 > a::attr(href)',
                'snippet': '.sb_addesc::text',
                'title': 'h2 > a::text',
                'visible_link': 'cite::text'
            },
            'de_ip': {
                'container': '#b_results .b_ad',
                'result_container': '.sb_add',
                'link': 'h2 > a::attr(href)',
                'snippet': '.b_caption > p::text',
                'title': 'h2 > a::text',
                'visible_link': 'cite::text'
            }
        }
    }

    image_search_selectors = {
        'results': {
            'ch_ip': {
                'container': '#dg_c .imgres',
                'result_container': '.dg_u',
                # the image metadata lives in the 'm' attribute (json-ish blob)
                'link': 'a.dv_i::attr(m)'
            },
        }
    }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def after_parsing(self):
        """Clean the urls.

        The image url data is in the m attribute.

        m={ns:"images.1_4",k:"5018",mid:"46CE8A1D71B04B408784F0219B488A5AE91F972E",
        surl:"http://berlin-germany.ca/",imgurl:"http://berlin-germany.ca/images/berlin250.jpg",
        oh:"184",tft:"45",oi:"http://berlin-germany.ca/images/berlin250.jpg"}
        """
        super().after_parsing()

        if self.searchtype == 'normal':
            self.no_results = False
            if self.no_results_text:
                # Bing's "no results" answer either echoes the query or asks
                # 'Do you want results only for ...'.
                self.no_results = self.query in self.no_results_text \
                    or 'Do you want results only for' in self.no_results_text

        if self.searchtype == 'image':
            # Pull the real image url out of the 'm' attribute blob.
            for key, i in self.iter_serp_items():
                for regex in (
                        r'imgurl:"(?P<url>.*?)"',
                ):
                    result = re.search(regex, self.search_results[key][i]['link'])
                    if result:
                        self.search_results[key][i]['link'] = result.group('url')
                        break
class YahooParser(Parser):
    """Parses SERP pages of the Yahoo search engine."""

    search_engine = 'yahoo'

    search_types = ['normal', 'image']

    no_results_selector = []

    effective_query_selector = ['.msg #cquery a::attr(href)']

    num_results_search_selectors = ['#pg > span:last-child', '.compPagination span::text']

    page_number_selectors = ['#pg > strong::text']

    # Selector sets keyed by result type, then by layout variant.
    normal_search_selectors = {
        'results': {
            'de_ip': {
                'container': '#main',
                'result_container': '.res',
                'link': 'div > h3 > a::attr(href)',
                'snippet': 'div.abstr::text',
                'title': 'div > h3 > a::text',
                'visible_link': 'span.url::text'
            },
            # layout change observed in december 2015
            'de_ip_december_2015': {
                'container': '#main',
                'result_container': '.searchCenterMiddle li',
                'link': 'h3.title a::attr(href)',
                'snippet': '.compText p::text',
                'title': 'h3.title a::text',
                'visible_link': 'span::text'
            },
        },
    }

    image_search_selectors = {
        'results': {
            'ch_ip': {
                'container': '#results',
                'result_container': '#sres > li',
                'link': 'a::attr(href)'
            },
        }
    }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def after_parsing(self):
        """Clean the urls.

        The url is in the href attribute and the &imgurl= parameter.

        <a id="yui_3_5_1_1_1419284335995_1635" aria-label="<b>Matterhorn</b> sunrise"
        href="/images/view;_ylt=AwrB8phvj5hU7moAFzOJzbkF;_ylu=\
        X3oDMTIyc3ZrZ3RwBHNlYwNzcgRzbGsDaW1nBG9pZANmNTgyY2MyYTY4ZmVjYTI5YmYwNWZlM2E3ZTc1YzkyMARncG9zAzEEaXQDYmluZw--?
        .origin=&back=https%3A%2F%2Fimages.search.yahoo.com%2Fsearch%2Fimages%3F\
        p%3Dmatterhorn%26fr%3Dyfp-t-901%26fr2%3Dpiv-web%26tab%3Dorganic%26ri%3D1&w=4592&h=3056&
        imgurl=www.summitpost.org%2Fimages%2Foriginal%2F699696.JPG&rurl=http%3A%2F%2Fwww.summitpost.org\
        %2Fmatterhorn-sunrise%2F699696&size=5088.0KB&
        name=%3Cb%3EMatterhorn%3C%2Fb%3E+sunrise&p=matterhorn&oid=f582cc2a68feca29bf05fe3a7e75c920&fr2=piv-web&
        fr=yfp-t-901&tt=%3Cb%3EMatterhorn%3C%2Fb%3E+sunrise&b=0&ni=21&no=1&ts=&tab=organic&
        sigr=11j056ue0&sigb=134sbn4gc&sigi=11df3qlvm&sigt=10pd8j49h&sign=10pd8j49h&.crumb=qAIpMoHvtm1&\
        fr=yfp-t-901&fr2=piv-web">
        """
        super().after_parsing()

        if self.searchtype == 'normal':
            self.no_results = False
            if self.num_results == 0:
                self.no_results = True

            # a '#cquery' element means Yahoo substituted the query, which is
            # treated here as "no results for the original query".
            if len(self.dom.xpath(self.css_to_xpath('#cquery'))) >= 1:
                self.no_results = True

            # Drop entries without a visible link.
            # NOTE(review): this deletes from self.search_results while
            # iter_serp_items() is iterating over it; whether that skips
            # items depends on how iter_serp_items is implemented — verify.
            for key, i in self.iter_serp_items():
                if self.search_results[key][i]['visible_link'] is None:
                    del self.search_results[key][i]

        if self.searchtype == 'image':
            for key, i in self.iter_serp_items():
                for regex in (
                        r'&imgurl=(?P<url>.*?)&',
                ):
                    result = re.search(regex, self.search_results[key][i]['link'])
                    if result:
                        # TODO: Fix this manual protocol adding by parsing "rurl"
                        self.search_results[key][i]['link'] = 'http://' + unquote(result.group('url'))
                        break
class BaiduParser(Parser):
    """Parses SERP pages of the Baidu search engine."""

    search_engine = 'baidu'

    search_types = ['normal', 'image']

    num_results_search_selectors = ['#container .nums']

    no_results_selector = []

    # no such thing for baidu
    effective_query_selector = ['']

    page_number_selectors = ['.fk_cur + .pc::text']

    # Selector sets keyed by result type, then by layout variant.
    normal_search_selectors = {
        'results': {
            'de_ip': {
                'container': '#content_left',
                'result_container': '.result-op',
                'link': 'h3 > a.t::attr(href)',
                'snippet': '.c-abstract::text',
                'title': 'h3 > a.t::text',
                'visible_link': 'span.c-showurl::text'
            },
            # layout served when javascript is disabled
            'nojs': {
                'container': '#content_left',
                'result_container': '.result',
                'link': 'h3 > a::attr(href)',
                'snippet': '.c-abstract::text',
                'title': 'h3 > a::text',
                'visible_link': 'span.g::text'
            }
        },
    }

    image_search_selectors = {
        'results': {
            'ch_ip': {
                'container': '#imgContainer',
                'result_container': '.pageCon > li',
                'link': '.imgShow a::attr(href)'
            },
        }
    }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def after_parsing(self):
        """Clean the urls.

        href="/i?ct=503316480&z=&tn=baiduimagedetail&ipn=d&word=matterhorn&step_word=&ie=utf-8&in=9250&
        cl=2&lm=-1&st=&cs=3326243323,1574167845&os=1495729451,4260959385&pn=0&rn=1&di=69455168860&ln=1285&
        fr=&&fmq=1419285032955_R&ic=&s=&se=&sme=0&tab=&width=&height=&face=&is=&istype=&ist=&jit=&
        objurl=http%3A%2F%2Fa669.phobos.apple.com%2Fus%2Fr1000%2F077%2FPurple%2F\
        v4%2F2a%2Fc6%2F15%2F2ac6156c-e23e-62fd-86ee-7a25c29a6c72%2Fmzl.otpvmwuj.1024x1024-65.jpg&adpicid=0"
        """
        super().after_parsing()

        # BUGFIX: this previously tested `self.search_engine == 'normal'`,
        # but self.search_engine is the constant 'baidu', so the no-results
        # detection below was dead code. All sibling parsers (Yandex, Bing,
        # Yahoo, ...) test self.searchtype here.
        if self.searchtype == 'normal':
            # '.hit_top_new' is Baidu's "no/corrected results" banner.
            if len(self.dom.xpath(self.css_to_xpath('.hit_top_new'))) >= 1:
                self.no_results = True

        if self.searchtype == 'image':
            # The real image url is urlencoded in the objurl= GET parameter.
            for key, i in self.iter_serp_items():
                for regex in (
                        r'&objurl=(?P<url>.*?)&',
                ):
                    result = re.search(regex, self.search_results[key][i]['link'])
                    if result:
                        self.search_results[key][i]['link'] = unquote(result.group('url'))
                        break
class DuckduckgoParser(Parser):
    """Parses SERP pages of the Duckduckgo search engine."""

    search_engine = 'duckduckgo'

    search_types = ['normal']

    num_results_search_selectors = []

    no_results_selector = []

    effective_query_selector = ['']

    # duckduckgo is loads next pages with ajax
    page_number_selectors = ['']

    # Selector sets keyed by result type, then by layout variant.
    normal_search_selectors = {
        'results': {
            'de_ip': {
                'container': '#links',
                'result_container': '.result',
                'link': '.result__title > a::attr(href)',
                'snippet': 'result__snippet::text',
                'title': '.result__title > a::text',
                'visible_link': '.result__url__domain::text'
            },
            'non_javascript_mode': {
                'container': '#content',
                'result_container': '.results_links',
                'link': '.links_main > a::attr(href)',
                'snippet': '.snippet::text',
                'title': '.links_main > a::text',
                'visible_link': '.url::text'
            },
        },
    }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def after_parsing(self):
        """Detect empty result pages after the generic parsing step."""
        super().after_parsing()

        if self.searchtype == 'normal':
            # Best effort: the '.no-results' element may be missing or the
            # dom may be unusable — in either case just fall through to the
            # num_results based check below.
            # BUGFIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; narrowed to Exception.
            try:
                if 'No more results.' in self.dom.xpath(self.css_to_xpath('.no-results'))[0].text_content():
                    self.no_results = True
            except Exception:
                pass

            if self.num_results > 0:
                self.no_results = False
            elif self.num_results <= 0:
                self.no_results = True
class AskParser(Parser):
    """Parses SERP pages of the Ask search engine."""

    search_engine = 'ask'

    search_types = ['normal']

    num_results_search_selectors = []

    no_results_selector = []

    effective_query_selector = ['#spell-check-result > a']

    page_number_selectors = ['.pgcsel .pg::text']

    # Selector sets keyed by result type, then by layout variant.
    normal_search_selectors = {
        'results': {
            'de_ip': {
                'container': '#midblock',
                'result_container': '.ptbs.ur',
                'link': '.abstract > a::attr(href)',
                'snippet': '.abstract::text',
                'title': '.txt_lg.b::text',
                'visible_link': '.durl span::text'
            },
            # layout change observed in december 2015
            'de_ip_december_2015': {
                'container': '.l-mid-content',
                'result_container': '.web-result',
                'link': '.web-result-title > a::attr(href)',
                'snippet': '.web-result-description::text',
                'title': '.web-result-title > a::text',
                'visible_link': '.web-result-url::text'
            },
            # as requested by httm mode
            'de_ip_december_2015_raw_http': {
                'container': '#midblock',
                'result_container': '#teoma-results .wresult',
                'link': 'a.title::attr(href)',
                'snippet': '.abstract::text',
                'title': 'a.title::text',
                'visible_link': '.durl span::text'
            }
        },
    }
class BlekkoParser(Parser):
    """Parses SERP pages of the Blekko search engine."""

    search_engine = 'blekko'

    search_types = ['normal']

    effective_query_selector = ['']

    no_results_selector = []

    num_results_search_selectors = []

    # NOTE(review): these selectors are identical to DuckduckgoParser's
    # 'de_ip' set — presumably copied as a placeholder; verify against a
    # real blekko SERP before relying on them.
    normal_search_selectors = {
        'results': {
            'de_ip': {
                'container': '#links',
                'result_container': '.result',
                'link': '.result__title > a::attr(href)',
                'snippet': 'result__snippet::text',
                'title': '.result__title > a::text',
                'visible_link': '.result__url__domain::text'
            }
        },
    }
def get_parser_by_url(url):
    """Get the appropriate parser by an search engine url.

    Args:
        url: The url that was used to issue the search

    Returns:
        The correct parser that can parse results for this url.

    Raises:
        UnknowUrlException if no parser could be found for the url.
    """
    # CONSISTENCY FIX: the last two branches were bare `if` statements while
    # the rest of the chain used `elif`, so a later pattern could silently
    # override an earlier match. The patterns are mutually exclusive, so
    # unifying the chain to `elif` preserves behaviour for every url.
    parser = None

    if re.search(r'^http[s]?://www\.google', url):
        parser = GoogleParser
    elif re.search(r'^http://yandex\.ru', url):
        parser = YandexParser
    elif re.search(r'^http://www\.bing\.', url):
        parser = BingParser
    elif re.search(r'^http[s]?://search\.yahoo.', url):
        parser = YahooParser
    elif re.search(r'^http://www\.baidu\.com', url):
        parser = BaiduParser
    elif re.search(r'^https://duckduckgo\.com', url):
        parser = DuckduckgoParser
    elif re.search(r'^http[s]?://[a-z]{2}?\.ask', url):
        parser = AskParser
    elif re.search(r'^http[s]?://blekko', url):
        parser = BlekkoParser

    if not parser:
        raise UnknowUrlException('No parser for {}.'.format(url))

    return parser
def get_parser_by_search_engine(search_engine):
    """Get the appropriate parser for the search_engine

    Args:
        search_engine: The name of a search_engine.

    Returns:
        A parser for the search_engine

    Raises:
        NoParserForSearchEngineException if no parser could be found for the name.
    """
    # Table-driven dispatch instead of a long if/elif chain.
    engine_to_parser = {
        'google': GoogleParser,
        'googleimg': GoogleParser,
        'yandex': YandexParser,
        'bing': BingParser,
        'yahoo': YahooParser,
        'baidu': BaiduParser,
        'baiduimg': BaiduParser,
        'duckduckgo': DuckduckgoParser,
        'ask': AskParser,
        'blekko': BlekkoParser,
    }
    try:
        return engine_to_parser[search_engine]
    except KeyError:
        raise NoParserForSearchEngineException('No such parser for "{}"'.format(search_engine)) from None
def parse_serp(html='', query='', search_engine='google'):
    """Parse a SERP page with the parser matching the search engine.

    May be called both from scraping and from caching code. When called
    from caching, some information (like the current page number) is lost.

    Args:
        html: The raw html of the SERP page.
        query: The keyword that produced the page.
        search_engine: Name of the search engine the page came from.

    Returns:
        The parsed search results of the page.
    """
    parser_class = get_parser_by_search_engine(search_engine)
    parser_instance = parser_class(html=html, query=query)
    parser_instance.parse(html)
    return parser_instance.search_results
if __name__ == '__main__':
    """Originally part of https://github.com/NikolaiT/GoogleScraper.

    Only for testing purposes: May be called directly with an search engine
    search url. For example:

    python3 parsing.py 'http://yandex.ru/yandsearch?text=GoogleScraper&lr=178&csg=82%2C4317%2C20%2C20%2C0%2C0%2C0'

    Please note: Using this module directly makes little sense, because requesting such urls
    directly without imitating a real browser (which is done in my GoogleScraper module) makes
    the search engines return crippled html, which makes it impossible to parse.
    But for some engines it nevertheless works (for example: yandex, google, ...).
    """
    import requests

    assert len(sys.argv) >= 2, 'Usage: {} url/file'.format(sys.argv[0])
    url = sys.argv[1]
    if os.path.exists(url):
        # argv[1] is a local html file; argv[2] must name the engine.
        raw_html = open(url, 'r').read()
        parser = get_parser_by_search_engine(sys.argv[2])
    else:
        # argv[1] is a live url; derive the engine from the url itself.
        raw_html = requests.get(url).text
        parser = get_parser_by_url(url)

    # parser is a class here; instantiate and parse the fetched html.
    parser = parser(html=raw_html)
    parser.parse()
    print(parser)

    # keep a copy of the fetched page for debugging
    with open('/tmp/testhtml.html', 'w') as of:
        of.write(raw_html)
| 38,678 | 34.355576 | 151 | py |
GoogleScraper | GoogleScraper-master/GoogleScraper/log.py | # -*- coding: utf-8 -*-
import sys
import logging
"""
Loggers are created at the top of modules. Therefore each code may access
a logger. But there is a fundamental problem to this approach:
The configuration that determines the state of GoogleScraper may come from various
sources and is parsed only at runtime in the config.py module. In this config, the
loglevel is also specified.
So we need to adjust the loglevel to the value set in
the configuration for each submodule.
"""
def setup_logger(level=logging.INFO,
                 format='[%(threadName)s] - %(asctime)s - %(name)s - %(levelname)s - %(message)s',
                 logfile='googlescraper.log'):
    """Configure global log settings for GoogleScraper.

    Sets the level on the root logger and, if it has no handlers yet,
    attaches one stderr stream handler and one file handler.

    Args:
        level: The log level for the root logger (default logging.INFO).
        format: The log record format string. (Keeps the builtin-shadowing
            name for backward compatibility with keyword callers.)
        logfile: Path of the log file for the file handler.
    """
    logger = logging.getLogger()
    logger.setLevel(level)

    # Only attach handlers once, otherwise log output is duplicated.
    # See here: http://stackoverflow.com/questions/7173033/duplicate-log-output-when-using-python-logging-module
    if not logger.handlers:
        formatter = logging.Formatter(format)

        sh = logging.StreamHandler(stream=sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)

        fh = logging.FileHandler(logfile)
        fh.setFormatter(formatter)
        logger.addHandler(fh)
GoogleScraper | GoogleScraper-master/GoogleScraper/config.py | # -*- coding: utf-8 -*-
import GoogleScraper.scrape_config
import inspect
import os
# Provide a uniform load_source(name, path) helper across Python versions.
try:
    # SourceFileLoader is the recommended way in 3.3+
    from importlib.machinery import SourceFileLoader

    def load_source(name, path):
        # NOTE(review): load_module() is deprecated in current Python in
        # favour of exec_module(); kept here for behavioural compatibility.
        return SourceFileLoader(name, path).load_module()
except ImportError:
    # but it does not exist in 3.2-, so fall back to imp
    import imp
    load_source = imp.load_source
def get_config(command_line_args=None, external_configuration_file=None, config_from_library_call=None):
    """Assemble the effective GoogleScraper configuration.

    Configuration is merged from four sources, later ones overriding
    earlier ones:

        internal config module
            < external config file (user supplied, must exist and end in .py)
            < command line arguments
            < config passed when GoogleScraper is used as a library

    External configuration files may only be specified via the command
    line arguments.

    Args:
        command_line_args: Dict of options from the command line, or None.
        external_configuration_file: Path to a user supplied .py config
            file, or None.
        config_from_library_call: Dict of options from a library call,
            or None.

    Returns:
        A dict with the merged configuration (private '_' names stripped).
    """
    target = GoogleScraper.scrape_config

    def apply_options(options):
        # Overwrite the matching attributes on the internal config module.
        for name, value in options.items():
            setattr(target, name, value)

    usable_external_file = (
        external_configuration_file
        and os.path.exists(external_configuration_file)
        and external_configuration_file.endswith('.py')
    )
    if usable_external_file:
        external_module = load_source('external_config', external_configuration_file)
        apply_options(dict(inspect.getmembers(external_module)))

    if command_line_args:
        apply_options(command_line_args)

    if config_from_library_call:
        apply_options(config_from_library_call)

    return {name: value for name, value in vars(target).items() if not name.startswith('_')}
| 2,194 | 33.84127 | 104 | py |
GoogleScraper | GoogleScraper-master/GoogleScraper/version.py | __version__ = '0.2.5'
| 22 | 10.5 | 21 | py |
GoogleScraper | GoogleScraper-master/GoogleScraper/http_mode.py | # -*- coding: utf-8 -*-
import threading
import json
import datetime
import socket
from urllib.parse import urlencode
import GoogleScraper.socks as socks
from GoogleScraper.scraping import SearchEngineScrape, get_base_search_url_by_search_engine
from GoogleScraper.parsing import get_parser_by_search_engine
from GoogleScraper.user_agents import random_user_agent
import logging
logger = logging.getLogger(__name__)
# Base HTTP headers sent with every raw search request; they mimic a regular
# browser. The 'Host' and 'User-Agent' fields are set additionally per
# request by HttpScrape.
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en-US,en;q=0.5',
    'Accept-Encoding': 'gzip, deflate, sdch',
    'Connection': 'keep-alive',
}
def get_GET_params_for_search_engine(query, search_engine, page_number=1, num_results_per_page=10,
                                     search_type='normal'):
    """Build the GET parameters for a search engine request url.

    Args:
        query: The search query.
        search_engine: The search engine. Example: 'google'
        page_number: Which SERP page (1-based).
        num_results_per_page: How many entries per page.
        search_type: The search mode. Example: 'image' or 'normal'

    Returns:
        A dict with the GET parameters for the url. Empty for engines
        without special handling.
    """
    params = {}

    if search_engine == 'google':
        # always use the english interface, such that we can detect
        # state by some hard coded needles.
        params['hl'] = 'en'
        params['q'] = query
        # only set when other num results than 10.
        if num_results_per_page != 10:
            params['num'] = str(num_results_per_page)
        if page_number > 1:
            params['start'] = str((page_number - 1) * int(num_results_per_page))
        # extra parameters per google search mode
        per_type = {
            'image': {
                'oq': query,
                'site': 'imghp',
                'tbm': 'isch',
                'source': 'hp',
                # 'sa': 'X',
                'biw': 1920,
                'bih': 979,
            },
            'video': {
                'tbm': 'vid',
                'source': 'lnms',
                'sa': 'X',
                'biw': 1920,
                'bih': 881,
            },
            'news': {
                'tbm': 'nws',
                'source': 'lnms',
                'sa': 'X',
            },
        }
        params.update(per_type.get(search_type, {}))

    elif search_engine == 'yandex':
        params['text'] = query
        if page_number > 1:
            params['p'] = str(page_number - 1)

    elif search_engine == 'bing':
        # bing doesn't support variable number of results (As far as I know).
        params['q'] = query
        if page_number > 1:
            params['first'] = str(1 + (page_number - 1) * 10)

    elif search_engine == 'yahoo':
        params['p'] = query
        params['ei'] = 'UTF-8'
        if page_number > 1:
            params['b'] = str(1 + (page_number - 1) * 10)

    elif search_engine == 'baidu':
        params['wd'] = query
        params['ie'] = 'utf-8'
        if page_number > 1:
            params['pn'] = str((page_number - 1) * 10)

    elif search_engine in ('duckduckgo', 'blekko'):
        params['q'] = query

    elif search_engine == 'ask':
        params['q'] = query
        params['qsrc'] = '0'
        params['l'] = 'dir'
        params['qo'] = 'homepageSearchBox'
        if page_number > 1:
            params['page'] = str(page_number)

    return params
class HttpScrape(SearchEngineScrape, threading.Timer):
    """Offers a fast way to query any search engine using raw HTTP requests.

    Overrides the run() method of the superclass threading.Timer.
    Each thread represents a crawl for one Search Engine SERP page. Inheriting
    from threading.Timer allows the deriving class to delay execution of the run()
    method.

    This is a base class, Any supported search engine needs to subclass HttpScrape to
    implement this specific scrape type.

    Attributes:
        results: Returns the found results.
    """

    def __init__(self, config, *args, time_offset=0.0, **kwargs):
        """Initialize an HttScrape object to scrape over blocking http.

        HttpScrape inherits from SearchEngineScrape
        and from threading.Timer.
        """
        threading.Timer.__init__(self, time_offset, self.search)
        SearchEngineScrape.__init__(self, config, *args, **kwargs)

        # Bind the requests module to this instance such that each
        # instance may have an own proxy
        self.requests = __import__('requests')

        # initialize the GET parameters for the search request
        self.search_params = {}

        # initialize the HTTP headers of the search request
        # to some base values that mozilla uses with requests.
        # the Host and User-Agent field need to be set additionally.
        self.headers = headers

        # the mode
        self.scrape_method = 'http'

        # get the base search url based on the search engine.
        self.base_search_url = get_base_search_url_by_search_engine(self.config, self.search_engine_name, self.scrape_method)

        super().instance_creation_info(self.__class__.__name__)

        if self.search_engine_name == 'blekko':
            logger.critical('blekko does not support http mode.')
            self.startable = False

    def set_proxy(self):
        """Setup a socks connection for the socks module bound to this instance.

        Args:
            proxy: Namedtuple, Proxy to use for this thread.
        """
        def create_connection(address, timeout=None, source_address=None):
            sock = socks.socksocket()
            sock.connect(address)
            return sock

        # numeric protocol codes expected by socks.setdefaultproxy
        pmapping = {
            'socks4': 1,
            'socks5': 2,
            'http': 3
        }
        # Patch the socket module
        # rdns is by default on true. Never use rnds=False with TOR, otherwise you are screwed!
        socks.setdefaultproxy(pmapping.get(self.proxy.proto), self.proxy.host, int(self.proxy.port), rdns=True)
        socks.wrap_module(socket)
        socket.create_connection = create_connection

    def switch_proxy(self, proxy):
        # delegates entirely to the base class implementation
        super().switch_proxy()

    def proxy_check(self, proxy):
        """Check whether the configured proxy is actually used and online.

        Fetches the configured 'proxy_info_url' through the proxy, reports
        the resulting status/ip info to the base class and returns whether
        the proxy is usable.
        """
        assert self.proxy and self.requests, 'ScraperWorker needs valid proxy instance and requests library to make ' \
                                             'the proxy check.'

        online = False
        status = 'Proxy check failed: {host}:{port} is not used while requesting'.format(**self.proxy.__dict__)
        ipinfo = {}

        try:
            text = self.requests.get(self.config.get('proxy_info_url')).text
            try:
                ipinfo = json.loads(text)
            except ValueError:
                # response was not json; keep ipinfo empty
                pass
        except self.requests.ConnectionError as e:
            status = 'No connection to proxy server possible, aborting: {}'.format(e)
        except self.requests.Timeout as e:
            status = 'Timeout while connecting to proxy server: {}'.format(e)
        except self.requests.exceptions.RequestException as e:
            status = 'Unknown exception: {}'.format(e)

        if 'ip' in ipinfo and ipinfo['ip']:
            online = True
            status = 'Proxy is working.'
        else:
            logger.warning(status)

        super().update_proxy_status(status, ipinfo, online)

        return online

    def handle_request_denied(self, status_code=''):
        """Handle request denied by the search engine.

        This is the perfect place to distinguish the different responses
        if search engine detect exhaustive searching.

        Args:
            status_code: The status code of the HTTP response.

        Returns:
        """
        super().handle_request_denied(status_code)

    def build_search(self):
        """Build the headers and params for the search request for the search engine."""
        self.search_params = get_GET_params_for_search_engine(self.query, self.search_engine_name,
                                                              self.page_number, self.num_results_per_page,
                                                              self.search_type)

        self.parser = get_parser_by_search_engine(self.search_engine_name)
        self.parser = self.parser(config=self.config)

    def search(self, rand=True, timeout=15):
        """The actual search for the search engine.

        When raising StopScrapingException, the scraper will stop.

        When return False, the scraper tries to continue with next keyword.
        """

        success = True

        self.build_search()

        if rand:
            self.headers['User-Agent'] = random_user_agent(only_desktop=True)

        try:
            super().detection_prevention_sleep()
            super().keyword_info()

            request = self.requests.get(self.base_search_url + urlencode(self.search_params),
                                        headers=self.headers, timeout=timeout)

            self.requested_at = datetime.datetime.utcnow()
            self.html = request.text

            logger.debug('[HTTP - {url}, headers={headers}, params={params}'.format(
                url=request.url,
                headers=self.headers,
                params=self.search_params))

        except self.requests.ConnectionError as ce:
            self.status = 'Network problem occurred {}'.format(ce)
            success = False
        except self.requests.Timeout as te:
            self.status = 'Connection timeout {}'.format(te)
            success = False
        except self.requests.exceptions.RequestException as e:
            # In case of any http networking exception that wasn't caught
            # in the actual request, just end the worker.
            # NOTE(review): unlike the branches above, 'success' is NOT set
            # to False here, so this attempt is reported as successful to
            # the caller — confirm whether that is intended.
            self.status = 'Stopping scraping because {}'.format(e)
        else:
            if not request.ok:
                self.handle_request_denied(request.status_code)
                success = False

        super().after_search()

        return success

    def run(self):
        """Process all assigned jobs: every page of every keyword."""
        super().before_search()

        if self.startable:
            for self.query, self.pages_per_keyword in self.jobs.items():
                for self.page_number in self.pages_per_keyword:
                    if not self.search(rand=True):
                        self.missed_keywords.add(self.query)
| 10,723 | 33.931596 | 125 | py |
GoogleScraper | GoogleScraper-master/GoogleScraper/scrape_jobs.py | # -*- coding: utf-8 -*-
import logging
logger = logging.getLogger(__name__)
"""
The core logic of GoogleScraper is handled here.
By default, every keyword is scraped on all given search engines for the supplied
number of pages.
Example:
keywords = ('one', 'two')
search_eninges = ('google, 'yandex')
num_pages = 5
Then the following requests are issued:
[('one', 'google', 0),
('one', 'google', 1),
('one', 'google', 2),
('one', 'google', 3),
('one', 'google', 4),
('one', 'yandex', 0),
('one', 'yandex', 1),
('one', 'yandex', 2),
('one', 'yandex', 3),
('one', 'yandex', 4),
('two', 'google', 0),
('two', 'google', 1),
('two', 'google', 2),
('two', 'google', 3),
('two', 'google', 4),
('two', 'yandex', 0),
('two', 'yandex', 1),
('two', 'yandex', 2),
('two', 'yandex', 3),
('two', 'yandex', 4)]
But sometimes you want to fine tune this generic behaviour. Some keywords should be scraped on
only some search engines. Some keywords should be only used with specific proxies. Maybe
a specific keyword should be searched Y times, whereas another needs to be scraped X times.
Therefore we need am special format, where you can specify the single settings for each
keyword.
The best format for such a keyword file is just a python module with a dictionary with one
mandatory key: The 'query'. The dictionary must be called 'scrape_jobs'.
You can see such a example file in the examples/ directory.
"""
def default_scrape_jobs_for_keywords(keywords, search_engines, scrape_method, num_pages):
    """Yield the default scrape job for every keyword.

    When only a plain keyword list is supplied, each keyword is searched
    on every given search engine, for each page from 1 to num_pages, with
    the given scrape method.

    Args:
        keywords: An iterable of keywords to scrape.
        search_engines: An iterable of search engine names.
        scrape_method: The scrape method used for all generated jobs.
        num_pages: Number of SERP pages per keyword/engine combination.

    Yields:
        One dict per scrape job.
    """
    for current_keyword in keywords:
        for engine in search_engines:
            for page_number in range(1, num_pages + 1):
                job = {
                    'query': current_keyword,
                    'search_engine': engine,
                    'scrape_method': scrape_method,
                    'page_number': page_number,
                }
                yield job
GoogleScraper | GoogleScraper-master/GoogleScraper/commandline.py | # -*- coding: utf-8 -*-
import argparse
from GoogleScraper.version import __version__
def get_command_line(only_print_help=False):
    """Parse command line arguments when GoogleScraper is used as a CLI application.

    Args:
        only_print_help: When True, only print the usage text and return None
            instead of parsing sys.argv.

    Returns:
        The configuration as a dictionary that determines the behaviour of the app,
        or None when only_print_help is True.
    """
    parser = argparse.ArgumentParser(prog='GoogleScraper',
                                     description='Scrapes the Google, Yandex, Bing and many other search engines by '
                                                 'forging http requests that imitate browser searches or by using real '
                                                 'browsers controlled by the selenium framework. '
                                                 'Multithreading support.',
                                     epilog='GoogleScraper {version}. This program might infringe the TOS of the '
                                            'search engines. Please use it on your own risk. (c) by Nikolai Tschacher'
                                            ', 2012-2019. https://scrapeulous.com/'.format(version=__version__))

    # --- scraping backend selection -----------------------------------------
    parser.add_argument('-m', '--scrape-method', type=str, default='http',
                        help='The scraping type. There are currently three types: "http", "selenium" and "http-async". '
                             '"Http" scrapes with raw http requests, whereas "selenium" uses the selenium framework to '
                             'remotely control browsers. "http-async" makes use of gevent and is well suited for '
                             'extremely fast and explosive scraping jobs. You may search more than 1000 requests per '
                             'second if you have the necessary number of proxies available. ',
                        choices=('http', 'selenium', 'http-async'))

    parser.add_argument('--sel-browser', choices=['firefox', 'chrome'], default='chrome',
                        help='The browser frontend for selenium scraping mode. Takes only effect if --scrape-method is set to "selenium"')

    parser.add_argument('--browser-mode', choices=['normal', 'headless'], default='normal',
                        help='In which mode the browser is started. Valid values = (normal, headless)')

    # --- keyword input (exactly one of the two) -----------------------------
    keyword_group = parser.add_mutually_exclusive_group()

    keyword_group.add_argument('-q', '--keyword', type=str, action='store', dest='keyword',
                               help='The search keyword to scrape for. If you need to scrape multiple keywords, use '
                                    'the --keyword-file flag')

    keyword_group.add_argument('--keyword-file', type=str, action='store', default='',
                               help='Keywords to search for. One keyword per line. Empty lines are ignored. '
                                    'Alternatively, you may specify the path to an python module (must end with the '
                                    '.py suffix) where the keywords must be held in a dictionary with the name "scrape_'
                                    'jobs".')

    # NOTE(review): the short flag is '-o-' (with trailing dash), so users must
    # type '-o-', not '-o'. Probably a typo, but changing it would break
    # existing invocations — kept as-is deliberately.
    parser.add_argument('-o-', '--output-filename', type=str, action='store', default='',
                        help='The name of the output file. If the file ending is "json", write a json file, if the '
                             'ending is "csv", write a csv file.')

    parser.add_argument('--shell', action='store_true', default=False,
                        help='Fire up a shell with a loaded sqlalchemy session.')

    # --- job sizing ---------------------------------------------------------
    parser.add_argument('-n', '--num-results-per-page', type=int,
                        action='store', default=10,
                        help='The number of results per page. Must be smaller than 100. Defaults to 10. '
                             'Some search engines ignore this setting.')

    parser.add_argument('-p', '--num-pages-for-keyword', type=int, action='store',
                        default=1,
                        help='The number of pages to request for each keyword. Each page is requested by a unique '
                             'connection and if possible by a unique IP (at least in "http" mode).')

    parser.add_argument('-z', '--num-workers', type=int, default=1,
                        action='store',
                        help='This arguments sets the number of browser instances for selenium mode or the number of '
                             'worker threads in http mode.')

    parser.add_argument('-t', '--search-type', type=str, action='store', default='normal',
                        help='The searchtype to launch. May be normal web search, image search, news search or video '
                             'search.')

    # --- proxies and configuration ------------------------------------------
    parser.add_argument('--proxy-file', type=str, dest='proxy_file', action='store',
                        required=False, help='A filename for a list of proxies (supported are HTTP PROXIES, SOCKS4/5) '
                                             'with the following format: "Proxyprotocol (proxy_ip|proxy_host):Port\n"'
                                             'Example file: socks4 127.0.0.1:99\nsocks5 33.23.193.22:1080\n')

    parser.add_argument('--config-file', type=str, dest='config_file', action='store',
                        help='The path to the configuration file for GoogleScraper. Normally you won\'t need this, '
                             'because GoogleScrape comes shipped with a thoroughly commented configuration file named '
                             '"scrape_config.py"')

    parser.add_argument('--check-detection', type=str, dest='check_detection', action='store',
                        help='Check if the given search engine blocked you from scraping. Often detection can be determined '
                             'if you have to solve a captcha.')

    parser.add_argument('--simulate', action='store_true', default=False, required=False,
                        help='''If this flag is set, the scrape job and its estimated length will be printed.''')

    loglevel_help = '''
    Set the debug level of the application. Use the string representation
    instead of the numbers. High numbers will output less, low numbers more.
    CRITICAL = 50,
    FATAL = CRITICAL,
    ERROR = 40,
    WARNING = 30,
    WARN = WARNING,
    INFO = 20,
    DEBUG = 10,
    NOTSET = 0
    '''
    parser.add_argument('-v', '--verbosity', '--loglevel',
                        dest='log_level', default='INFO', type = str.lower,
                        choices=['debug', 'info', 'warning', 'warn', 'error', 'critical', 'fatal'], help=loglevel_help)

    parser.add_argument('--print-results', choices=['all', 'summarize'], default='all',
                        help='Whether to print all results ("all"), or only print a summary ("summarize")')

    parser.add_argument('--view-config', action='store_true', default=False,
                        help="Print the current configuration to stdout. You may use it to create and tweak your own "
                             "config file from it.")

    parser.add_argument('-V', '--v', '--version', action='store_true', default=False, dest='version',
                        help='Prints the version of GoogleScraper')

    parser.add_argument('--clean', action='store_true', default=False,
                        help='Cleans all stored data. Please be very careful when you use this flag.')

    parser.add_argument('--mysql-proxy-db', action='store',
                        help="A mysql connection string for proxies to use. Format: mysql://<username>:<password>@"
                             "<host>/<dbname>. Has precedence over proxy files.")

    parser.add_argument('-s', '--search-engines', action='store', default=['google'],
                        help='What search engines to use (See GoogleScraper --config for the all supported). If you '
                             'want to use more than one at the same time, just separate with commas: "google, bing, '
                             'yandex". If you want to use all search engines that are available, give \'*\' as '
                             'argument.')

    if only_print_help:
        parser.print_help()
    else:
        args = parser.parse_args()
        return vars(args)
| 8,247 | 57.914286 | 138 | py |
GoogleScraper | GoogleScraper-master/GoogleScraper/database.py | # -*- coding: utf-8 -*-
"""
The database schema of GoogleScraper.
There are four entities:
ScraperSearch: Represents a call to GoogleScraper. A search job.
SearchEngineResultsPage: Represents a SERP result page of a search_engine
Link: Represents a LINK on a SERP
Proxy: Stores all proxies and their statuses.
Because searches repeat themselves and we avoid doing them again (caching), one SERP page
can be assigned to more than one ScraperSearch. Therefore we need a n:m relationship.
"""
import datetime
from urllib.parse import urlparse
from sqlalchemy import Column, String, Integer, ForeignKey, Table, DateTime, Enum, Boolean
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, backref
from sqlalchemy import create_engine, UniqueConstraint
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
# Association table implementing the n:m relationship between ScraperSearch
# and SearchEngineResultsPage: because of caching, one SERP may be shared by
# several scraper searches (see the module docstring above).
scraper_searches_serps = Table('scraper_searches_serps', Base.metadata,
                               Column('scraper_search_id', Integer, ForeignKey('scraper_search.id')),
                               Column('serp_id', Integer, ForeignKey('serp.id')))
class ScraperSearch(Base):
    """One invocation of GoogleScraper: a whole scrape job.

    Holds bookkeeping about the job (which engines, how many proxies and
    queries, start/stop timestamps) and aggregates the SERPs requested for
    it through the scraper_searches_serps association table.
    """
    __tablename__ = 'scraper_search'

    id = Column(Integer, primary_key=True)
    # path of the keyword file the job was started with (if any)
    keyword_file = Column(String)
    number_search_engines_used = Column(Integer)
    used_search_engines = Column(String)
    number_proxies_used = Column(Integer)
    number_search_queries = Column(Integer)
    started_searching = Column(DateTime, default=datetime.datetime.utcnow)
    stopped_searching = Column(DateTime)

    # n:m — a SERP can belong to several scraper searches (caching).
    serps = relationship(
        'SearchEngineResultsPage',
        secondary=scraper_searches_serps,
        backref=backref('scraper_searches', uselist=True)
    )

    def __str__(self):
        # relies on the mapped attributes being present in the instance dict
        return '<ScraperSearch[{id}] scraped for {number_search_queries} unique keywords. Started scraping: {started_' \
               'searching} and stopped: {stopped_searching}>'.format(**self.__dict__)

    def __repr__(self):
        return self.__str__()
class SearchEngineResultsPage(Base):
    """One search engine results page (SERP): a single query on a single
    engine at a single page number, together with its parsed links.
    """
    __tablename__ = 'serp'

    id = Column(Integer, primary_key=True)
    status = Column(String, default='successful')
    search_engine_name = Column(String)
    scrape_method = Column(String)
    page_number = Column(Integer)
    requested_at = Column(DateTime, default=datetime.datetime.utcnow)
    requested_by = Column(String, default='127.0.0.1')

    # The string in the SERP that indicates how many results we got for the search term.
    num_results_for_query = Column(String, default='')

    # Whether we got any results at all. This is the same as len(serp.links)
    num_results = Column(Integer, default=-1)

    query = Column(String)

    # if the query was modified by the search engine because there weren't any
    # results, this variable is set to the query that was used instead.
    # Otherwise it remains empty.
    effective_query = Column(String, default='')

    # Whether the search engine has no results.
    # This is not the same as num_results, because some search engines
    # automatically search other similar search queries when they find no results.
    # Sometimes they have results for the query, but detect a spelling mistake and only
    # suggest an alternative. This is another case!
    # If no_results is true, then there weren't ANY RESULTS FOUND FOR THIS QUERY!!! But there
    # could have been results for an auto corrected query.
    no_results = Column(Boolean, default=False)

    def __str__(self):
        return '<SERP[{search_engine_name}] has [{num_results}] link results for query "{query}">'.format(
            **self.__dict__)

    def __repr__(self):
        return self.__str__()

    def has_no_results_for_query(self):
        """
        Returns True if the original query did not yield any results.
        Returns False if either there are no serp entries, or the search engine auto corrected the query.
        """
        return self.num_results == 0 or self.effective_query

    def set_values_from_parser(self, parser):
        """Populate itself from a parser object.

        Copies the summary fields and creates one Link row per parsed result.

        Args:
            A parser object.
        """
        self.num_results_for_query = parser.num_results_for_query
        self.num_results = parser.num_results
        self.effective_query = parser.effective_query
        self.no_results = parser.no_results

        for key, value in parser.search_results.items():
            if isinstance(value, list):
                for link in value:
                    parsed = urlparse(link['link'])

                    # fill with nones to prevent key errors
                    # (the comprehension's `key` does not shadow the outer
                    # loop variable — comprehensions have their own scope)
                    [link.update({key: None}) for key in ('snippet', 'title', 'visible_link', 'rating', 'num_reviews') if key not in link]

                    # passing serp=self attaches the new Link to this SERP
                    # through the relationship backref
                    Link(
                        link=link['link'],
                        snippet=link['snippet'],
                        title=link['title'],
                        visible_link=link['visible_link'],
                        rating=link['rating'],
                        num_reviews=link['num_reviews'],
                        domain=parsed.netloc,
                        rank=link['rank'],
                        serp=self,
                        link_type=key
                    )

    def set_values_from_scraper(self, scraper):
        """Populate itself from a scraper object.

        A scraper may be any object of type:
            - SelScrape
            - HttpScrape
            - AsyncHttpScrape

        Args:
            A scraper object.
        """
        self.query = scraper.query
        self.search_engine_name = scraper.search_engine_name
        self.scrape_method = scraper.scrape_method
        self.page_number = scraper.page_number
        self.requested_at = scraper.requested_at
        self.requested_by = scraper.requested_by
        self.status = scraper.status

    def was_correctly_requested(self):
        # True when the request succeeded (status set by the scraper)
        return self.status == 'successful'


# Alias as a shorthand for working in the shell
SERP = SearchEngineResultsPage
class Link(Base):
    """A single result entry on a SERP: url, title, snippet and its rank.

    link_type records which result group it came from (e.g. 'results',
    'ads_main' — the keys of the parser's search_results dict).
    """
    __tablename__ = 'link'

    id = Column(Integer, primary_key=True)
    title = Column(String)
    snippet = Column(String)
    link = Column(String)
    # netloc of the link url, extracted with urlparse
    domain = Column(String)
    visible_link = Column(String)
    rating = Column(String)
    num_reviews = Column(String)
    # 1-based position of the result on its SERP
    rank = Column(Integer)
    link_type = Column(String)

    serp_id = Column(Integer, ForeignKey('serp.id'))
    serp = relationship(SearchEngineResultsPage, backref=backref('links', uselist=True))

    def __str__(self):
        return '<Link at rank {rank} has url: {link}>'.format(**self.__dict__)

    def __repr__(self):
        return self.__str__()
class Proxy(Base):
    """A proxy server used for scraping, with status and geo metadata."""
    __tablename__ = 'proxy'

    id = Column(Integer, primary_key=True)
    ip = Column(String)
    hostname = Column(String)
    port = Column(Integer)
    proto = Column(Enum('socks5', 'socks4', 'http'))
    username = Column(String)
    password = Column(String)

    online = Column(Boolean)
    status = Column(String)
    checked_at = Column(DateTime)
    created_at = Column(DateTime, default=datetime.datetime.utcnow)

    # geo/owner info (fields mirror typical ip-info lookups)
    city = Column(String)
    region = Column(String)
    country = Column(String)
    loc = Column(String)
    org = Column(String)
    postal = Column(String)

    # NOTE(review): this bare expression is not attached to the table — in
    # SQLAlchemy declarative mapping a UniqueConstraint must be declared in
    # __table_args__ to take effect. As written it likely does nothing;
    # confirm whether (ip, port) uniqueness should actually be enforced.
    UniqueConstraint(ip, port, name='unique_proxy')

    def __str__(self):
        return '<Proxy {ip}>'.format(**self.__dict__)

    def __repr__(self):
        return self.__str__()


# Alias as a shorthand for working in the shell
db_Proxy = Proxy
class SearchEngine(Base):
    """A supported search engine and its entry urls for the scrape modes."""
    __tablename__ = 'search_engine'

    id = Column(Integer, primary_key=True)
    name = Column(String, unique=True)
    # base urls for the different scrape modes / search types
    http_url = Column(String)
    selenium_url = Column(String)
    image_url = Column(String)
class SearchEngineProxyStatus(Base):
    """Stores last proxy status for the given search engine.

    A proxy can either work on a search engine or not.
    """
    __tablename__ = 'search_engine_proxy_status'

    id = Column(Integer, primary_key=True)
    proxy_id = Column(Integer, ForeignKey('proxy.id'))
    search_engine_id = Column(Integer, ForeignKey('search_engine.id'))
    # whether the proxy worked on the engine at last_check
    available = Column(Boolean)
    last_check = Column(DateTime)
def get_engine(config, path=None):
    """Create and return the sqlalchemy engine for the sqlite database.

    Args:
        config: Configuration dict; 'database_name' and 'log_sqlalchemy'
            are read from it.
        path: The path/name of the database to create/read from. When
            omitted, it is derived from the 'database_name' config value.

    Returns:
        The sqlalchemy engine, with all tables created.
    """
    if path:
        db_path = path
    else:
        db_path = config.get('database_name', 'google_scraper') + '.db'

    engine = create_engine(
        'sqlite:///' + db_path,
        echo=config.get('log_sqlalchemy', False),
        connect_args={'check_same_thread': False},
    )
    Base.metadata.create_all(engine)
    return engine
def get_session(config, scoped=False, engine=None, path=None):
    """Return a session factory bound to the database engine.

    Args:
        config: Configuration dict, forwarded to get_engine() when needed.
        scoped: When True, wrap the factory in a thread-local scoped_session.
        engine: Optional pre-built engine; one is created on demand otherwise.
        path: Optional database path, forwarded to get_engine().

    Returns:
        A scoped_session when scoped is True, otherwise a plain
        sessionmaker factory.
    """
    if not engine:
        engine = get_engine(config, path=path)

    factory = sessionmaker(
        bind=engine,
        autoflush=True,
        autocommit=False,
    )
    return scoped_session(factory) if scoped else factory
def fixtures(config, session):
    """Add base data: one SearchEngine row per supported engine name.

    Existing rows are left untouched; only missing engines are inserted.
    Commits once at the end.
    """
    engine_names = (name for name in config.get('supported_search_engines', []) if name)
    for name in engine_names:
        existing = session.query(SearchEngine).filter(SearchEngine.name == name).first()
        if not existing:
            session.add(SearchEngine(name=name))
    session.commit()
| 9,479 | 31.027027 | 138 | py |
GoogleScraper | GoogleScraper-master/GoogleScraper/parsing.py | # -*- coding: utf-8 -*-
import sys
import os
import re
import lxml.html
from lxml.html.clean import Cleaner
from urllib.parse import unquote
import pprint
from GoogleScraper.database import SearchEngineResultsPage
import logging
from cssselect import HTMLTranslator
logger = logging.getLogger(__name__)
class InvalidSearchTypeException(Exception):
    """Raised when no css selectors exist for the requested search type
    (see Parser._parse)."""
    pass
class UnknowUrlException(Exception):
    """Raised for urls that cannot be handled.

    NOTE(review): not raised anywhere in this module — confirm usage at
    the actual call sites before documenting further.
    """
    pass
class NoParserForSearchEngineException(Exception):
    """Raised when no Parser subclass is available for a search engine.

    NOTE(review): not raised in this module — confirm at the call sites.
    """
    pass
class Parser():
    """Parses SERP pages.

    Each search engine results page (SERP) has a similar layout:
    The main search results are usually in a html container element (#main, .results, #leftSide).
    There might be separate columns for other search results (like ads for example). Then each
    result contains basically a link, a snippet and a description (usually some text on the
    target site). It's really astonishing how similar other search engines are to Google.

    Each child class (that can actually parse a concrete search engine results page) needs
    to specify css selectors for the different search types (like normal search, news search, video search, ...).

    Attributes:
        search_results: The results after parsing.
    """

    # this selector specifies the element that notifies the user whether the search
    # had any results.
    no_results_selector = []

    # if subclasses specify a value for this attribute and the attribute
    # targets an element in the serp page, then there weren't any results
    # for the original query.
    effective_query_selector = []

    # the selector that gets the number of results (guessed) as shown by the search engine.
    num_results_search_selectors = []

    # some search engines show on which page we currently are. If supported, this selector will get this value.
    page_number_selectors = []

    # The supported search types. For instance, Google supports Video Search, Image Search, News search
    search_types = []

    # Each subclass of Parser may declare an arbitrary amount of attributes that
    # follow a naming convention like this:
    # *_search_selectors
    # where the asterisk may be replaced with arbitrary identifier names.
    # Any of these attributes represent css selectors for a specific search type.
    # If you didn't specify the search type in the search_types list, this attribute
    # will not be evaluated and no data will be parsed.

    def __init__(self, config=None, html='', query=''):
        """Create new Parser instance and parse all information.

        Args:
            config: The GoogleScraper configuration dict. Only 'search_type'
                is read here. Defaults to an empty dict.
            html: The raw html from the search engine search. If not provided, you can parse
                the data later by calling parse(html) directly.
            query: The search query that produced the page (used by
                subclasses to sanity-check "no results" states).

        Raises:
            AssertionError if the subclassed specific parser cannot handle
            the given search type.
        """
        # FIX: the default used to be a shared mutable dict (config={});
        # use None as sentinel and build a fresh dict per instance.
        self.config = config if config is not None else {}
        self.searchtype = self.config.get('search_type', 'normal')
        assert self.searchtype in self.search_types, 'search type "{}" is not supported in {}'.format(
            self.searchtype,
            self.__class__.__name__
        )

        self.query = query
        self.html = html
        self.dom = None
        self.search_results = {}
        self.num_results_for_query = ''
        self.num_results = 0
        self.effective_query = ''
        self.page_number = -1
        self.no_results = False

        # to be set by the implementing sub classes
        self.search_engine = ''

        # short alias because we use it so extensively
        self.css_to_xpath = HTMLTranslator().css_to_xpath

        if self.html:
            self.parse()

    def parse(self, html=None):
        """Public function to start parsing the search engine results.

        Args:
            html: The raw html data to extract the SERP entries from.
        """
        if html:
            self.html = html

        # lets do the actual parsing
        self._parse()

        # Apply subclass specific behaviour after parsing has happened
        # This is needed because different parsers need to clean/modify
        # the parsed data uniquely.
        self.after_parsing()

    def _parse_lxml(self, cleaner=None):
        """Parse self.html into self.dom, optionally cleaning it.

        Args:
            cleaner: An optional lxml Cleaner applied to the freshly parsed dom.
        """
        try:
            parser = lxml.html.HTMLParser(encoding='utf-8')
            self.dom = lxml.html.document_fromstring(self.html, parser=parser)
            # FIX: clean *after* parsing. The previous order applied the
            # cleaner to the stale (possibly None) dom and then overwrote
            # the result with the freshly parsed tree, discarding the
            # cleaning entirely.
            if cleaner:
                self.dom = cleaner.clean_html(self.dom)
            self.dom.resolve_base_href()
        except Exception as e:
            # maybe wrong encoding
            logger.error(e)

    def _parse(self, cleaner=None):
        """Internal parse the dom according to the provided css selectors.

        Raises: InvalidSearchTypeException if no css selectors for the searchtype could be found.
        """
        self.num_results = 0
        self._parse_lxml(cleaner)

        # try to parse the number of results.
        attr_name = self.searchtype + '_search_selectors'
        selector_dict = getattr(self, attr_name, None)

        # get the appropriate css selectors for the num_results for the keyword
        num_results_selector = getattr(self, 'num_results_search_selectors', None)
        self.num_results_for_query = self.first_match(num_results_selector, self.dom)
        if not self.num_results_for_query:
            logger.debug('{}: Cannot parse num_results from serp page with selectors {}'.format(self.__class__.__name__,
                                                                                                num_results_selector))

        # get the current page we are at. Sometimes search engines don't show this.
        page_match = self.first_match(self.page_number_selectors, self.dom)
        try:
            # FIX: first_match() returns False when nothing matched;
            # int(False) would silently yield page 0 instead of the
            # intended -1, so guard explicitly.
            self.page_number = int(page_match) if page_match else -1
        except ValueError:
            self.page_number = -1

        # let's see if the search query was shitty (no results for that query)
        self.effective_query = self.first_match(self.effective_query_selector, self.dom)
        if self.effective_query:
            logger.debug('{}: There was no search hit for the search query. Search engine used {} instead.'.format(
                self.__class__.__name__, self.effective_query))
        else:
            self.effective_query = ''

        # the element that notifies the user about no results.
        self.no_results_text = self.first_match(self.no_results_selector, self.dom)

        # get the stuff that is of interest in SERP pages.
        if not selector_dict and not isinstance(selector_dict, dict):
            raise InvalidSearchTypeException('There is no such attribute: {}. No selectors found'.format(attr_name))

        for result_type, selector_class in selector_dict.items():
            self.search_results[result_type] = []

            # every layout variant ('us_ip', 'de_ip', ...) is evaluated
            for selector_specific, selectors in selector_class.items():

                if 'result_container' in selectors and selectors['result_container']:
                    css = '{container} {result_container}'.format(**selectors)
                else:
                    css = selectors['container']

                results = self.dom.xpath(
                    self.css_to_xpath(css)
                )

                to_extract = set(selectors.keys()) - {'container', 'result_container'}
                selectors_to_use = {key: selectors[key] for key in to_extract if key in selectors.keys()}

                for index, result in enumerate(results):
                    # Let's add primitive support for CSS3 pseudo selectors
                    # We just need two of them
                    # ::text
                    # ::attr(attribute)
                    # You say we should use xpath expressions instead?
                    # Maybe you're right, but they are complicated when it comes to classes,
                    # have a look here: http://doc.scrapy.org/en/latest/topics/selectors.html
                    serp_result = {}
                    # key are for example 'link', 'snippet', 'visible-url', ...
                    # selector is the selector to grab these items
                    for key, selector in selectors_to_use.items():
                        serp_result[key] = self.advanced_css(selector, result)

                    serp_result['rank'] = index + 1

                    # Avoid duplicates. Duplicates are serp_result elemnts where the 'link' and 'title' are identical
                    # If statement below: Lazy evaluation. The more probable case first.
                    # if not [e for e in self.search_results[result_type] if (e['link'] == serp_result['link'] and e['title'] == serp_result['title']) ]:
                    self.search_results[result_type].append(serp_result)
                    self.num_results += 1

    def advanced_css(self, selector, element):
        """Evaluate the :text and ::attr(attr-name) additionally.

        Args:
            selector: A css selector.
            element: The element on which to apply the selector.

        Returns:
            The targeted element.
        """
        value = None

        if selector.endswith('::text'):
            try:
                value = element.xpath(self.css_to_xpath(selector.split('::')[0]))[0].text_content()
            except IndexError:
                pass
        else:
            match = re.search(r'::attr\((?P<attr>.*)\)$', selector)

            if match:
                attr = match.group('attr')
                try:
                    value = element.xpath(self.css_to_xpath(selector.split('::')[0]))[0].get(attr)
                except IndexError:
                    pass
            else:
                try:
                    value = element.xpath(self.css_to_xpath(selector))[0].text_content()
                except IndexError:
                    pass

        return value

    def first_match(self, selectors, element):
        """Get the first match.

        Args:
            selectors: The selectors to test for a match.
            element: The element on which to apply the selectors.

        Returns:
            The very first match or False if all selectors didn't match anything.
        """
        assert isinstance(selectors, list), 'selectors must be of type list!'

        for selector in selectors:
            if selector:
                try:
                    match = self.advanced_css(selector, element=element)
                    if match:
                        return match
                except IndexError:
                    pass

        return False

    def after_parsing(self):
        """Subclass specific behaviour after parsing happened.

        Override in subclass to add search engine specific behaviour.
        Commonly used to clean the results.
        """

    def __str__(self):
        """Return a nicely formatted overview of the results."""
        return pprint.pformat(self.search_results)

    @property
    def cleaned_html(self):
        # Try to parse the provided HTML string using lxml
        # strip all unnecessary information to save space
        cleaner = Cleaner()
        cleaner.scripts = True
        cleaner.javascript = True
        cleaner.comments = True
        cleaner.style = True
        self.dom = cleaner.clean_html(self.dom)
        assert len(self.dom), 'The html needs to be parsed to get the cleaned html'
        return lxml.html.tostring(self.dom)

    def iter_serp_items(self):
        """Yields the key and index of any item in the serp results that has a link value"""
        for key, value in self.search_results.items():
            if isinstance(value, list):
                for i, item in enumerate(value):
                    if isinstance(item, dict) and item['link']:
                        yield (key, i)
"""
Here follow the different classes that provide CSS selectors
for different types of SERP pages of several common search engines.
Just look at them and add your own selectors in a new class if you
want the Scraper to support them.
You can easily just add new selectors to a search engine. Just follow
the attribute naming convention and the parser will recognize them:
If you provide a dict with a name like finance_search_selectors,
then you're adding a new search type with the name finance.
Each class needs an attribute called num_results_search_selectors, which
extracts the number of results that were found for the keyword.
Please note:
The actual selectors are wrapped in a dictionary to clarify with which IP
they were requested. The key to the wrapper div allows to specify distinct
criteria to whatever settings you used when you requested the page. So you
might add your own selectors for different User-Agents, distinct HTTP headers, what-
ever you may imagine. This allows the most dynamic parsing behaviour and makes
it very easy to grab all data the site has to offer.
"""
class GoogleParser(Parser):
    """Parses SERP pages of the Google search engine.

    The selector dicts follow the Parser convention: the outer key is the
    result group ('results', 'ads_main', ...), the inner key names a layout
    variant (e.g. 'us_ip', 'de_ip'); all variants are evaluated by
    Parser._parse.
    """

    search_engine = 'google'

    search_types = ['normal', 'image']

    effective_query_selector = ['#topstuff .med > b::text', '.med > a > b::text']

    no_results_selector = []

    num_results_search_selectors = ['#resultStats']

    page_number_selectors = ['#navcnt td.cur::text']

    normal_search_selectors = {
        'results': {
            'us_ip': {
                'container': '#center_col',
                'result_container': 'div.g ',
                'link': 'div.r > a:first-child::attr(href)',
                'snippet': 'div.s span.st::text',
                'title': 'div.r > a > h3::text',
                'visible_link': 'cite::text'
            },
            'de_ip': {
                'container': '#center_col',
                'result_container': 'li.g ',
                'link': 'h3.r > a:first-child::attr(href)',
                'snippet': 'div.s span.st::text',
                'title': 'h3.r > a:first-child::text',
                'visible_link': 'cite::text'
            },
            'de_ip_news_items': {
                'container': 'li.card-section',
                'link': 'a._Dk::attr(href)',
                'snippet': 'span._dwd::text',
                'title': 'a._Dk::text',
                'visible_link': 'cite::text'
            },
        },
        'ads_main': {
            'us_ip': {
                'container': '#center_col',
                'result_container': 'li.ads-ad',
                'link': 'div.r > a:first-child::attr(href)',
                'snippet': 'div.s span.st::text',
                'title': 'div.r > a > h3::text',
                'visible_link': '.ads-visurl cite::text',
            },
            'de_ip': {
                'container': '#center_col',
                'result_container': '.ads-ad',
                'link': 'h3 > a:first-child::attr(href)',
                'snippet': '.ads-creative::text',
                'title': 'h3 > a:first-child::text',
                'visible_link': '.ads-visurl cite::text',
            }
        },
        # those css selectors are probably not worth much
        'maps_local': {
            'de_ip': {
                'container': '#center_col',
                'result_container': '.ccBEnf > div',
                'link': 'link::attr(href)',
                'snippet': 'div.rl-qs-crs-t::text',
                'title': 'div[role="heading"] span::text',
                'rating': 'span.BTtC6e::text',
                'num_reviews': '.rllt__details::text',
            }
        },
        'ads_aside': {
        }
    }

    image_search_selectors = {
        'results': {
            'de_ip': {
                'container': '#res',
                'result_container': '.rg_bx',
                'link': 'a.rg_l::attr(href)',
                'snippet': '.a-no-hover-decoration::text',
            },
            'de_ip_http_mode': {
                'container': '#search',
                'result_container': '.rg_bx',
                'link': 'a.rg_l::attr(href)',
                'snippet': '.a-no-hover-decoration::text',
            }
        }
    }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def after_parsing(self):
        """Clean the urls.

        A typical scraped results looks like the following:

        '/url?q=http://www.youtube.com/user/Apple&sa=U&ei=\
        lntiVN7JDsTfPZCMgKAO&ved=0CFQQFjAO&usg=AFQjCNGkX65O-hKLmyq1FX9HQqbb9iYn9A'

        Clean with a short regex.
        """
        super().after_parsing()

        if self.searchtype == 'normal':
            # derive the no_results flag from the parsed result count, then
            # double-check against the well-known "no results" phrases.
            if self.num_results > 0:
                self.no_results = False
            elif self.num_results <= 0:
                self.no_results = True

            if 'No results found for' in self.html or 'did not match any documents' in self.html:
                self.no_results = True

            # finally try in the snippets: if the (unquoted) query appears in
            # a snippet, we clearly did get results.
            if self.no_results is True:
                for key, i in self.iter_serp_items():
                    if 'snippet' in self.search_results[key][i] and self.query:
                        if self.query.replace('"', '') in self.search_results[key][i]['snippet']:
                            self.no_results = False

        # unwrap Google's redirect urls ('/url?q=...' / 'imgres?imgurl=...')
        clean_regexes = {
            'normal': r'/url\?q=(?P<url>.*?)&sa=U&ei=',
            'image': r'imgres\?imgurl=(?P<url>.*?)&'
        }

        for key, i in self.iter_serp_items():
            result = re.search(
                clean_regexes[self.searchtype],
                self.search_results[key][i]['link']
            )
            if result:
                self.search_results[key][i]['link'] = unquote(result.group('url'))
class YandexParser(Parser):
    """Parses SERP pages of the Yandex search engine."""

    search_engine = 'yandex'

    search_types = ['normal', 'image']

    no_results_selector = ['.message .misspell__message::text']

    effective_query_selector = ['.misspell__message .misspell__link']

    # @TODO: In december 2015, I saw that yandex only shows the number of search results in the search input field
    # with javascript. One can scrape it in plain http mode, but the values are hidden in some javascript and not
    # accessible with normal xpath/css selectors. A normal text search is done.
    num_results_search_selectors = ['.serp-list .serp-adv__found::text', '.input__found_visibility_visible font font::text']

    page_number_selectors = ['.pager__group .button_checked_yes span::text']

    normal_search_selectors = {
        'results': {
            'de_ip': {
                'container': '.serp-list',
                'result_container': '.serp-item',
                'link': 'a.link::attr(href)',
                'snippet': 'div.text-container::text',
                'title': 'div.organic__url-text::text',
                'visible_link': '.typo_type_greenurl::text'
            }
        }
    }

    image_search_selectors = {
        'results': {
            # selenium mode: the image url is embedded in the onmousedown JS
            'de_ip': {
                'container': '.page-layout__content-wrapper',
                'result_container': '.serp-item__preview',
                'link': '.serp-item__preview .serp-item__link::attr(onmousedown)'
            },
            # raw http mode: the image url is in the href query string
            'de_ip_raw': {
                'container': '.page-layout__content-wrapper',
                'result_container': '.serp-item__preview',
                'link': '.serp-item__preview .serp-item__link::attr(href)'
            }
        }
    }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def after_parsing(self):
        """Clean the urls.

        Normally Yandex image search store the image url in the onmousedown attribute in a json object. Its
        pretty messsy. This method grabs the link with a quick regex.

        c.hit({"dtype":"iweb","path":"8.228.471.241.184.141","pos":69,"reqid":\
        "1418919408668565-676535248248925882431999-ws35-986-IMG-p2"}, \
        {"href":"http://www.thewallpapers.org/wallpapers/3/382/thumb/600_winter-snow-nature002.jpg"});

        Sometimes the img url is also stored in the href attribute (when requesting with raw http packets).
        href="/images/search?text=snow&img_url=\
        http%3A%2F%2Fwww.proza.ru%2Fpics%2F2009%2F12%2F07%2F1290.jpg&pos=2&rpt=simage&pin=1">
        """
        super().after_parsing()

        if self.searchtype == 'normal':
            self.no_results = False
            if self.no_results_text:
                # the Russian phrase is Yandex's "nothing was found" message
                self.no_results = 'По вашему запросу ничего не нашлось' in self.no_results_text
            if self.num_results == 0:
                self.no_results = True

            # very hackish, probably prone to all kinds of errors.
            if not self.num_results_for_query:
                substr = 'function() { var title = "%s —' % self.query
                try:
                    i = self.html.index(substr)
                    if i:
                        self.num_results_for_query = re.search(r'— (.)*?"', self.html[i:i+len(self.query) + 150]).group()
                except Exception as e:
                    logger.debug(str(e))

        if self.searchtype == 'image':
            # extract the real image url from either the JS blob or the
            # img_url query parameter (see docstring above)
            for key, i in self.iter_serp_items():
                for regex in (
                    r'\{"href"\s*:\s*"(?P<url>.*?)"\}',
                    r'img_url=(?P<url>.*?)&'
                ):
                    result = re.search(regex, self.search_results[key][i]['link'])
                    if result:
                        self.search_results[key][i]['link'] = result.group('url')
                        break
class BingParser(Parser):
    """Parses SERP pages of the Bing search engine."""

    search_engine = 'bing'
    search_types = ['normal', 'image']

    no_results_selector = ['#b_results > .b_ans::text']
    num_results_search_selectors = ['.sb_count']
    effective_query_selector = ['#sp_requery a > strong', '#sp_requery + #sp_recourse a::attr(href)']
    page_number_selectors = ['.sb_pagS::text']

    normal_search_selectors = {
        'results': {
            'us_ip': {
                'container': '#b_results',
                'result_container': '.b_algo',
                'link': 'h2 > a::attr(href)',
                'snippet': '.b_caption > p::text',
                'title': 'h2::text',
                'visible_link': 'cite::text'
            },
            'de_ip': {
                'container': '#b_results',
                'result_container': '.b_algo',
                'link': 'h2 > a::attr(href)',
                'snippet': '.b_caption > p::text',
                'title': 'h2::text',
                'visible_link': 'cite::text'
            },
            'de_ip_news_items': {
                'container': 'ul.b_vList li',
                'link': ' h5 a::attr(href)',
                'snippet': 'p::text',
                'title': ' h5 a::text',
                'visible_link': 'cite::text'
            },
        },
        'ads_main': {
            'us_ip': {
                'container': '#b_results .b_ad',
                'result_container': '.sb_add',
                'link': 'h2 > a::attr(href)',
                'snippet': '.sb_addesc::text',
                'title': 'h2 > a::text',
                'visible_link': 'cite::text'
            },
            'de_ip': {
                'container': '#b_results .b_ad',
                'result_container': '.sb_add',
                'link': 'h2 > a::attr(href)',
                'snippet': '.b_caption > p::text',
                'title': 'h2 > a::text',
                'visible_link': 'cite::text'
            }
        }
    }

    image_search_selectors = {
        'results': {
            'ch_ip': {
                'container': '#dg_c .imgres',
                'result_container': '.dg_u',
                'link': 'a.dv_i::attr(m)'
            },
        }
    }

    # NOTE: the redundant __init__ that only delegated to super() was removed;
    # the inherited constructor behaves identically.

    def after_parsing(self):
        """Clean the urls.

        The image url data is in the ``m`` attribute, e.g.::

            m={ns:"images.1_4",k:"5018",mid:"46CE8A1D71B04B408784F0219B488A5AE91F972E",
            surl:"http://berlin-germany.ca/",imgurl:"http://berlin-germany.ca/images/berlin250.jpg",
            oh:"184",tft:"45",oi:"http://berlin-germany.ca/images/berlin250.jpg"}
        """
        super().after_parsing()

        if self.searchtype == 'normal':
            self.no_results = False
            if self.no_results_text:
                self.no_results = self.query in self.no_results_text \
                    or 'Do you want results only for' in self.no_results_text

        if self.searchtype == 'image':
            for key, i in self.iter_serp_items():
                for regex in (
                    r'imgurl:"(?P<url>.*?)"',
                ):
                    result = re.search(regex, self.search_results[key][i]['link'])
                    if result:
                        self.search_results[key][i]['link'] = result.group('url')
                        break
class YahooParser(Parser):
    """Parses SERP pages of the Yahoo search engine."""

    search_engine = 'yahoo'
    search_types = ['normal', 'image']

    no_results_selector = []
    effective_query_selector = ['.msg #cquery a::attr(href)']
    num_results_search_selectors = ['#pg > span:last-child', '.compPagination span::text']
    page_number_selectors = ['#pg > strong::text']

    normal_search_selectors = {
        'results': {
            'de_ip': {
                'container': '#main',
                'result_container': '.res',
                'link': 'div > h3 > a::attr(href)',
                'snippet': 'div.abstr::text',
                'title': 'div > h3 > a::text',
                'visible_link': 'span.url::text'
            },
            'de_ip_december_2015': {
                'container': '#main',
                'result_container': '.searchCenterMiddle li',
                'link': 'h3.title a::attr(href)',
                'snippet': '.compText p::text',
                'title': 'h3.title a::text',
                'visible_link': 'span::text'
            },
        },
    }

    image_search_selectors = {
        'results': {
            'ch_ip': {
                'container': '#results',
                'result_container': '#sres > li',
                'link': 'a::attr(href)'
            },
        }
    }

    # NOTE: the redundant __init__ that only delegated to super() was removed;
    # the inherited constructor behaves identically.

    def after_parsing(self):
        """Clean the urls.

        The image url is in the href attribute, inside the ``&imgurl=`` parameter
        (url-encoded, without a protocol prefix).
        """
        super().after_parsing()

        if self.searchtype == 'normal':
            self.no_results = False
            if self.num_results == 0:
                self.no_results = True
            if len(self.dom.xpath(self.css_to_xpath('#cquery'))) >= 1:
                self.no_results = True

            # Fix: collect entries to drop first. The original deleted by index
            # while iter_serp_items() was still yielding, which shifts the
            # positions of subsequent entries and skips/corrupts them.
            # Delete in descending index order so earlier deletions do not
            # invalidate later indices.
            doomed = [(key, i) for key, i in self.iter_serp_items()
                      if self.search_results[key][i]['visible_link'] is None]
            for key, i in sorted(doomed, key=lambda ki: ki[1], reverse=True):
                del self.search_results[key][i]

        if self.searchtype == 'image':
            for key, i in self.iter_serp_items():
                for regex in (
                    r'&imgurl=(?P<url>.*?)&',
                ):
                    result = re.search(regex, self.search_results[key][i]['link'])
                    if result:
                        # TODO: Fix this manual protocol adding by parsing "rurl"
                        self.search_results[key][i]['link'] = 'http://' + unquote(result.group('url'))
                        break
class BaiduParser(Parser):
    """Parses SERP pages of the Baidu search engine."""

    search_engine = 'baidu'
    search_types = ['normal', 'image']

    num_results_search_selectors = ['#container .nums']
    no_results_selector = []
    # no such thing for baidu
    effective_query_selector = ['']
    page_number_selectors = ['.fk_cur + .pc::text']

    normal_search_selectors = {
        'results': {
            'de_ip': {
                'container': '#content_left',
                'result_container': '.result-op',
                'link': 'h3 > a.t::attr(href)',
                'snippet': '.c-abstract::text',
                'title': 'h3 > a.t::text',
                'visible_link': 'span.c-showurl::text'
            },
            'nojs': {
                'container': '#content_left',
                'result_container': '.result',
                'link': 'h3 > a::attr(href)',
                'snippet': '.c-abstract::text',
                'title': 'h3 > a::text',
                'visible_link': 'span.g::text'
            }
        },
    }

    image_search_selectors = {
        'results': {
            'ch_ip': {
                'container': '#imgContainer',
                'result_container': '.pageCon > li',
                'link': '.imgShow a::attr(href)'
            },
        }
    }

    # NOTE: the redundant __init__ that only delegated to super() was removed;
    # the inherited constructor behaves identically.

    def after_parsing(self):
        """Clean the urls.

        The image url is stored url-encoded in the ``&objurl=`` query parameter
        of the href attribute.
        """
        super().after_parsing()

        # BUGFIX: was `if self.search_engine == 'normal'`, which compares the
        # engine name ('baidu') against a search *type* and is therefore always
        # False — the no-results detection never ran. All sibling parsers test
        # self.searchtype here.
        if self.searchtype == 'normal':
            if len(self.dom.xpath(self.css_to_xpath('.hit_top_new'))) >= 1:
                self.no_results = True

        if self.searchtype == 'image':
            for key, i in self.iter_serp_items():
                for regex in (
                    r'&objurl=(?P<url>.*?)&',
                ):
                    result = re.search(regex, self.search_results[key][i]['link'])
                    if result:
                        self.search_results[key][i]['link'] = unquote(result.group('url'))
                        break
class DuckduckgoParser(Parser):
    """Parses SERP pages of the Duckduckgo search engine."""

    search_engine = 'duckduckgo'
    search_types = ['normal']

    num_results_search_selectors = []
    no_results_selector = []
    effective_query_selector = ['']
    # duckduckgo loads next pages with ajax
    page_number_selectors = ['']

    normal_search_selectors = {
        'results': {
            'de_ip': {
                'container': '#links',
                'result_container': '.result',
                'link': '.result__title > a::attr(href)',
                'snippet': 'result__snippet::text',
                'title': '.result__title > a::text',
                'visible_link': '.result__url__domain::text'
            },
            'non_javascript_mode': {
                'container': '#content',
                'result_container': '.results_links',
                'link': '.links_main > a::attr(href)',
                'snippet': '.snippet::text',
                'title': '.links_main > a::text',
                'visible_link': '.url::text'
            },
        },
    }

    # NOTE: the redundant __init__ that only delegated to super() was removed;
    # the inherited constructor behaves identically.

    def after_parsing(self):
        """Derive the no_results flag for duckduckgo pages."""
        super().after_parsing()

        if self.searchtype == 'normal':
            try:
                if 'No more results.' in self.dom.xpath(self.css_to_xpath('.no-results'))[0].text_content():
                    self.no_results = True
            except Exception:
                # BUGFIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit. The '.no-results' node may be
                # absent (IndexError) — in that case we simply cannot tell.
                pass

            # NOTE(review): this unconditionally overwrites self.no_results,
            # which makes the 'No more results.' check above effectively dead
            # whenever num_results > 0 — preserved as-is; confirm intent.
            if self.num_results > 0:
                self.no_results = False
            elif self.num_results <= 0:
                self.no_results = True
class AskParser(Parser):
    """Parses SERP pages of the Ask search engine."""

    search_engine = 'ask'
    search_types = ['normal']

    num_results_search_selectors = []
    no_results_selector = []
    effective_query_selector = ['#spell-check-result > a']
    page_number_selectors = ['.pgcsel .pg::text']

    normal_search_selectors = {
        'results': {
            'de_ip': {
                'container': '#midblock',
                'result_container': '.ptbs.ur',
                'link': '.abstract > a::attr(href)',
                'snippet': '.abstract::text',
                'title': '.txt_lg.b::text',
                'visible_link': '.durl span::text'
            },
            'de_ip_december_2015': {
                'container': '.l-mid-content',
                'result_container': '.web-result',
                'link': '.web-result-title > a::attr(href)',
                'snippet': '.web-result-description::text',
                'title': '.web-result-title > a::text',
                'visible_link': '.web-result-url::text'
            },
            # as required by raw http mode
            'de_ip_december_2015_raw_http': {
                'container': '#midblock',
                'result_container': '#teoma-results .wresult',
                'link': 'a.title::attr(href)',
                'snippet': '.abstract::text',
                'title': 'a.title::text',
                'visible_link': '.durl span::text'
            }
        },
    }
class BlekkoParser(Parser):
    """Parses SERP pages of the Blekko search engine."""

    search_engine = 'blekko'
    search_types = ['normal']

    effective_query_selector = ['']
    no_results_selector = []
    num_results_search_selectors = []

    # NOTE(review): these selectors are identical to DuckduckgoParser's
    # 'de_ip' set — possibly a copy/paste placeholder; confirm against live
    # blekko markup before relying on them.
    normal_search_selectors = {
        'results': {
            'de_ip': {
                'container': '#links',
                'result_container': '.result',
                'link': '.result__title > a::attr(href)',
                'snippet': 'result__snippet::text',
                'title': '.result__title > a::text',
                'visible_link': '.result__url__domain::text'
            }
        },
    }
def get_parser_by_url(url):
    """Get the appropriate parser by a search engine url.

    Args:
        url: The url that was used to issue the search.

    Returns:
        The correct parser that can parse results for this url.

    Raises:
        UnknowUrlException if no parser could be found for the url.
    """
    parser = None

    # CONSISTENCY FIX: the last two branches were bare `if`s, breaking the
    # elif chain — a later pattern could silently clobber an earlier match.
    # The patterns are mutually exclusive in practice, so behavior is
    # unchanged, but the chain now states the intent explicitly.
    if re.search(r'^http[s]?://www\.google', url):
        parser = GoogleParser
    elif re.search(r'^http://yandex\.ru', url):
        parser = YandexParser
    elif re.search(r'^http://www\.bing\.', url):
        parser = BingParser
    elif re.search(r'^http[s]?://search\.yahoo.', url):
        parser = YahooParser
    elif re.search(r'^http://www\.baidu\.com', url):
        parser = BaiduParser
    elif re.search(r'^https://duckduckgo\.com', url):
        parser = DuckduckgoParser
    # NOTE(review): `[a-z]{2}?` still requires exactly two letters, so e.g.
    # 'http://www.ask.com' never matches — probably meant `([a-z]{2}\.)?ask`.
    # Left unchanged pending confirmation.
    elif re.search(r'^http[s]?://[a-z]{2}?\.ask', url):
        parser = AskParser
    elif re.search(r'^http[s]?://blekko', url):
        parser = BlekkoParser

    if not parser:
        raise UnknowUrlException('No parser for {}.'.format(url))

    return parser
def get_parser_by_search_engine(search_engine):
    """Get the appropriate parser for the search_engine.

    Args:
        search_engine: The name of a search_engine.

    Returns:
        A parser class for the search_engine.

    Raises:
        NoParserForSearchEngineException if no parser could be found for the name.
    """
    # Dispatch table instead of a nine-branch elif chain; the image variants
    # ('googleimg', 'baiduimg') share the parser of their base engine.
    parsers = {
        'google': GoogleParser,
        'googleimg': GoogleParser,
        'yandex': YandexParser,
        'bing': BingParser,
        'yahoo': YahooParser,
        'baidu': BaiduParser,
        'baiduimg': BaiduParser,
        'duckduckgo': DuckduckgoParser,
        'ask': AskParser,
        'blekko': BlekkoParser,
    }
    try:
        return parsers[search_engine]
    except KeyError:
        raise NoParserForSearchEngineException('No such parser for "{}"'.format(search_engine))
def parse_serp(config, html=None, parser=None, scraper=None, search_engine=None, query=''):
    """Build a SearchEngineResultsPage object from a parse.

    If no parser is supplied then we are expected to parse again with
    the provided html. This function may be called from scraping and
    caching; when called from caching, some info is lost (like the
    current page number).

    Args:
        config: The scraper configuration mapping.
        html: Raw SERP html to parse when no ready parser is given.
        parser: An already-populated parser instance (or None).
        scraper: The scraper that produced the page (or None).
        search_engine: Engine name, used to pick a parser for `html`.
        query: The keyword that was searched for.

    Returns:
        The parsed SERP object (not yet committed to any session).
    """
    if not parser and html:
        parser = get_parser_by_search_engine(search_engine)
        parser = parser(config, query=query)
        parser.parse(html)

    serp = SearchEngineResultsPage()

    if query:
        serp.query = query

    if parser:
        serp.set_values_from_parser(parser)
    if scraper:
        serp.set_values_from_scraper(scraper)

    return serp
if __name__ == '__main__':
    """Originally part of https://github.com/NikolaiT/GoogleScraper.

    Only for testing purposes: May be called directly with a search engine
    search url. For example:

    python3 parsing.py 'http://yandex.ru/yandsearch?text=GoogleScraper&lr=178&csg=82%2C4317%2C20%2C20%2C0%2C0%2C0'

    Please note: Using this module directly makes little sense, because requesting such urls
    directly without imitating a real browser (which is done in my GoogleScraper module) makes
    the search engines return crippled html, which makes it impossible to parse.
    But for some engines it nevertheless works (for example: yandex, google, ...).
    """
    import requests

    assert len(sys.argv) >= 2, 'Usage: {} url/file'.format(sys.argv[0])
    url = sys.argv[1]

    if os.path.exists(url):
        # BUGFIX: the file handle was previously leaked (open().read()).
        with open(url, 'r') as f:
            raw_html = f.read()
        parser = get_parser_by_search_engine(sys.argv[2])
    else:
        raw_html = requests.get(url).text
        parser = get_parser_by_url(url)

    parser = parser(raw_html)
    parser.parse()
    print(parser)

    with open('/tmp/testhtml.html', 'w') as of:
        of.write(raw_html)
| 39,004 | 34.203069 | 153 | py |
GoogleScraper | GoogleScraper-master/GoogleScraper/__init__.py | # -*- coding: utf-8 -*-
"""
I switched my motto; instead of saying "fuck tomorrow"
That buck that bought a bottle could've struck the lotto.
"""
__author__ = 'Nikolai Tschacher'
__updated__ = '18.08.2018' # day.month.year
__home__ = 'incolumitas.com'
from GoogleScraper.proxies import Proxy
from GoogleScraper.config import get_config
import logging
"""
All objects imported here are exposed as the public API of GoogleScraper
"""
from GoogleScraper.core import scrape_with_config
from GoogleScraper.scraping import GoogleSearchError, MaliciousRequestDetected
logging.getLogger(__name__)
| 595 | 23.833333 | 78 | py |
GoogleScraper | GoogleScraper-master/GoogleScraper/async_mode.py | import asyncio
import aiohttp
import datetime
from urllib.parse import urlencode
from GoogleScraper.parsing import get_parser_by_search_engine, parse_serp
from GoogleScraper.http_mode import get_GET_params_for_search_engine, headers
from GoogleScraper.scraping import get_base_search_url_by_search_engine
from GoogleScraper.utils import get_some_words
from GoogleScraper.output_converter import store_serp_result
import logging
logger = logging.getLogger(__name__)
class AsyncHttpScrape(object):
    """Scrape asynchronously using asyncio / aiohttp.

    https://aiohttp.readthedocs.io/en/v3.0.1/client.html

    Some search engines don't block after a certain amount of requests.
    Google surely does (after very few parallel requests). But with bing,
    for example, it's (as of 18.01.2015) no problem to scrape 100 unique
    pages in 3 seconds.
    """

    def __init__(self, config, query='', page_number=1, search_engine='google', scrape_method='http-async'):
        """Prepare one request: parser class, base url, GET params, headers."""
        self.config = config
        self.query = query
        self.page_number = page_number
        self.search_engine_name = search_engine
        self.search_type = 'normal'
        self.scrape_method = scrape_method
        self.requested_at = None
        self.requested_by = 'localhost'
        # Note: self.parser holds a parser *class* until __call__ succeeds,
        # at which point it is replaced by a populated parser instance.
        self.parser = get_parser_by_search_engine(self.search_engine_name)
        self.base_search_url = get_base_search_url_by_search_engine(self.config, self.search_engine_name, 'http')
        self.params = get_GET_params_for_search_engine(self.query, self.search_engine_name,
                                                       search_type=self.search_type)
        self.headers = headers
        self.status = 'successful'

    async def __call__(self):
        """Fire the request; return self on HTTP 200, else None."""
        url = self.base_search_url + urlencode(self.params)

        async with aiohttp.ClientSession() as session:
            async with session.get(url, params=self.params, headers=self.headers) as response:
                if response.status != 200:
                    self.status = 'not successful: ' + str(response.status)
                self.requested_at = datetime.datetime.utcnow()
                logger.info('[+] {} requested keyword \'{}\' on {}. Response status: {}'.format(
                    self.requested_by,
                    self.query,
                    self.search_engine_name,
                    response.status))
                logger.debug('[i] URL: {} HEADERS: {}'.format(
                    url,
                    self.headers))
                if response.status == 200:
                    body = await response.text()
                    self.parser = self.parser(config=self.config, html=body)
                    return self

        return None
        # BUGFIX: a dead `return request` (undefined name, unreachable leftover
        # from a pre-async-rewrite nested closure) was removed here.
class AsyncScrapeScheduler(object):
    """Processes the single requests in an asynchronous way.

    Pulls jobs off `scrape_jobs` in batches of at most
    `max_concurrent_requests`, runs each batch on the event loop, and
    persists/caches the parsed SERPs.
    """

    def __init__(self, config, scrape_jobs, cache_manager=None, session=None, scraper_search=None, db_lock=None):
        self.cache_manager = cache_manager
        self.config = config
        self.max_concurrent_requests = self.config.get('max_concurrent_requests')
        self.scrape_jobs = scrape_jobs
        self.session = session
        self.scraper_search = scraper_search
        self.db_lock = db_lock

        self.loop = asyncio.get_event_loop()
        self.requests = []
        self.results = []

    def get_requests(self):
        """Fill self.requests with the next batch of AsyncHttpScrape objects."""
        self.requests = []
        request_number = 0

        while True:
            request_number += 1
            try:
                job = self.scrape_jobs.pop()
            except IndexError:
                # no jobs left; run whatever we collected so far
                break

            if job:
                self.requests.append(AsyncHttpScrape(self.config, **job))

            if request_number >= self.max_concurrent_requests:
                break

    def run(self):
        """Consume all scrape jobs batch by batch until none remain."""
        while True:
            self.get_requests()

            if not self.requests:
                break

            self.results = self.loop.run_until_complete(asyncio.wait([r() for r in self.requests]))

            # asyncio.wait returns (done, pending); we only look at done tasks.
            for task in self.results[0]:
                scrape = task.result()
                if scrape:
                    if self.cache_manager:
                        self.cache_manager.cache_results(scrape.parser, scrape.query, scrape.search_engine_name,
                                                         scrape.scrape_method, scrape.page_number)
                    if scrape.parser:
                        serp = parse_serp(self.config, parser=scrape.parser, scraper=scrape, query=scrape.query)
                        if self.scraper_search:
                            self.scraper_search.serps.append(serp)
                        if self.session:
                            self.session.add(serp)
                            self.session.commit()
                        store_serp_result(serp, self.config)
if __name__ == '__main__':
    # Ad-hoc smoke test: scrape one random keyword on bing asynchronously.
    from GoogleScraper.config import get_config
    from GoogleScraper.scrape_jobs import default_scrape_jobs_for_keywords

    some_words = get_some_words(n=1)
    cfg = get_config()
    scrape_jobs = list(default_scrape_jobs_for_keywords(some_words, ['bing'], 'http-async', 1))
    manager = AsyncScrapeScheduler(cfg, scrape_jobs)
    manager.run()
| 5,254 | 33.123377 | 134 | py |
GoogleScraper | GoogleScraper-master/GoogleScraper/scrape_config.py | # -*- coding: utf-8 -*-
"""
This is the basic GoogleScraper configuration file.
All options are basic Python data types. You may use all of Python's language
capabilities to specify settings in this file.
"""
"""
[OUTPUT]
Settings which control how GoogleScraper represents it's results
and handles output.
"""
# How and if results are printed when running GoogleScraper.
# if set to 'all', then all data from results are outputted
# if set to 'summarize', then only a summary of results is given.
# if set to anything else, no output will be given at all.
print_results = 'all'
# The name of the database that is written to the same
# directory where GoogleScraper will be called.
database_name = 'google_scraper'
# The file name of the output
# The file name also determine the format of how
# to store the results.
# filename.json => save results as json
# filename.csv => save a csv file
# If set to None, don't write any file.
output_filename = ''
# Whether sqlalchemy should log all stuff to stdout
# useful for devs. Don't set this to True if you don't know
# what you are doing.
log_sqlalchemy = False
# Set the debug level of the application. Use the string representation
# instead of the numbers. High numbers will output less, lower numbers more.
# CRITICAL = 50
# FATAL = CRITICAL
# ERROR = 40
# WARNING = 30
# WARN = WARNING
# INFO = 20
# DEBUG = 10
# NOTSET = 0
log_level = 'INFO'
# Log format string
log_format = '[%(threadName)s] - %(asctime)s - %(name)s - %(levelname)s - %(message)s'
# Logfile
log_file = 'googlescraper.log'
"""
[SCRAPING]
Configuration parameters that control the scraping process. You will most
likely want to change these values.
"""
# The search queries to search for, separated by newlines. Intend every new
# keyword-line at least more than the next keyword.
keywords = []
# The keyword file. If this is a valid file path, the keywords params will be ignored and
# the ones from the file will be taken. Each keyword must be on a separate line.
keyword_file = ''
# How many results per SERP page
num_results_per_page = 10
# How many pages should be requested for each single keyword
num_pages_for_keyword = 1
# This arguments sets the number of browser instances for selenium mode or the number of worker threads in http mode.
num_workers = 1
# Maximum of workers
# When scraping with multiple search engines and more than one worker, the number of total workers
# becomes quite high very fast, so we set a upper limit here. Leaving this out, is quite dangerous in selenium mode.
maximum_workers = 20
# The search offset on which page to start scraping.
# Pages begin at 1
search_offset = 1
# In some countries the main search engine domain is blocked. Thus, search engines
# have different ip on which they are reachable. If you set a file with urls for the search engine,
# then GoogleScraper will pick a random url for any scraper instance.
# One url per line. It needs to be a valid url, not just an ip address!
google_ip_file = ''
# List of supported search engines
# If you add support for another search engine (of course implement it in the
# appropriate places) add it in this list.
supported_search_engines = ['google', 'yandex', 'bing', 'yahoo', 'baidu', 'duckduckgo', 'ask']
# The search engine(s) to use. For the supported search engines, see above "supported_search_engines"
search_engines = ['google', ]
# The base search urls
# Ready to append the parameters at the end to fine tune the search.
# The google base search url
google_search_url = 'https://www.google.com/search?'
# whether to change the search settings prior to scraping
# when this is set to False google will search with
# the default search settings that your (selenium) browser supports
google_selenium_search_settings = False
# manually select search settings
# only possible in visible browsers
# when this is set, google won't block you as likely
google_selenium_manual_settings = False
# the following options only take effect when
# google_selenium_search_settings is set to True
# Search Settings for Google Scraping in Selenium Mode
# 10, 20, 30, 50, 100
google_selenium_num_results = 100
# Private results help find more relevant content for you, including content and connections that only you can see.
google_selenium_personalization = False
# use a country code such as US, DE, GB, CH, ...
google_selenium_region = 'DE'
google_selenium_safe_search = False
# the language for google search results
google_selenium_language = 'English'
# The yandex base search url
yandex_search_url = 'http://yandex.ru/yandsearch?'
# The bing base search url
bing_search_url = 'http://www.bing.com/search?'
# The yahoo base search url
yahoo_search_url = 'https://de.search.yahoo.com/search?'
# The baidu base search url
baidu_search_url = 'http://www.baidu.com/s?'
# The duckduckgo base search url
duckduckgo_search_url = 'https://duckduckgo.com/'
# duckduckgo url for http mode
http_duckduckgo_search_url = 'https://duckduckgo.com/html/?'
# The ask base search url
ask_search_url = 'http://de.ask.com/web?'
# The search type. Currently, the following search modes are
# supported for some search engines: normal, video, news and image search.
# "normal" search type is supported in all search engines.
search_type = 'normal'
# The scrape method. Can be 'http' or 'selenium' or 'http-async'
# http mode uses http packets directly, whereas selenium mode uses a real browser.
# http_async uses asyncio.
scrape_method = 'selenium'
# If scraping with the own IP address should be allowed.
# If this is set to False and you don't specify any proxies,
# GoogleScraper cannot run.
use_own_ip = True
# Whether to check proxies before starting the scrape
check_proxies = True
# You can set the internal behaviour of GoogleScraper here
# When GoogleScraper is invoked as a command line script, it is very much desirable
# to be as robust as possible. But when used from another program, we need immediate
# response when something fails.
raise_exceptions_while_scraping = True
# The following two options only make sense when search_engine is set to "googleimg"
# do NOT use them unless you are sure what you are going to do
image_type = None
image_size = None
"""
[GLOBAL]
Global configuration parameters that apply on all modes.
"""
# The proxy file. If this is a valid file path, each line will represent a proxy.
# Example file:
# socks5 23.212.45.13:1080 username:password
# socks4 23.212.45.13:80 username:password
# http 23.212.45.13:80
proxy_file = ''
# Whether to continue the last scrape when ended early.
continue_last_scrape = True
# Proxies stored in a MySQL database. If you set a parameter here, GoogleScraper will look for proxies
# in a table named 'proxies' for proxies with the following format=
# CREATE TABLE proxies (
# id INTEGER PRIMARY KEY NOT NULL,
# host VARCHAR(255) NOT NULL,
# port SMALLINT,
# username VARCHAR(255),
# password VARCHAR(255),
# protocol ENUM('socks5', 'socks4', 'http')
# );
# Specify the connection details in the following format: mysql://<username>:<password>@<host>/<dbname>
# Example: mysql://root:my_secret_password@localhost/myproxydb
mysql_proxy_db = ''
# Whether to manually clean cache files. For development purposes
clean_cache_files = False
# Proxy checker url
proxy_check_url = 'http://canihazip.com/s'
# Proxy info url
proxy_info_url = 'http://ipinfo.io/json'
# The basic search url
# Default is google
base_search_url = 'http://www.google.com/search'
# Whether caching shall be enabled
do_caching = True
# Whether the whole html files should be cached or
# if the file should be stripped from unnecessary data like javascripts, comments, ...
minimize_caching_files = True
# If set, then compress/decompress cached files
compress_cached_files = True
# Use either bz2 or gz to compress cached files
compressing_algorithm = 'gz'
# The relative path to the cache directory
cachedir = '.scrapecache/'
# After how many hours should the cache be cleaned
clean_cache_after = 48
# Turn off sleeping pauses altogether.
# Don't set this to False unless you know what you are doing.
do_sleep = True
# Sleeping distribution.
# Sleep a given amount of time as a function of the number of searches done.
# The scraper in selenium mode makes random pauses at certain times.
# Please add integer keys to sleeping_ranges such that the sum of
# the keys amounts to 100. Then the key defines the probability of how many times
# this sleeping range occurs in a total of 100 searches.
# For example:
# sleeping_ranges = {
# 70: (1, 2), # sleep between 1-2 seconds with probability 70/100
# 20: (2, 6), # sleep between 2-6 seconds with probability 20/100
# 5: (10, 20), # sleep between 10-20 seconds with probability 5/100
# 3: (20, 30), # ...
# 2: (20, 40),
# }
sleeping_ranges = {
70: (1, 3),
20: (3, 6),
5: (10, 20),
3: (20, 25),
2: (25, 30),
}
# Search engine specific sleeping ranges
# If you add the name of the search engine before a
# option {search_engine_name}_sleeping_ranges, then
# only this search engine will sleep the given ranges.
google_sleeping_ranges = {
70: (1, 3),
20: (3, 6),
5: (10, 20),
3: (20, 25),
2: (25, 30),
}
# sleep a certain time after the Nth page has been scraped
fixed_sleeping_ranges = {
1000: (180, 420),
2000: (180, 420),
3000: (180, 420),
4000: (180, 420),
5000: (180, 420),
6000: (180, 420),
7000: (180, 420),
8000: (180, 420),
9000: (180, 420),
10000: (180, 420),
}
# If the search should be simulated instead of being done.
# Useful to learn about the quantity of keywords to scrape and such.
# Won't fire any requests.
simulate = False
# Internal use only
fix_cache_names = False
"""
[SELENIUM]
All settings that only apply for requesting with real browsers.
"""
# which browser to use in selenium mode. Valid values = ('chrome', 'firefox')
sel_browser = 'chrome'
# in which mode the browser is started. Valid values = ('normal', 'headless')
browser_mode = 'headless'
# chrome driver executable path
# get chrome drivers here: https://chromedriver.storage.googleapis.com/index.html?path=2.41/
chromedriver_path = '/home/nikolai/projects/private/Drivers/chromedriver'
# geckodriver executable path
# get gecko drivers here: https://github.com/mozilla/geckodriver/releases
geckodriver_path = '/home/nikolai/projects/private/Drivers/geckodriver'
# path to firefox binary
firefox_binary_path = '/home/nikolai/firefox/firefox'
# path to chromium browser binary
chrome_binary_path = '/usr/bin/chromium-browser'
# Manual captcha solving
# If this parameter is set to a Integer, the browser waits for the user
# to enter the captcha manually whenever Google detected the script as malicious.
# Set to False to disable.
# If the captcha isn't solved in the specified time interval, the browser instance
# with the current proxy is discarded.
manual_captcha_solving = True
# captch solving service
# enable captcha solving service
captcha_solving_service = False
# @TODO: Integrate https://2captcha.com/
# Xvfb display option
# You should start xvfb on your own before this option has any effect.
# Format: [hostname]:displaynumber[.screennumber]; see the X(7) manual for details
# will set environment variable $DISPLAY to it
xvfb_display = None
# how many tabs per instance
num_tabs = 1
"""
[HTTP]
All settings that target the raw http packet scraping mode.
"""
# You may overwrite the global search urls in the SCRAPING section
# for each mode
# search engine urls for the specific engines
# The Google search url specifically for http mode
google_search_url = 'https://www.google.com/search?'
"""
[HTTP_ASYNC]
Settings specific for the asynchronous mode.
"""
# The number of concurrent requests that are used for scraping
max_concurrent_requests = 100
"""
[PROXY_POLICY]
How the proxy policy works.
"""
# How long to sleep (in seconds) when the proxy got detected.
proxy_detected_timeout = 400
# Whether to stop workers when they got detected instead of waiting.
stop_on_detection = True
| 12,119 | 30.480519 | 117 | py |
GoogleScraper | GoogleScraper-master/GoogleScraper/selenium_mode.py | # -*- coding: utf-8 -*-
import tempfile
import threading
from urllib.parse import quote
import json
import datetime
import time
import math
import random
import re
import sys
import os
try:
from selenium import webdriver
from selenium.common.exceptions import TimeoutException, WebDriverException
from selenium.common.exceptions import ElementNotVisibleException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
from selenium.webdriver.support import expected_conditions as EC # available since 2.26.0
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
except ImportError as ie:
print(ie)
sys.exit('You can install missing modules with `pip3 install [modulename]`')
from GoogleScraper.scraping import SearchEngineScrape, SeleniumSearchError, get_base_search_url_by_search_engine, MaliciousRequestDetected
from GoogleScraper.user_agents import random_user_agent
import logging
logger = logging.getLogger(__name__)
class NotSupportedException(Exception):
    """Raised when a requested operation is not supported by the
    selected search engine or scraping mode."""
def check_detection(config, search_engine_name):
    """Check whether the search engine specified by search_engine_name
    has blocked us.

    Launches a headless Chrome, performs a test search and inspects the
    result page for a captcha prompt.

    Returns:
        A (code, status) tuple where code is 'DETECTED' or 'UNDETECTED'
        and status is a human-readable trace of the check.

    Raises:
        NotImplementedError: for any engine other than 'google'.
    """
    status = ''
    chromedriver = config.get('chromedriver_path', '/usr/bin/chromedriver')

    options = webdriver.ChromeOptions()
    options.add_argument('headless')
    options.add_argument('window-size=1200x600')
    browser = webdriver.Chrome(chrome_options=options, executable_path=chromedriver)

    # BUGFIX: every reachable path previously returned (or raised) before the
    # browser.quit() call, which sat in unreachable tail code — leaking a
    # Chrome process per invocation. try/finally guarantees cleanup now.
    try:
        if search_engine_name != 'google':
            raise NotImplementedError('Detection check only implemented for Google Right now.')

        url = get_base_search_url_by_search_engine(config, 'google', 'selenium')
        browser.get(url)

        def check(browser, status):
            # Inspect current url/page for Google's captcha markers.
            needles = SearchEngineScrape.malicious_request_needles['google']
            if needles['inurl'] in browser.current_url and needles['inhtml'] in browser.page_source:
                status += 'Google is asking for a captcha! '
                code = 'DETECTED'
            else:
                status += 'No captcha prompt detected. '
                code = 'UNDETECTED'
            return (code, status)

        try:
            search_input = WebDriverWait(browser, 5).until(
                EC.visibility_of_element_located((By.NAME, 'q')))
            status += 'Got a search input field. '
        except TimeoutException:
            status += 'No search input field located after 5 seconds. '
            return check(browser, status)

        try:
            # random query
            search_input.send_keys('President of Finland' + Keys.ENTER)
            status += 'Google Search successful! '
        except WebDriverException:
            status += 'Cannot make a google search! '
            return check(browser, status)

        return check(browser, status)
    finally:
        browser.quit()
def get_selenium_scraper_by_search_engine_name(config, search_engine_name, *args, **kwargs):
    """Resolve and instantiate the selenium scraper for a search engine.

    Looks up a class named '<Enginename>SelScrape' in this module's
    namespace; if no such specialization exists, the generic SelScrape
    is used instead.

    Args:
        search_engine_name: The search engine name.
        args: Positional arguments forwarded to the scraper constructor.
        kwargs: Keyword arguments forwarded to the scraper constructor.

    Returns:
        Either a concrete SelScrape subclass instance specific for the
        given search engine or a plain SelScrape instance.
    """
    specific_class_name = '{}{}SelScrape'.format(
        search_engine_name[0].upper(), search_engine_name[1:].lower())
    module_namespace = globals()
    scraper_class = module_namespace.get(specific_class_name, SelScrape)
    return scraper_class(config, *args, **kwargs)
class SelScrape(SearchEngineScrape, threading.Thread):
    """Instances of this class make use of selenium browser
       objects to query the search engines on a high level.
    """

    # CSS selector of the "next page" link per engine
    # (empty string = no such link available).
    next_page_selectors = {
        'google': '#pnnext',
        'yandex': '.pager__item_kind_next',
        'bing': '.sb_pagN',
        'yahoo': '#pg-next',
        'baidu': '.n',
        'ask': '#paging div a.txt3.l_nu',
        'blekko': '',
        'duckduckgo': '',
        'googleimg': '#pnnext',
        'baiduimg': '.n',
    }

    # Locator tuples (By strategy, value) of each engine's search box.
    # Bugfix: removed a duplicate 'google' key that silently overwrote
    # the earlier (identical) entry.
    input_field_selectors = {
        'google': (By.NAME, 'q'),
        'yandex': (By.NAME, 'text'),
        'bing': (By.NAME, 'q'),
        'yahoo': (By.NAME, 'p'),
        'baidu': (By.NAME, 'wd'),
        'duckduckgo': (By.NAME, 'q'),
        'ask': (By.NAME, 'q'),
        'blekko': (By.NAME, 'q'),
        'googleimg': (By.NAME, 'as_q'),
        'baiduimg': (By.NAME, 'word'),
    }

    # Locators of additional (advanced search) parameter input fields.
    param_field_selectors = {
        'googleimg': {
            'image_type': (By.ID, 'imgtype_input'),
            'image_size': (By.ID, 'imgsz_input'),
        },
    }

    # Config keys that may supply values for the parameter fields above.
    search_params = {
        'googleimg': {
            'image_type': None,
            'image_size': None,
        },
    }

    # Landing pages for normal web search.
    normal_search_locations = {
        'google': 'https://www.google.com/',
        'yandex': 'http://www.yandex.ru/',
        'bing': 'http://www.bing.com/',
        'yahoo': 'https://yahoo.com/',
        'baidu': 'http://baidu.com/',
        'duckduckgo': 'https://duckduckgo.com/',
        'ask': 'http://ask.com/',
        'blekko': 'http://blekko.com/',
    }

    # Landing pages for image search (None = unsupported).
    image_search_locations = {
        'google': 'https://www.google.com/imghp?tbm=isch',
        'yandex': 'http://yandex.ru/images/',
        'bing': 'https://www.bing.com/?scope=images',
        'yahoo': 'http://images.yahoo.com/',
        'baidu': 'http://image.baidu.com/',
        'duckduckgo': None,  # duckduckgo doesnt't support direct image search
        'ask': 'http://www.ask.com/pictures/',
        'blekko': None,
        'googleimg': 'https://www.google.com/advanced_image_search',
        'baiduimg': 'http://image.baidu.com/',
    }
    def __init__(self, config, *args, captcha_lock=None, browser_num=1, **kwargs):
        """Create a new SelScraper thread Instance.

        Args:
            captcha_lock: To sync captcha solving (stdin)
            proxy: Optional, if set, use the proxy to route all scraping through it.
            browser_num: A unique, semantic number for each thread.
        """
        self.search_input = None

        threading.Thread.__init__(self)
        SearchEngineScrape.__init__(self, config, *args, **kwargs)

        # Which browser to drive and whether to run it headless or visible.
        self.browser_type = self.config.get('sel_browser', 'chrome').lower()
        self.browser_mode = self.config.get('browser_mode', 'headless').lower()
        self.browser_num = browser_num
        self.captcha_lock = captcha_lock
        self.scrape_method = 'selenium'

        # number of tabs per instance
        self.number_of_tabs = self.config.get('num_tabs', 1)

        self.xvfb_display = self.config.get('xvfb_display', None)

        self.search_param_values = self._get_search_param_values()

        # Randomize the User-Agent per instance.
        self.user_agent = random_user_agent()

        # get the base search url based on the search engine.
        # NOTE(review): self.search_engine_name is presumably set by
        # SearchEngineScrape.__init__ above — confirm in scraping.py.
        self.base_search_url = get_base_search_url_by_search_engine(self.config, self.search_engine_name, self.scrape_method)
        super().instance_creation_info(self.__class__.__name__)
def switch_to_tab(self, tab_number):
"""Switch to tab identified by tab_number
https://stackoverflow.com/questions/46425797/opening-link-in-the-new-tab-and-switching-between-tabs-selenium-webdriver-pyt
https://gist.github.com/lrhache/7686903
"""
assert tab_number < self.number_of_tabs
first_link = first_result.find_element_by_tag_name('a')
# Save the window opener (current window, do not mistaken with tab... not the same)
main_window = browser.current_window_handle
# Open the link in a new tab by sending key strokes on the element
# Use: Keys.CONTROL + Keys.SHIFT + Keys.RETURN to open tab on top of the stack
first_link.send_keys(Keys.CONTROL + Keys.RETURN)
# Switch tab to the new tab, which we will assume is the next one on the right
browser.find_element_by_tag_name('body').send_keys(Keys.CONTROL + Keys.TAB)
# Put focus on current window which will, in fact, put focus on the current visible tab
browser.switch_to_window(main_window)
# do whatever you have to do on this page, we will just got to sleep for now
sleep(2)
# Close current tab
browser.find_element_by_tag_name('body').send_keys(Keys.CONTROL + 'w')
# Put focus on current window which will be the window opener
browser.switch_to_window(main_window)
    def set_proxy(self):
        """Install a proxy on the communication channel.

        No-op in selenium mode: the proxy is configured once at webdriver
        creation time (see _get_Chrome / _get_Firefox).
        """

    def switch_proxy(self, proxy):
        """Switch the proxy on the communication channel.

        No-op in selenium mode: a running webdriver keeps the proxy it was
        created with.
        """

    def proxy_check(self, proxy):
        """Verify that requests are actually routed through self.proxy.

        Loads the configured 'proxy_info_url' and parses the first JSON
        object found in the page source; a truthy 'ip' entry means the
        proxy works. The outcome is reported via update_proxy_status().

        Returns:
            True if the proxy appears to be in use, False otherwise.
        """
        assert self.proxy and self.webdriver, 'Scraper instance needs valid webdriver and proxy instance to make the proxy check'

        online = False
        status = 'Proxy check failed: {host}:{port} is not used while requesting'.format(**self.proxy.__dict__)
        ipinfo = {}

        try:
            self.webdriver.get(self.config.get('proxy_info_url'))
            try:
                # The info page embeds a JSON blob in its HTML; grab the
                # first {...} group and decode it.
                text = re.search(r'(\{.*?\})', self.webdriver.page_source, flags=re.DOTALL).group(0)
                ipinfo = json.loads(text)
            except ValueError as v:
                logger.critical(v)
        except Exception as e:
            # Any navigation/parsing failure becomes the status message.
            status = str(e)

        if 'ip' in ipinfo and ipinfo['ip']:
            online = True
            status = 'Proxy is working.'
        else:
            logger.warning(status)

        super().update_proxy_status(status, ipinfo, online)

        return online
def _save_debug_screenshot(self):
"""
Saves a debug screenshot of the browser window to figure
out what went wrong.
"""
tempdir = tempfile.gettempdir()
location = os.path.join(tempdir, '{}_{}_debug_screenshot.png'.format(self.search_engine_name, self.browser_type))
self.webdriver.get_screenshot_as_file(location)
def _set_xvfb_display(self):
# TODO: should we check the format of the config?
if self.xvfb_display:
os.environ['DISPLAY'] = self.xvfb_display
def _get_webdriver(self):
"""Return a webdriver instance and set it up with the according profile/ proxies.
https://stackoverflow.com/questions/49162667/unknown-error-call-function-result-missing-value-for-selenium-send-keys-even
Get Chrome Drivers here: https://chromedriver.storage.googleapis.com/index.html?path=2.41/
Returns:
The appropriate webdriver mode according to self.browser_type. If no webdriver mode
could be found, return False.
"""
if self.browser_type == 'chrome':
return self._get_Chrome()
elif self.browser_type == 'firefox':
return self._get_Firefox()
return False
    def _get_Chrome(self):
        """Create a Chrome webdriver in self.webdriver.

        Returns:
            True on success. On driver failure the WebDriverException is
            re-raised (the trailing `return False` is unreachable).
        """
        try:
            chrome_options = webdriver.ChromeOptions()
            # NOTE(review): empty string appears to mean "use the default
            # Chrome binary" — confirm this is intentional.
            chrome_options.binary_location = ""
            # save resouces, options are experimental
            # See here:
            # https://news.ycombinator.com/item?id=14103503
            # https://stackoverflow.com/questions/49008008/chrome-headless-puppeteer-too-much-cpu
            # https://engineering.21buttons.com/crawling-thousands-of-products-using-aws-lambda-80332e259de1
            chrome_options.add_argument("test-type")
            chrome_options.add_argument('--js-flags="--expose-gc --max-old-space-size=500"')
            chrome_options.add_argument(
                'user-agent={}'.format(self.user_agent))
            chrome_options.add_argument('--enable-precise-memory-info')
            chrome_options.add_argument('--disable-default-apps')
            chrome_options.add_argument('--disable-extensions')
            chrome_options.add_argument('--disable-gpu')
            chrome_options.add_argument('--no-sandbox')
            chrome_options.add_argument('--incognito')
            chrome_options.add_argument('--disable-application-cache')
            if self.browser_mode == 'headless':
                chrome_options.add_argument('headless')
                #chrome_options.add_argument('window-size=1200x600') # optional
            if self.proxy:
                # Route all browser traffic through the configured proxy.
                chrome_options.add_argument(
                    '--proxy-server={}://{}:{}'.format(self.proxy.proto, self.proxy.host, self.proxy.port))
            chromedriver_path = self.config.get('chromedriver_path')
            self.webdriver = webdriver.Chrome(executable_path=chromedriver_path,
                                              chrome_options=chrome_options)
            return True
        except WebDriverException as e:
            # we don't have a chrome executable or a chrome webdriver installed
            raise
        # NOTE(review): unreachable — the except branch above re-raises.
        return False
    def _get_Firefox(self):
        """Create a Firefox webdriver in self.webdriver.

        Configures binary/geckodriver paths from the config, applies the
        user agent and optional headless mode, and wires the proxy
        (socks4/socks5 or http) into the profile preferences.

        Returns:
            True on success, False when the webdriver could not be created.
        """
        try:
            bin_path = self.config.get('firefox_binary_path')
            binary = FirefoxBinary(bin_path)
            geckodriver_path = self.config.get('geckodriver_path')
            options = FirefoxOptions()
            profile = webdriver.FirefoxProfile()
            options.add_argument(
                'user-agent={}'.format(self.user_agent))
            if self.browser_mode == 'headless':
                options.set_headless(headless=True)
                #options.add_argument('window-size=1200x600') # optional
            if self.proxy:
                # this means that the proxy is user set, regardless of the type
                profile.set_preference("network.proxy.type", 1)
                if self.proxy.proto.lower().startswith('socks'):
                    profile.set_preference("network.proxy.socks", self.proxy.host)
                    profile.set_preference("network.proxy.socks_port", self.proxy.port)
                    # socks version from the protocol suffix ('socks5' -> 5, otherwise 4)
                    profile.set_preference("network.proxy.socks_version", 5 if self.proxy.proto[-1] == '5' else 4)
                    profile.update_preferences()
                elif self.proxy.proto == 'http':
                    profile.set_preference("network.proxy.http", self.proxy.host)
                    profile.set_preference("network.proxy.http_port", self.proxy.port)
                else:
                    raise ValueError('Invalid protocol given in proxyfile.')
                # Persist the proxy preferences into the profile.
                profile.update_preferences()
            self.webdriver = webdriver.Firefox(firefox_binary=binary, firefox_options=options,
                                               executable_path=geckodriver_path, firefox_profile=profile)
            return True
        except WebDriverException as e:
            # reaching here is bad, since we have no available webdriver instance.
            logger.error(e)
        return False
def malicious_request_detected(self):
"""Checks whether a malicious request was detected.
"""
needles = self.malicious_request_needles[self.search_engine_name]
return needles and needles['inurl'] in self.webdriver.current_url \
and needles['inhtml'] in self.webdriver.page_source
    def handle_request_denied(self):
        """Checks whether Google detected a potentially harmful request.

        Whenever such potential abuse is detected, Google shows a captcha.
        This method just blocks as long as someone entered the captcha in the browser window.
        When the window is not visible (For example when using chrome headless), this method
        makes a png from the html code and shows it to the user, which should enter it in a command
        line.

        Returns:
            The search input field.

        Raises:
            MaliciousRequestDetected when there was no way to stop Google from denying our requests.
        """
        # selenium webdriver objects have no status code :/
        if self.malicious_request_detected():
            # Report the block upstream with a synthetic status code.
            super().handle_request_denied('400')

            # only solve when in non headless mode
            if self.config.get('manual_captcha_solving', False) and self.config.get('browser_mode') != 'headless':
                with self.captcha_lock:
                    solution = input('Please solve the captcha in the browser! Enter any key when done...')
                    try:
                        self.search_input = WebDriverWait(self.webdriver, 7).until(
                            EC.visibility_of_element_located(self._get_search_input_field()))
                    except TimeoutException:
                        raise MaliciousRequestDetected('Requesting with this IP address or cookies is not possible at the moment.')
            elif self.config.get('captcha_solving_service', False):
                # implement request to manual captcha solving service such
                # as https://2captcha.com/
                pass
            else:
                # Just wait until the user solves the captcha in the browser window
                # 10 hours if needed :D
                logger.info('Waiting for user to solve captcha')
                return self._wait_until_search_input_field_appears(10 * 60 * 60)
def _get_search_param_values(self):
search_param_values = {}
if self.search_engine_name in self.search_params:
for param_key in self.search_params[self.search_engine_name]:
cfg = self.config.get(param_key, None)
if cfg:
search_param_values[param_key] = cfg
return search_param_values
def _get_search_input_field(self):
"""Get the search input field for the current search_engine.
Returns:
A tuple to locate the search field as used by seleniums function presence_of_element_located()
"""
return self.input_field_selectors[self.search_engine_name]
def _get_search_param_fields(self):
if self.search_engine_name in self.param_field_selectors:
return self.param_field_selectors[self.search_engine_name]
else:
return {}
    def _wait_until_search_input_field_appears(self, max_wait=5):
        """Waits until the search input field can be located for the current search engine

        Args:
            max_wait: How long to wait maximally before returning False.

        Returns: False if the search input field could not be located within the time
            or the handle to the search input field.
        """
        def find_visible_search_input(driver):
            # Used as a WebDriverWait predicate; truthy element ends the wait.
            input_field = driver.find_element(*self._get_search_input_field())
            return input_field

        try:
            search_input = WebDriverWait(self.webdriver, max_wait).until(find_visible_search_input)
            return search_input
        except TimeoutException as e:
            logger.error('{}: TimeoutException waiting for search input field: {}'.format(self.name, e))
            return False

    def _wait_until_search_param_fields_appears(self, max_wait=5):
        """Waits until every configured search parameter field can be located.

        Args:
            max_wait: How long to wait maximally before returning False.

        Returns: False on timeout, True once all param fields were found.
        """
        def find_visible_search_param(driver):
            # All configured param fields must be locatable in one pass.
            for param, field in self._get_search_param_fields().items():
                input_field = driver.find_element(*field)
                if not input_field:
                    return False
            return True

        try:
            fields = WebDriverWait(self.webdriver, max_wait).until(find_visible_search_param)
            return fields
        except TimeoutException as e:
            logger.error('{}: TimeoutException waiting for search param field: {}'.format(self.name, e))
            return False
    def _goto_next_page(self):
        """
        Click the next page element,

        Returns:
            The url of the next page or False if there is no such url
            (end of available pages for instance).
        """
        next_url = ''
        element = self._find_next_page_element()

        if element and hasattr(element, 'click'):
            next_url = element.get_attribute('href')
            try:
                element.click()
            except WebDriverException:
                # See http://stackoverflow.com/questions/11908249/debugging-element-is-not-clickable-at-point-error
                # first move mouse to the next element, some times the element is not visibility, like blekko.com
                selector = self.next_page_selectors[self.search_engine_name]
                if selector:
                    try:
                        next_element = WebDriverWait(self.webdriver, 5).until(
                            EC.presence_of_element_located((By.CSS_SELECTOR, selector)))
                        webdriver.ActionChains(self.webdriver).move_to_element(next_element).perform()
                        # wait until the next page link emerges
                        WebDriverWait(self.webdriver, 8).until(
                            EC.visibility_of_element_located((By.CSS_SELECTOR, selector)))
                        element = self.webdriver.find_element_by_css_selector(selector)
                        next_url = element.get_attribute('href')
                        element.click()
                    except WebDriverException:
                        # Best-effort retry only; give up silently.
                        pass

        # wait until the next page was loaded

        if not next_url:
            return False
        else:
            return next_url

    def _find_next_page_element(self):
        """Finds the element that locates the next page for any search engine.

        Returns:
            The element that needs to be clicked to get to the next page or a boolean value to
            indicate an error condition.
        """
        if self.search_type == 'normal':
            selector = self.next_page_selectors[self.search_engine_name]
            try:
                # wait until the next page link is clickable
                WebDriverWait(self.webdriver, 5).until(EC.element_to_be_clickable((By.CSS_SELECTOR, selector)))
            except (WebDriverException, TimeoutException) as e:
                # If we can't detect the next page element because there is no
                # next page (for example because the search query is to unique)
                # we need to return false
                self._save_debug_screenshot()
                logger.warning('{}: Cannot locate next page element: {}'.format(self.name, str(e)))
                return False

            return self.webdriver.find_element_by_css_selector(selector)

        elif self.search_type == 'image':
            # Image SERPs page by scrolling instead of clicking a link.
            self.page_down()
            return True
    def wait_until_serp_loaded(self):
        """
        This method tries to wait until the page requested is loaded.

        We know that the correct page is loaded when self.page_number appears
        in the navigation of the page.
        """
        if self.search_type == 'normal':
            # Engine specific selector of the "current page" marker.
            if self.search_engine_name == 'google':
                selector = '#navcnt td.cur'
            elif self.search_engine_name == 'yandex':
                selector = '.pager__item_current_yes'
            elif self.search_engine_name == 'bing':
                selector = 'nav li a.sb_pagS'
            elif self.search_engine_name == 'yahoo':
                selector = '.compPagination strong'
            elif self.search_engine_name == 'baidu':
                selector = '#page .fk_cur + .pc'
            elif self.search_engine_name == 'duckduckgo':
                # no pagination in duckduckgo
                pass
            elif self.search_engine_name == 'ask':
                selector = '#paging .pgcsel .pg'
            # NOTE(review): for engines not listed above (e.g. blekko),
            # `selector` stays unbound and the wait below would raise
            # UnboundLocalError — confirm those engines never reach here.

            if self.search_engine_name == 'duckduckgo':
                time.sleep(1.5)
            else:
                try:
                    WebDriverWait(self.webdriver, 5).\
                        until(EC.text_to_be_present_in_element((By.CSS_SELECTOR, selector), str(self.page_number)))
                except TimeoutException as e:
                    self._save_debug_screenshot()
                    logger.warning('Pagenumber={} did not appear in serp. Maybe there is only one result for this query?'.format(self.page_number))

        elif self.search_type == 'image':
            self.wait_until_title_contains_keyword()

        else:
            self.wait_until_title_contains_keyword()

    def wait_until_title_contains_keyword(self):
        # The window title usually echoes the query once the SERP is shown.
        try:
            WebDriverWait(self.webdriver, 5).until(EC.title_contains(self.query))
        except TimeoutException:
            logger.debug(SeleniumSearchError(
                '{}: Keyword "{}" not found in title: {}'.format(self.name, self.query, self.webdriver.title)))
    def build_search(self):
        """Build the search for SelScrapers.

        Picks the starting URL (image or normal search location), appends
        the engine specific results-per-page parameter, and navigates the
        webdriver there.
        """
        assert self.webdriver, 'Webdriver needs to be ready to build the search'

        if self.config.get('search_type', 'normal') == 'image':
            starting_url = self.image_search_locations[self.search_engine_name]
        else:
            starting_url = self.base_search_url

        num_results = self.config.get('num_results_per_page', 10)

        if self.search_engine_name == 'google':
            if num_results not in (10, 20, 30, 50, 100):
                raise Exception('num_results_per_page for selenium mode and search engine Google must be in (10, 20, 30, 50, 100)')
            # NOTE(review): assumes the base url already ends with '?' or '&'.
            starting_url += 'num={}'.format(num_results)

        elif self.search_engine_name == 'bing':
            if num_results not in range(1, 100):
                raise Exception('num_results_per_page for selenium mode and search engine Bing must be in range(1, 100)')
            starting_url += 'count={}'.format(num_results)

        elif self.search_engine_name == 'yahoo':
            if num_results not in range(1, 100):
                raise Exception('num_results_per_page for selenium mode and search engine Yahoo must be in range(1, 100)')
            starting_url += 'n={}'.format(num_results)

        self.webdriver.get(starting_url)
    def search(self):
        """Search with webdriver.

        Fills out the search form of the search engine for each keyword.
        Clicks the next link while pages_per_keyword is not reached.
        """
        for self.query, self.pages_per_keyword in self.jobs.items():

            self.search_input = self._wait_until_search_input_field_appears()

            # A missing input field is treated as a detection signal when
            # stop_on_detection is configured.
            if self.search_input is False and self.config.get('stop_on_detection'):
                self.status = 'Malicious request detected'
                return

            # check if request denied
            self.handle_request_denied()

            if self.search_input:
                self.search_input.clear()
                time.sleep(.25)

                self.search_param_fields = self._get_search_param_fields()

                if self.search_param_fields:
                    wait_res = self._wait_until_search_param_fields_appears()
                    if wait_res is False:
                        raise Exception('Waiting search param input fields time exceeds')
                    # Fill each extra param field via javascript, since the
                    # fields are located either by id or by name.
                    for param, field in self.search_param_fields.items():
                        if field[0] == By.ID:
                            js_tpl = '''
                            var field = document.getElementById("%s");
                            field.setAttribute("value", "%s");
                            '''
                        elif field[0] == By.NAME:
                            js_tpl = '''
                            var fields = document.getElementsByName("%s");
                            for (var f in fields) {
                                f.setAttribute("value", "%s");
                            }
                            '''
                        js_str = js_tpl % (field[1], self.search_param_values[param])
                        self.webdriver.execute_script(js_str)

                try:
                    self.search_input.send_keys(self.query + Keys.ENTER)
                except ElementNotVisibleException:
                    # Retry once after a short grace period.
                    time.sleep(2)
                    self.search_input.send_keys(self.query + Keys.ENTER)

                self.requested_at = datetime.datetime.utcnow()
            else:
                logger.debug('{}: Cannot get handle to the input form for keyword {}.'.format(self.name, self.query))
                continue

            super().detection_prevention_sleep()
            super().keyword_info()

            for self.page_number in self.pages_per_keyword:

                self.wait_until_serp_loaded()

                try:
                    self.html = self.webdriver.execute_script('return document.body.innerHTML;')
                except WebDriverException as e:
                    # Fall back to the full page source when JS execution fails.
                    self.html = self.webdriver.page_source

                super().after_search()

                # Click the next page link not when leaving the loop
                # in the next iteration.
                if self.page_number in self.pages_per_keyword:
                    next_url = self._goto_next_page()
                    self.requested_at = datetime.datetime.utcnow()

                    if not next_url:
                        break
def page_down(self):
"""Scrolls down a page with javascript.
Used for next page in image search mode or when the
next results are obtained by scrolling down a page.
"""
js = '''
var w = window,
d = document,
e = d.documentElement,
g = d.getElementsByTagName('body')[0],
y = w.innerHeight|| e.clientHeight|| g.clientHeight;
window.scrollBy(0,y);
return y;
'''
self.webdriver.execute_script(js)
    def run(self):
        """Run the SelScraper.

        Thread entry point: sets up the display and webdriver, performs
        the search when startable, and quits the browser afterwards.
        """
        self._set_xvfb_display()

        if not self._get_webdriver():
            raise Exception('{}: Aborting: No available selenium webdriver.'.format(self.name))

        try:
            # Tile the browser windows in a 4-column grid so parallel
            # instances do not overlap on screen.
            self.webdriver.set_window_size(400, 400)
            self.webdriver.set_window_position(400 * (self.browser_num % 4), 400 * (math.floor(self.browser_num // 4)))
        except WebDriverException as e:
            logger.debug('Cannot set window size: {}'.format(e))

        super().before_search()

        if self.startable:
            self.build_search()
            self.search()

        if self.webdriver:
            self.webdriver.quit()
"""
For most search engines, the normal SelScrape works perfectly, but sometimes
the scraping logic is different for other search engines.
Duckduckgo loads new results on the fly (via ajax) and doesn't support any "next page"
link. Other search engines like blekko.com have a completely different SERP page format.
That's why we need to inherit from SelScrape for specific logic that only applies for the given
search engine.
The following functionality may differ in particular:
- _goto_next_page()
- _get_search_input()
- _wait_until_search_input_field_appears()
- _handle_request_denied()
- wait_until_serp_loaded()
"""
class GoogleSelScrape(SelScrape):
    """
    Add Google Settings via this subclass.
    """

    def __init__(self, *args, **kwargs):
        SelScrape.__init__(self, *args, **kwargs)
        self.largest_id = 0

    def build_search(self):
        """
        Specify google page settings according to config.

        Doing this automatically often provocates a captcha question.
        This is highly sensitive.
        """
        super().build_search()

        if self.config.get('google_selenium_search_settings', False):
            # assume we are on the normal google search page right now
            self.webdriver.get('https://www.google.com/preferences?hl=en')
            time.sleep(random.randint(1, 4))

            if self.config.get('google_selenium_manual_settings', False):
                return input('Press any Key after search settings completed...')

            oldsize = self.webdriver.get_window_size()
            self.webdriver.maximize_window()

            # wait until we see the settings
            element = WebDriverWait(self.webdriver, 7).until(EC.presence_of_element_located((By.NAME, 'safeui')))

            try:
                if self.config.get('google_selenium_safe_search', False):
                    if self.webdriver.find_element_by_name('safeui').get_attribute('value') != 'on':
                        self.webdriver.find_element_by_name('safeui').click()

                try:
                    if self.config.get('google_selenium_personalization', False):
                        self.webdriver.find_element_by_css_selector('#pson-radio > div:first-child').click()
                    else:
                        self.webdriver.find_element_by_css_selector('#pson-radio > div:nth-child(2)').click()
                except WebDriverException as e:
                    logger.warning('Cannot set personalization settings.')

                time.sleep(random.randint(1, 4))

                # set the region
                try:
                    self.webdriver.find_element_by_id('regionanchormore').click()
                except WebDriverException as e:
                    logger.warning('Regions probably already expanded.')

                try:
                    region = self.config.get('google_selenium_region', 'US')
                    self.webdriver.find_element_by_css_selector('div[data-value="{}"]'.format(region)).click()
                except WebDriverException as e:
                    logger.warning('Cannot set region settings.')

                # set the number of results
                try:
                    num_results = self.config.get('google_selenium_num_results', 10)
                    self.webdriver.find_element_by_id('result_slider').click()
                    # reset the slider to the leftmost position
                    for i in range(5):
                        self.webdriver.find_element_by_id('result_slider').send_keys(Keys.LEFT)
                    # move to desired result
                    for i in range((num_results // 10) - 1):
                        time.sleep(.25)
                        self.webdriver.find_element_by_id('result_slider').send_keys(Keys.RIGHT)
                except WebDriverException as e:
                    logger.warning('Cannot set number of results settings.')

                time.sleep(random.randint(1, 4))

                # save settings
                self.webdriver.find_element_by_css_selector('#form-buttons div:first-child').click()
                time.sleep(1)
                # accept alert
                self.webdriver.switch_to.alert.accept()
                time.sleep(random.randint(1, 4))

                self.handle_request_denied()
            except WebDriverException as e:
                # Bugfix: removed the interactive `input('waiting...')` that
                # blocked unattended/headless runs before re-raising.
                logger.error('Unable to set google page settings')
                raise e
            finally:
                # Bugfix: was `driver.set_window_size(...)` which raised
                # NameError (there is no local `driver`). Restore the window
                # size on self.webdriver, even when applying settings failed.
                self.webdriver.set_window_size(oldsize['width'], oldsize['height'])
class DuckduckgoSelScrape(SelScrape):
    """
    Duckduckgo is a little special since new results are obtained by ajax.
    next page thus is then to scroll down.

    It cannot be the User-Agent, because I already tried this.
    """

    def __init__(self, *args, **kwargs):
        SelScrape.__init__(self, *args, **kwargs)
        # Highest result id seen so far (not used in the code visible here).
        self.largest_id = 0

    def _goto_next_page(self):
        # Duckduckgo has no next-page link; scrolling down triggers the
        # ajax load of further results.
        super().page_down()
        # Stop paging once the end-of-results marker shows up in the html.
        return 'No more results' not in self.html

    def wait_until_serp_loaded(self):
        # No pagination indicator exists, so just wait for the search
        # input field to be locatable again.
        super()._wait_until_search_input_field_appears()
class BlekkoSelScrape(SelScrape):
    """Blekko specific scraping logic (no pagination support)."""

    def __init__(self, *args, **kwargs):
        SelScrape.__init__(self, *args, **kwargs)

    def _goto_next_page(self):
        # Blekko has no next-page selector configured; paging is a no-op.
        pass
class AskSelScrape(SelScrape):
    """Ask.com specific scraping logic."""

    def __init__(self, *args, **kwargs):
        SelScrape.__init__(self, *args, **kwargs)

    def wait_until_serp_loaded(self):
        # Ask encodes the query into the result URL, so the SERP counts as
        # loaded once the (url-quoted or plus-joined) keyword shows up there.
        def wait_until_keyword_in_url(driver):
            try:
                return quote(self.query) in driver.current_url or \
                    self.query.replace(' ', '+') in driver.current_url
            except WebDriverException:
                pass

        WebDriverWait(self.webdriver, 5).until(wait_until_keyword_in_url)
| 36,591 | 37.845011 | 147 | py |
GoogleScraper | GoogleScraper-master/GoogleScraper/caching.py | # -*- coding: utf-8 -*-
import os
import time
import hashlib
import gzip
import bz2
import re
from sqlalchemy.orm.exc import NoResultFound
from GoogleScraper.database import SearchEngineResultsPage
from GoogleScraper.parsing import parse_serp
from GoogleScraper.output_converter import store_serp_result
import logging
"""
GoogleScraper is a complex application and thus searching is error prone. While developing,
you may need to repeat the same searches several times and you might end up being banned by
the search engine providers. This is why all searches are cached by default.
Every SERP page is cached in a separate file. In the future, it might be more straightforward to
cache scraping jobs in archives (zip files).
What determines the uniqueness of a SERP result?
- The complete url (because in URLs search queries and params are included)
- The scrape mode: Raw Http might request different resources than a browser.
- Optionally the http headers (because different User-Agents yield different results)
Using these three pieces of information would guarantee that we cache only unique requests,
but then we couldn't read back the information of the cache files, since these parameters
are only available at runtime of the scrapers. So we have to be satisfied with the
keyword, search_engine and scrapemode as identifying params.
How does caching work on a higher level?
Assume the user interrupted his scrape job at 1000/2000 keywords and there remain
quite some keywords to scrape for. Then the previously parsed 1000 results are already
stored in the database and shouldn't be added a second time.
"""
logger = logging.getLogger(__name__)
ALLOWED_COMPRESSION_ALGORITHMS = ('gz', 'bz2')


class InvalidConfigurationFileException(Exception):
    """
    Raised when the cache module cannot determine the kind
    (compression, for instance) of a configuration file.
    """
    pass


class CompressedFile(object):
    """Read and write the data of a compressed file.
    Used to cache files for GoogleScraper.s

    Supported algorithms: gz, bz2

    >>> import os
    >>> f = CompressedFile('/tmp/test.txt', algorithm='gz')
    >>> f.write('hello world')
    >>> assert os.path.exists('/tmp/test.txt.gz')

    >>> f2 = CompressedFile('/tmp/test.txt.gz', algorithm='gz')
    >>> assert f2.read() == 'hello world'
    """

    def __init__(self, path, algorithm='gz'):
        """Create a new compressed file to read and write data to.

        Args:
            algorithm: Which compression algorithm to use.
            path: A valid file path to the file to read/write; the
                algorithm's extension is appended when missing.

        @todo: it would be a better approach to pass an Algorithm object instead of a string
        """
        self.algorithm = algorithm

        assert self.algorithm in ALLOWED_COMPRESSION_ALGORITHMS, \
            '{algo} is not an supported compression algorithm'.format(algo=self.algorithm)

        # Normalize the path so it always carries the algorithm extension.
        self.path = path if path.endswith(self.algorithm) else '{path}.{ext}'.format(path=path, ext=algorithm)

        # Dispatch tables keyed by algorithm name.
        self.readers = {'gz': self.read_gz, 'bz2': self.read_bz2}
        self.writers = {'gz': self.write_gz, 'bz2': self.write_bz2}

    def read_gz(self):
        """Read and decode the gzip compressed file contents."""
        with gzip.open(self.path, 'rb') as stream:
            return stream.read().decode()

    def read_bz2(self):
        """Read and decode the bzip2 compressed file contents."""
        with bz2.open(self.path, 'rb') as stream:
            return stream.read().decode()

    def write_gz(self, data):
        """Write raw bytes through gzip compression."""
        with gzip.open(self.path, 'wb') as stream:
            stream.write(data)

    def write_bz2(self, data):
        """Write raw bytes through bzip2 compression."""
        with bz2.open(self.path, 'wb') as stream:
            stream.write(data)

    def read(self):
        """Return the decompressed file contents as a string."""
        assert os.path.exists(self.path)
        return self.readers[self.algorithm]()

    def write(self, data):
        """Compress and write data (str or bytes) to the file."""
        if not isinstance(data, bytes):
            data = data.encode()
        return self.writers[self.algorithm](data)
class CacheManager():
"""
Manages caching for GoogleScraper.
"""
    def __init__(self, config):
        """Store the configuration and ensure the cache directory exists.

        Args:
            config: Dict-like scraper configuration (read via .get()).
        """
        self.config = config
        self.maybe_create_cache_dir()
def maybe_create_cache_dir(self):
if self.config.get('do_caching', True):
cd = self.config.get('cachedir', '.scrapecache')
if not os.path.exists(cd):
os.mkdir(cd)
def maybe_clean_cache(self):
"""
Clean the cache.
Clean all cached searches (the obtained html code) in the cache directory iff
the respective files are older than specified in the configuration. Defaults to 12 hours.
"""
cachedir = self.config.get('cachedir', '.scrapecache')
if os.path.exists(cachedir):
for fname in os.listdir(cachedir):
path = os.path.join(cachedir, fname)
if time.time() > os.path.getmtime(path) + (60 * 60 * int(self.config.get('clean_cache_after', 48))):
# Remove the whole directory if necessary
if os.path.isdir(path):
import shutil
shutil.rmtree(path)
else:
os.remove(os.path.join(cachedir, fname))
def cached_file_name(self, keyword, search_engine, scrape_mode, page_number):
"""Make a unique file name from the search engine search request.
Important! The order of the sequence is darn important! If search queries have the same
words but in a different order, they are unique searches.
Args:
keyword: The keyword that was used in the search.
search_engine: The search engine the keyword was scraped for.
scrapemode: The scrapemode that was used.
page_number: The number of the SERP page.
Returns:
A unique file name based on the parameters of the search request.
"""
assert isinstance(keyword, str), 'Keyword {} must be a string'.format(keyword)
assert isinstance(search_engine, str), 'Search engine {} must be a string'.format(search_engine)
assert isinstance(scrape_mode, str), 'Scrapemode {} needs to be a string'.format(scrape_mode)
assert isinstance(page_number, int), 'Page_number {} needs to be an int'.format(page_number)
unique = [keyword, search_engine, scrape_mode, page_number]
sha = hashlib.sha256()
sha.update(b''.join(str(s).encode() for s in unique))
return '{file_name}.{extension}'.format(file_name=sha.hexdigest(), extension='cache')
def get_cached(self, keyword, search_engine, scrapemode, page_number):
"""Loads a cached SERP result.
Args:
keyword: The keyword that was used in the search.
search_engine: The search engine the keyword was scraped for.
scrapemode: The scrapemode that was used.
page_number: page_number
Returns:
The contents of the HTML that was shipped while searching. False if there couldn't
be found a file based on the above params.
"""
if self.config.get('do_caching', False):
fname = self.cached_file_name(keyword, search_engine, scrapemode, page_number)
cdir = self.config.get('cachedir', '.scrapecache')
if fname in os.listdir(cdir):
# If the cached file is older than 12 hours, return False and thus
# make a new fresh request.
try:
modtime = os.path.getmtime(os.path.join(cdir, fname))
except FileNotFoundError:
return False
if (time.time() - modtime) / 60 / 60 > int(self.config('clean_cache_after', 48)):
return False
path = os.path.join(cdir, fname)
return self.read_cached_file(path)
else:
return False
    def read_cached_file(self, path):
        """Read a compressed or uncompressed file.

        The compressing schema is determined by the file extension. For example
        a file that ends with .gz needs to be gunzipped.

        Supported algorithms:
        gzip and bzip2

        Args:
            path: The path to the cached file.

        Returns:
            The data of the cached file as a string. Implicitly returns None when
            caching is disabled or when an uncompressed file fails to decode.

        Raises:
            InvalidConfigurationFileException: When the type of the cached file
            cannot be determined.
        """
        if self.config.get('do_caching', False):
            ext = path.split('.')[-1]
            # The path needs to have an extension in any case.
            # When uncompressed, ext is 'cache', else it is the
            # compressing scheme file ending like .gz or .bz2 ...
            assert ext in ALLOWED_COMPRESSION_ALGORITHMS or ext == 'cache', 'Invalid extension: {}'.format(ext)
            if ext == 'cache':
                with open(path, 'r') as fd:
                    try:
                        data = fd.read()
                        return data
                    except UnicodeDecodeError as e:
                        logger.warning(str(e))
                        # If we get this error, the cache files are probably
                        # compressed but the 'compress_cached_files' flag was
                        # set to False. Try to decompress them, but this may
                        # lead to a infinite recursion. This isn't proper coding,
                        # but convenient for the end user.
                        self.config['compress_cached_files'] = True
            elif ext in ALLOWED_COMPRESSION_ALGORITHMS:
                f = CompressedFile(path)
                return f.read()
            else:
                raise InvalidConfigurationFileException('"{}" is a invalid configuration file.'.format(path))
    def cache_results(self, parser, query, search_engine, scrape_mode, page_number, db_lock=None):
        """Stores the html of an parser in a file.

        The file name is determined by the parameters query, search_engine, scrape_mode and page_number.
        See cached_file_name() for more information.

        This will always write(overwrite) the cached file. If compress_cached_files is
        True, the page is written in bytes (obviously).

        Args:
            parser: A parser with the data to cache.
            query: The keyword that was used in the search.
            search_engine: The search engine the keyword was scraped for.
            scrape_mode: The scrapemode that was used.
            page_number: The page number that the serp page is.
            db_lock: If an db_lock is given, all action are wrapped in this lock.
        """
        if self.config.get('do_caching', False):
            if db_lock:
                db_lock.acquire()
            if self.config.get('minimize_caching_files', True):
                html = parser.cleaned_html
            else:
                html = parser.html
            fname = self.cached_file_name(query, search_engine, scrape_mode, page_number)
            cachedir = self.config.get('cachedir', '.scrapecache')
            path = os.path.join(cachedir, fname)
            if self.config.get('compress_cached_files'):
                algorithm = self.config.get('compressing_algorithm', 'gz')
                f = CompressedFile(path, algorithm=algorithm)
                f.write(html)
            else:
                with open(path, 'w') as fd:
                    # The parser html may be bytes; decode before writing in text mode.
                    if isinstance(html, bytes):
                        fd.write(html.decode())
                    else:
                        fd.write(html)
            if db_lock:
                db_lock.release()
def _get_all_cache_files(self):
"""Return all files found in the cachedir.
Returns:
All files that have the string "cache" in it within the cache directory.
Files are either uncompressed filename.cache or are compressed with a
compression algorithm: "filename.cache.zip"
"""
files = set()
for dirpath, dirname, filenames in os.walk(self.config.get('cachedir', '.scrapecache')):
for name in filenames:
if 'cache' in name:
files.add(os.path.join(dirpath, name))
return files
def _caching_is_one_to_one(self, keywords, search_engine, scrapemode, page_number):
"""Check whether all keywords map to a unique file name.
Args:
keywords: All keywords for which to check the uniqueness of the hash
search_engine: The search engine the keyword was scraped for.
scrapemode: The scrapemode that was used.
page_number: page_number
Returns:
True if all keywords map to a unique hash and False if not.
"""
mappings = {}
for kw in keywords:
file_hash = self.cached_file_name(kw, search_engine, scrapemode, page_number)
if file_hash not in mappings:
mappings.update({file_hash: [kw, ]})
else:
mappings[file_hash].append(kw)
duplicates = [v for k, v in mappings.items() if len(v) > 1]
if duplicates:
logger.info('Not one-to-one. {}'.format(duplicates))
return False
else:
logger.info('one-to-one')
return True
def parse_all_cached_files(self, scrape_jobs, session, scraper_search):
"""Walk recursively through the cachedir (as given by the Config) and parse all cached files.
Args:
session: An sql alchemy session to add the entities
scraper_search: Abstract object representing the current search.
Returns:
The scrape jobs that couldn't be parsed from the cache directory.
"""
files = self._get_all_cache_files()
num_cached = num_total = 0
mapping = {}
for job in scrape_jobs:
cache_name = self.cached_file_name(
job['query'],
job['search_engine'],
job['scrape_method'],
job['page_number']
)
mapping[cache_name] = job
num_total += 1
for path in files:
# strip of the extension of the path if it has eny
fname = os.path.split(path)[1]
clean_filename = fname
for ext in ALLOWED_COMPRESSION_ALGORITHMS:
if fname.endswith(ext):
clean_filename = fname.rstrip('.' + ext)
job = mapping.get(clean_filename, None)
if job:
# We found a file that contains the keyword, search engine name and
# search mode that fits our description. Let's see if there is already
# an record in the database and link it to our new ScraperSearch object.
serp = self.get_serp_from_database(session, job['query'], job['search_engine'], job['scrape_method'],
job['page_number'])
# if no serp was found or the serp has no results
# parse again
if not serp or (serp and len(serp.links) <= 0):
serp = self.parse_again(fname, job['search_engine'], job['scrape_method'], job['query'])
serp.scraper_searches.append(scraper_search)
session.add(serp)
if num_cached % 200 == 0:
session.commit()
store_serp_result(serp, self.config)
num_cached += 1
scrape_jobs.remove(job)
logger.info('{} cache files found in {}'.format(len(files), self.config.get('cachedir')))
logger.info('{}/{} objects have been read from the cache. {} remain to get scraped.'.format(
num_cached, num_total, num_total - num_cached))
session.add(scraper_search)
session.commit()
return scrape_jobs
    def parse_again(self, fname, search_engine, scrape_method, query):
        """Re-parse a cached SERP file and return the freshly parsed result.

        @todo: `scrape_method` is not used here -> check if scrape_method is passed to this function and remove it
        """
        path = os.path.join(self.config.get('cachedir', '.scrapecache'), fname)
        html = self.read_cached_file(path)
        return parse_serp(
            self.config,
            html=html,
            search_engine=search_engine,
            query=query
        )
    def get_serp_from_database(self, session, query, search_engine, scrape_method, page_number):
        """Look up an already stored SERP matching the given search parameters.

        Returns:
            The matching SearchEngineResultsPage (or None if no row matched),
            False when NoResultFound is raised.
        """
        try:
            serp = session.query(SearchEngineResultsPage).filter(
                SearchEngineResultsPage.query == query,
                SearchEngineResultsPage.search_engine_name == search_engine,
                SearchEngineResultsPage.scrape_method == scrape_method,
                SearchEngineResultsPage.page_number == page_number).first()
            # NOTE(review): Query.first() returns None instead of raising
            # NoResultFound (only .one() raises); the except branch below is
            # likely dead code — confirm.
            return serp
        except NoResultFound:
            # that shouldn't happen
            # we have a cache file that matches the above identifying information
            # but it was never stored to the database.
            return False
    def clean_cachefiles(self):
        """Clean silly html from all cachefiles in the cachdir.

        Interactively asks the user for confirmation, then strips <script> and
        <style> tags from every cached file and rewrites it in place.
        """
        if input(
                'Do you really want to strip all cache files from bloating tags such as <script> and <style>? ').startswith(
                'y'):
            import lxml.html
            from lxml.html.clean import Cleaner

            cleaner = Cleaner()
            cleaner.style = True
            cleaner.scripts = True
            cleaner.javascript = True
            for file in self._get_all_cache_files():
                cfile = CompressedFile(file)
                data = cfile.read()
                cleaned = lxml.html.tostring(cleaner.clean_html(lxml.html.fromstring(data)))
                cfile.write(cleaned)
                logger.info('Cleaned {}. Size before: {}, after {}'.format(file, len(data), len(cleaned)))
def fix_broken_cache_names(self, url, search_engine, scrapemode, page_number):
"""Fix broken cache names.
Args:
url: A list of strings to add to each cached_file_name() call.
@todo: `url` is not used here -> check if scrape_method is passed to this function and remove it
"""
files = self._get_all_cache_files()
logger.debug('{} cache files found in {}'.format(len(files), self.config.get('cachedir', '.scrapecache')))
r = re.compile(r'<title>(?P<kw>.*?) - Google Search</title>')
i = 0
for path in files:
fname = os.path.split(path)[1].strip()
data = self.read_cached_file(path)
infilekws = r.search(data).group('kw')
realname = self.cached_file_name(infilekws, search_engine, scrapemode, page_number)
if fname != realname:
logger.debug('The search query in the title element in file {} differ from that hash of its name. Fixing...'.format(path))
src = os.path.abspath(path)
dst = os.path.abspath(os.path.join(os.path.split(path)[0], realname))
logger.debug('Renamed from {} => {}'.format(src, dst))
os.rename(src, dst)
i += 1
logger.debug('Renamed {} files.'.format(i))
    def cached(self, f, attr_to_cache=None):
        """Decorator that makes return value of functions cachable.

        Any function that returns a value and that is decorated with
        cached will be supplied with the previously calculated result of
        an earlier call. The parameter name with the cached value may
        be set with attr_to_cache.

        Args:
            attr_to_cache: The name of attribute whose data
            is cachable.

        Returns: The modified and wrapped function.

        @todo: `attr_to_cache` is not used here -> check if scrape_method is passed to this function and remove it
        """
        def wraps(*args, **kwargs):
            # NOTE(review): the wrapper never returns f's result (both branches
            # fall through to None) — confirm callers ignore the return value.
            cached_value = self.get_cached(*args, params=kwargs)
            if cached_value:
                f(*args, attr_to_cache=cached_value, **kwargs)
            else:
                # Nothing was cached for this attribute
                value = f(*args, attr_to_cache=None, **kwargs)
                self.cache_results(value, *args, params=kwargs)
        return wraps
if __name__ == '__main__':
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 20,538 | 37.035185 | 138 | py |
GoogleScraper | GoogleScraper-master/Tests/integration_tests.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import unittest
from GoogleScraper import scrape_with_config
from GoogleScraper.parsing import get_parser_by_search_engine
from GoogleScraper.config import get_config
from collections import Counter
# Shared test fixtures: the default GoogleScraper configuration, the directory
# of this test file (used to resolve the static data paths), and the list of
# all supported search engines.
config = get_config()
base = os.path.dirname(os.path.realpath(__file__))
all_search_engines = config.get('supported_search_engines')
class GoogleScraperIntegrationTestCase(unittest.TestCase):
    """Integration tests for GoogleScraper.

    Most tests parse static SERP html files saved under 'data/' so assertions
    can be made against known, fixed page content; the output-format tests run
    real (cached) scrapes and validate the produced CSV/JSON files.
    """
    def setUp(self):
        pass
    def tearDown(self):
        pass
    ### Test (very) static parsing for all search engines. The html files are saved in 'data/uncompressed_serp_pages/'
    # The sample files may become old and the SERP format may change over time. But this is the only
    # way to assert that a certain url or piece must be in the results.
    # If the SERP format changes, update accordingly (after all, this shouldn't happen that often).
    def get_parser_for_file(self, se, file, **kwargs):
        """Read a static html file and return the matching parser instance for engine `se`."""
        file = os.path.join(base, file)
        with open(file, 'r') as f:
            html = f.read()
            parser = get_parser_by_search_engine(se)
            parser = parser(config, html, **kwargs)
        return parser
    def assert_around_10_results_with_snippets(self, parser, delta=4):
        """Assert roughly ten results carry a non-None snippet (within `delta`)."""
        self.assertAlmostEqual(
            len([v['snippet'] for v in parser.search_results['results'] if v['snippet'] is not None]), 10, delta=delta)
    # NOTE(review): mutable default argument below; safe as written since the
    # set is only read, never mutated.
    def assert_atleast90percent_of_items_are_not_None(self, parser, exclude_keys={'snippet'}):
        """Assert that at most ~10% of values per result key are None (except excluded keys)."""
        for result_type, res in parser.search_results.items():
            c = Counter()
            for item in res:
                for key, value in item.items():
                    if value is None:
                        c[key] += 1
            for key, value in c.items():
                if key not in exclude_keys:
                    assert (len(res) / int(value)) >= 9, key + ' has too many times a None value: ' + '{}/{}'.format(
                        int(value), len(res))
    def test_parse_google(self):
        parser = self.get_parser_for_file('google', 'data/uncompressed_serp_pages/abrakadabra_google_de_ip.html')
        assert '232.000.000 Ergebnisse' in parser.num_results_for_query
        assert len(parser.search_results['results']) == 12, len(parser.search_results)
        assert all([v['visible_link'] for v in parser.search_results['results']])
        assert all([v['link'] for v in parser.search_results['results']])
        self.assert_around_10_results_with_snippets(parser)
        assert any(['www.extremnews.com' in v['visible_link'] for v in parser.search_results[
            'results']]), 'Theres a link in this serp page with visible url "www.extremnews.com"'
        assert any(
            ['er Noise-Rock-Band Sonic Youth und wurde' in v['snippet'] for v in parser.search_results['results'] if
             v['snippet']]), 'Specific string not found in snippet.'
        self.assert_atleast90percent_of_items_are_not_None(parser)
    def test_parse_bing(self):
        parser = self.get_parser_for_file('bing', 'data/uncompressed_serp_pages/hello_bing_de_ip.html')
        assert '16.900.000 results' == parser.num_results_for_query
        assert len(parser.search_results['results']) == 12, len(parser.search_results['results'])
        assert all([v['visible_link'] for v in parser.search_results['results']])
        assert all([v['link'] for v in parser.search_results['results']])
        self.assert_around_10_results_with_snippets(parser)
        assert any(['Hello Kitty Online Shop - Hello' in v['title'] for v in
                    parser.search_results['results']]), 'Specific title not found in snippet.'
        self.assert_atleast90percent_of_items_are_not_None(parser)
    def test_parse_yahoo(self):
        parser = self.get_parser_for_file('yahoo', 'data/uncompressed_serp_pages/snow_yahoo_de_ip.html')
        assert '19,400,000 Ergebnisse' == parser.num_results_for_query
        assert len(parser.search_results['results']) >= 10, len(parser.search_results['results'])
        assert len([v['visible_link'] for v in parser.search_results['results'] if
                    v['visible_link']]) == 10, 'Not 10 elements with a visible link in yahoo serp page'
        assert all([v['link'] for v in parser.search_results['results']])
        self.assert_around_10_results_with_snippets(parser)
        assert any(
            [' crystalline water ice that falls from clouds. Since snow is composed of small ic' in v['snippet'] for v
             in parser.search_results['results'] if v['snippet']]), 'Specific string not found in snippet.'
        self.assert_atleast90percent_of_items_are_not_None(parser)
    def test_parse_yandex(self):
        # Disabled via early return (yandex sample data/markup out of date).
        return
        parser = self.get_parser_for_file('yandex', 'data/uncompressed_serp_pages/game_yandex_de_ip.html')
        assert '2 029 580' in parser.num_results_for_query
        assert len(parser.search_results['results']) == 10, len(parser.search_results['results'])
        assert len([v['visible_link'] for v in parser.search_results['results'] if
                    v['visible_link']]) == 10, 'Not 10 elements with a visible link in yandex serp page'
        assert all([v['link'] for v in parser.search_results['results']])
        self.assert_around_10_results_with_snippets(parser)
        assert any(['n play games to compile games statist' in v['snippet'] for v in parser.search_results['results'] if
                    v['snippet']]), 'Specific string not found in snippet.'
        self.assert_atleast90percent_of_items_are_not_None(parser)
    def test_parse_baidu(self):
        parser = self.get_parser_for_file('baidu', 'data/uncompressed_serp_pages/number_baidu_de_ip.html')
        assert '100,000,000' in parser.num_results_for_query
        assert len(parser.search_results['results']) >= 6, len(parser.search_results['results'])
        assert all([v['link'] for v in parser.search_results['results']])
        self.assert_around_10_results_with_snippets(parser, delta=5)
        self.assert_atleast90percent_of_items_are_not_None(parser)
    def test_parse_duckduckgo(self):
        parser = self.get_parser_for_file('duckduckgo', 'data/uncompressed_serp_pages/mountain_duckduckgo_de_ip.html')
        # duckduckgo is a biatch
    def test_parse_ask(self):
        parser = self.get_parser_for_file('ask', 'data/uncompressed_serp_pages/fellow_ask_de_ip.html')
        assert len(parser.search_results['results']) >= 10, len(parser.search_results['results'])
        assert len([v['visible_link'] for v in parser.search_results['results'] if
                    v['visible_link']]) == 10, 'Not 10 elements with a visible link in ask serp page'
        assert all([v['link'] for v in parser.search_results['results']])
        self.assert_around_10_results_with_snippets(parser)
        self.assert_atleast90percent_of_items_are_not_None(parser)
    ### test csv output
    def test_csv_output_static(self):
        """Test csv output.

        Test parsing 4 html pages with two queries and two pages per query and
        transforming the results to csv format.

        The cached file should be saved in 'data/csv_tests/', there should
        be as many files as search_engine * pages_for_keyword

        The keyword used in the static SERP pages MUST be 'some words'

        The filenames must be in the GoogleScraper cache format.
        """
        import csv
        from GoogleScraper.output_converter import csv_fieldnames
        number_search_engines = len(all_search_engines)
        csv_outfile = os.path.join(base, 'data/tmp/csv_test.csv')
        config = {
            'keyword': 'some words',
            'search_engines': all_search_engines,
            'num_pages_for_keyword': 2,
            'scrape_method': 'selenium',
            'cachedir': os.path.join(base, 'data/csv_tests/'),
            'do_caching': True,
            'verbosity': 0,
            'output_filename': csv_outfile,
        }
        search = scrape_with_config(config)
        assert os.path.exists(csv_outfile), '{} does not exist'.format(csv_outfile)
        reader = csv.reader(open(csv_outfile, 'rt'))
        # the items that should always have a value:
        notnull = (
            'link', 'query', 'rank', 'domain', 'title', 'link_type', 'scrape_method', 'page_number',
            'search_engine_name',
            'snippet')
        for rownum, row in enumerate(reader):
            if rownum == 0:
                header = row
                header_keys = set(row)
                assert header_keys.issubset(set(csv_fieldnames)), 'Invalid CSV header: {}'.format(header)
            for item in notnull:
                assert row[header.index(item)], '{} has a item that has no value: {}'.format(item, row)
        self.assertAlmostEqual(number_search_engines * 2 * 10, rownum, delta=30)
    ### test json output
    def test_json_output_static(self):
        """Test json output.
        """
        import json
        number_search_engines = len(all_search_engines)
        json_outfile = os.path.join(base, 'data/tmp/json_test.json')
        config = {
            'keyword': 'some words',
            'search_engines': all_search_engines,
            'num_pages_for_keyword': 2,
            'scrape_method': 'selenium',
            'cachedir': os.path.join(base, 'data/json_tests/'),
            'do_caching': True,
            'verbosity': 0,
            'output_filename': json_outfile
        }
        search = scrape_with_config(config)
        assert os.path.exists(json_outfile), '{} does not exist'.format(json_outfile)
        file = open(json_outfile, 'r')
        try:
            results = json.load(file)
        except ValueError as e:
            print('Cannot parse output json file {}. Reason: {}'.format(json_outfile, e))
            raise e
        # the items that should always have a value:
        notnull = ('link', 'rank', 'domain', 'title', 'link_type')
        num_results = 0
        for item in results:
            for k, v in item.items():
                if k == 'results':
                    for res in v:
                        num_results += 1
                        for item in notnull:
                            assert res[item], '{} has a item that has no value: {}'.format(item, res)
        self.assertAlmostEqual(number_search_engines * 2 * 10, num_results, delta=30)
    ### test correct handling of SERP page that has no results for search query.
    def test_no_results_for_query_google(self):
        parser = self.get_parser_for_file('google', 'data/uncompressed_no_results_serp_pages/google.html')
        assert parser.effective_query == '"be dealt and be evaluated"', 'No effective query.'
    def test_no_results_for_query_yandex(self):
        parser = self.get_parser_for_file('yandex', 'data/uncompressed_no_results_serp_pages/yandex.html')
        assert parser.effective_query == 'food', 'Wrong effective query. {}'.format(parser.effective_query)
    def test_no_results_for_query_bing(self):
        parser = self.get_parser_for_file('bing', 'data/uncompressed_no_results_serp_pages/bing.html')
        assert parser.effective_query == 'food', 'Wrong effective query. {}'.format(parser.effective_query)
    def test_no_results_for_query_ask(self):
        parser = self.get_parser_for_file('ask', 'data/uncompressed_no_results_serp_pages/ask.html')
        assert parser.effective_query == 'food', 'Wrong effective query. {}'.format(parser.effective_query)
    ### test correct parsing of the current page number.
    def test_page_number_selector_yandex(self):
        parser = self.get_parser_for_file('yandex', 'data/page_number_selector/yandex_5.html')
        assert parser.page_number == 5, 'Wrong page number. Got {}'.format(parser.page_number)
    def test_page_number_selector_google(self):
        """Google is a bitch in testing this. While saving the html file, the selected
        page is set back to 1. So page_number is always one."""
        parser = self.get_parser_for_file('google', 'data/page_number_selector/google_8.html')
        assert parser.page_number == 1, 'Wrong page number. Got {}'.format(parser.page_number)
    def test_page_number_selector_bing(self):
        parser = self.get_parser_for_file('bing', 'data/page_number_selector/bing_5.html')
        assert parser.page_number == 5, 'Wrong page number. Got {}'.format(parser.page_number)
    def test_page_number_selector_yahoo(self):
        parser = self.get_parser_for_file('yahoo', 'data/page_number_selector/yahoo_3.html')
        assert parser.page_number == 3, 'Wrong page number. Got {}'.format(parser.page_number)
    def test_page_number_selector_baidu(self):
        parser = self.get_parser_for_file('baidu', 'data/page_number_selector/baidu_9.html')
        assert parser.page_number == 9, 'Wrong page number. Got {}'.format(parser.page_number)
    def test_page_number_selector_ask(self):
        parser = self.get_parser_for_file('ask', 'data/page_number_selector/ask_7.html')
        assert parser.page_number == 7, 'Wrong page number. Got {}'.format(parser.page_number)
    ### test all SERP object indicate no results for all search engines.
    def test_no_results_serp_object(self):
        config = {
            'keyword': 'asdfasdfa7654567654345654343sdfasd',
            'search_engines': all_search_engines,
            'num_pages_for_keyword': 1,
            'scrape_method': 'selenium',
            'cachedir': os.path.join(base, 'data/no_results/'),
            'do_caching': True,
            'verbosity': 1,
        }
        search = scrape_with_config(config)
        assert search.number_search_engines_used == len(all_search_engines)
        assert len(search.used_search_engines.split(',')) == len(search.used_search_engines.split(','))
        assert search.number_proxies_used == 1
        assert search.number_search_queries == 1
        assert search.started_searching < search.stopped_searching
        assert len(all_search_engines) == len(search.serps), 'Not enough results. Expected: {}, got {}'.format(
            len(all_search_engines), len(search.serps))
        for serp in search.serps:
            assert serp.has_no_results_for_query(), 'num_results must be 0 but is {}. {}'.format(serp.num_results,
                                                                                                 serp.links)
            # some search engine do alternative searches instead of yielding
            # nothing at all.
            if serp.search_engine_name in ('google', 'bing'):
                assert serp.effective_query, '{} must have an effective query when a keyword has no results.'.format(
                    serp.search_engine_name)
    def test_no_results2_static(self):
        query = '"Find ich besser als einfach nur den Propheten zu zeichnen, denn das ist nur reine Provokation. Was Titanic macht ist Satire."'
        for search_engine in ('google', 'duckduckgo', 'bing', 'yahoo'):
            parser = self.get_parser_for_file(search_engine, 'data/no_results_literal/{}.html'.format(search_engine),
                                              query=query)
            assert parser.num_results == 0 or parser.effective_query, 'No results must be true for search engine {}! But got {} serp entries and effective query: {}.'.format(
                search_engine, parser.num_results, parser.effective_query)
    ### test correct parsing of the number of results for the query..
    def test_csv_file_header_always_the_same(self):
        """
        Check that csv files have always the same order in their header.
        """
        csv_outfile_1 = os.path.join(base, 'data/tmp/csvout1.csv')
        csv_outfile_2 = os.path.join(base, 'data/tmp/csvout2.csv')
        config = {
            'keyword': 'some words',
            'search_engines': all_search_engines,
            'num_pages_for_keyword': 2,
            'scrape_method': 'selenium',
            'cachedir': os.path.join(base, 'data/csv_tests/'),
            'do_caching': True,
            'verbosity': 0,
            'output_filename': csv_outfile_1,
        }
        # NOTE(review): scrape is run twice with the same config — presumably to
        # warm the cache before producing the second output file; confirm.
        search = scrape_with_config(config)
        search = scrape_with_config(config)
        config.update({'output_filename': csv_outfile_2})
        search = scrape_with_config(config)
        assert os.path.isfile(csv_outfile_1) and os.path.isfile(csv_outfile_2)
        file1 = open(csv_outfile_1, 'rt')
        file2 = open(csv_outfile_2, 'rt')
        import csv
        reader1, reader2 = csv.DictReader(file1), csv.DictReader(file2)
        header1, header2 = reader1.fieldnames, reader2.fieldnames
        from GoogleScraper.output_converter import csv_fieldnames
        assert header1 == header2 == csv_fieldnames
    def test_duckduckgo_http_mode_works(self):
        """
        duckduckgo has a non javascript version that should
        be queried when using http mode
        """
        parser = self.get_parser_for_file('duckduckgo', 'data/various/duckduckgo_http_mode_december_2015.html',
                                          query='what happened')
        assert parser.num_results > 8
        for result_type, data in parser.search_results.items():
            if result_type == 'normal':
                assert len(data) > 8
                for serp in data:
                    assert isinstance(serp['rank'], int)
                    assert len(serp['link']) > 8
                    assert serp['title']
                    assert len(serp['snippet']) > 5
if __name__ == '__main__':
    # Run the integration tests; warnings emitted during runs are suppressed.
    unittest.main(warnings='ignore')
| 17,647 | 42.900498 | 174 | py |
GoogleScraper | GoogleScraper-master/Tests/testing_utils.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import random
from GoogleScraper.utils import get_some_words
def assert_atleast_delta_percent_of_items(data, attr_name, delta=0.9):
"""Checks that at least delta percent of the results are not None.
"""
n=k=0
for obj in data:
n+=1
if not getattr(obj, attr_name):
k += 1
return k/n <= (1-delta)
def assert_atleast_delta_percent_of_items_dict(data, key, delta=0.9):
"""Checks that at least delta percent of the results are not None.
"""
n=k=0
for obj in data:
n+=1
if not obj[key]:
k += 1
return k/n <= (1-delta)
def random_word():
    """Return a random sample word; ``words`` is populated at module import time below."""
    return random.choice(words)
words = get_some_words(n=100)
GoogleScraper | GoogleScraper-master/Tests/functional_tests.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
These functional tests cannot cover the whole functionality of GoogleScraper.
But it is tried to cover at least the most common use cases with GoogleScraper, such that
a basic functionality is shown to be correct. The functional tests are run on git hook pre-push time,
so it is enforced that only minimally stable versions are online.
When testing single functions:
python -m pytest tests/functional_tests.py::GoogleScraperFunctionalTestCase::test_google_with_chrome_and_json_output
"""
import csv
import json
import tempfile
import os
import unittest
from GoogleScraper import scrape_with_config
from GoogleScraper.config import get_config
import testing_utils
# Shared fixtures: the default GoogleScraper configuration and the list of
# search engines it supports.
base_config = get_config()
all_search_engines = base_config['supported_search_engines']
def is_string_and_longer_than(s, n):
    """Return True iff ``s`` is a str whose length exceeds ``n``."""
    if not isinstance(s, str):
        return False
    return len(s) > n
def predicate_true_at_least_n_times(pred, collection, n, key):
    """
    Assert that ``pred`` holds for the ``key`` entry of more than ``n`` items.

    ``key`` may name either an attribute or a mapping key of the collection's
    items; the first item decides which access style is used.
    """
    first = collection[0]
    if hasattr(first, key):
        matches = [getattr(item, key) for item in collection if pred(getattr(item, key))]
        assert len(matches) > n
    elif key in first:
        matches = [item[key] for item in collection if pred(item[key])]
        assert len(matches) > n
class GoogleScraperMinimalFunctionalTestCase(unittest.TestCase):
    """
    Those are the minimal required functional test cases that should work.

    Each test drives a real selenium/chrome scrape, checks the resulting
    ScraperSearch object, then re-checks the same data in the JSON output file.

    python -m pytest tests/functional_tests.py::GoogleScraperMinimalFunctionalTestCase
    """
    def test_google_with_chrome_and_json_output(self):
        """
        Very common use case:

        Ensures that we can scrape three continuous sites with Google using
        chrome in normal mode and save the results to a JSON file.
        """
        results_file = os.path.join(tempfile.gettempdir(), 'results-chrome.json')
        if os.path.exists(results_file):
            os.remove(results_file)
        query = 'Food New York'
        config = {
            'keyword': query,
            'search_engines': ['Google'],
            'num_results_per_page': 100,
            'num_pages_for_keyword': 3,
            'scrape_method': 'selenium',
            'sel_browser': 'chrome',
            'do_sleep': False,
            'browser_mode': 'normal',
            # NOTE(review): machine-specific driver path; parameterize for CI.
            'chromedriver_path': '/home/nikolai/projects/private/Drivers/chromedriver',
            'output_filename': results_file,
            'do_caching': False,
        }
        search = scrape_with_config(config)
        self.assertLess(search.started_searching, search.stopped_searching)
        self.assertEqual(search.number_proxies_used, 1)
        self.assertEqual(search.number_search_engines_used, 1)
        self.assertEqual(search.number_search_queries, 1)
        self.assertEqual(len(search.serps), 3)
        self.assertEqual(search.serps[0].page_number, 1)
        self.assertEqual(search.serps[1].page_number, 2)
        self.assertEqual(search.serps[2].page_number, 3)
        for serp in search.serps:
            self.assertEqual(serp.status, 'successful')
            self.assertEqual(serp.search_engine_name.lower(), 'google')
            self.assertEqual(serp.scrape_method, 'selenium')
            self.assertTrue(serp.num_results_for_query)
            self.assertAlmostEqual(int(serp.num_results), 100, delta=10)
            self.assertFalse(is_string_and_longer_than(serp.effective_query, 1), msg=serp.effective_query)
            self.assertEqual(serp.no_results, False)
            self.assertEqual(serp.num_results, len(serp.links))
            for j, link in enumerate(serp.links):
                if link.link_type == 'results':
                    self.assertTrue(is_string_and_longer_than(link.title, 3))
                    self.assertTrue(is_string_and_longer_than(link.snippet, 3))
                    self.assertTrue(is_string_and_longer_than(link.link, 10))
                    self.assertTrue(isinstance(link.rank, int))
        # test that the json output is correct
        self.assertTrue(os.path.isfile(results_file))
        with open(results_file, 'rt') as file:
            obj = json.load(file)
            # check the same stuff again for the json file
            for i, page in enumerate(obj):
                self.assertEqual(page['effective_query'], '')
                self.assertEqual(page['no_results'], 'False')
                self.assertEqual(page['num_results'], str(len(page['results'])))
                self.assertAlmostEqual(int(page['num_results']), 100, delta=10)
                self.assertTrue(is_string_and_longer_than(page['num_results_for_query'], 5))
                self.assertEqual(page['page_number'], str(i+1))
                self.assertEqual(page['query'], query)
                # todo: Test requested_at
                self.assertEqual(page['requested_by'], 'localhost')
                for j, result in enumerate(page['results']):
                    if result['link_type'] == 'results':
                        self.assertTrue(is_string_and_longer_than(result['title'], 3))
                        self.assertTrue(is_string_and_longer_than(result['snippet'], 3))
                        self.assertTrue(is_string_and_longer_than(result['link'], 10))
                        self.assertTrue(isinstance(int(result['rank']), int))
    def test_bing_with_chrome_and_json_output(self):
        """
        Very common use case:

        Ensures that we can scrape three continuous sites with Bing using
        chrome in headless mode and save the results to a JSON file.
        """
        results_file = os.path.join(tempfile.gettempdir(), 'results-chrome.json')
        if os.path.exists(results_file):
            os.remove(results_file)
        query = 'Startup San Francisco'
        config = {
            'keyword': query,
            'search_engines': ['Bing'],
            'num_results_per_page': 20,  # this is ignored by bing, 10 results per page
            'num_pages_for_keyword': 3,
            'scrape_method': 'selenium',
            'sel_browser': 'chrome',
            'do_sleep': False,
            'browser_mode': 'normal',
            'chromedriver_path': '/home/nikolai/projects/private/Drivers/chromedriver',
            'output_filename': results_file,
            'do_caching': False,
        }
        search = scrape_with_config(config)
        self.assertLess(search.started_searching, search.stopped_searching)
        self.assertEqual(search.number_proxies_used, 1)
        self.assertEqual(search.number_search_engines_used, 1)
        self.assertEqual(search.number_search_queries, 1)
        self.assertEqual(len(search.serps), 3)
        self.assertEqual(search.serps[0].page_number, 1)
        self.assertEqual(search.serps[1].page_number, 2)
        self.assertEqual(search.serps[2].page_number, 3)
        for serp in search.serps:
            self.assertEqual(serp.status, 'successful')
            self.assertEqual(serp.search_engine_name.lower(), 'bing')
            self.assertEqual(serp.scrape_method, 'selenium')
            self.assertTrue(serp.num_results_for_query)
            self.assertAlmostEqual(int(serp.num_results), 10, delta=4)
            self.assertFalse(is_string_and_longer_than(serp.effective_query, 1), msg=serp.effective_query)
            #self.assertEqual(serp.no_results, False)
            self.assertEqual(serp.num_results, len(serp.links))
            for j, link in enumerate(serp.links):
                if link.link_type == 'results':
                    self.assertTrue(is_string_and_longer_than(link.title, 3))
                    self.assertTrue(is_string_and_longer_than(link.snippet, 3))
                    self.assertTrue(is_string_and_longer_than(link.link, 10))
                    self.assertTrue(isinstance(link.rank, int))
        # test that the json output is correct
        self.assertTrue(os.path.isfile(results_file))
        with open(results_file, 'rt') as file:
            obj = json.load(file)
            # check the same stuff again for the json file
            for i, page in enumerate(obj):
                self.assertEqual(page['effective_query'], '')
                #self.assertEqual(page['no_results'], 'False')
                self.assertEqual(page['num_results'], str(len(page['results'])))
                self.assertAlmostEqual(int(page['num_results']), 10, delta=4)
                self.assertTrue(is_string_and_longer_than(page['num_results_for_query'], 5))
                self.assertEqual(page['page_number'], str(i+1))
                self.assertEqual(page['query'], query)
                # todo: Test requested_at
                self.assertEqual(page['requested_by'], 'localhost')
                for j, result in enumerate(page['results']):
                    if result['link_type'] == 'results':
                        self.assertTrue(is_string_and_longer_than(result['title'], 3))
                        self.assertTrue(is_string_and_longer_than(result['snippet'], 3))
                        self.assertTrue(is_string_and_longer_than(result['link'], 10))
                        self.assertTrue(isinstance(int(result['rank']), int))
class GoogleScraperFunctionalTestCase(unittest.TestCase):
    """Live functional tests for GoogleScraper in http, selenium and
    http-async modes.

    NOTE(review): these tests hit the real search engines and rely on
    hard-coded local driver/binary paths, so they are network- and
    environment-dependent by design.
    """

    def test_all_search_engines_in_http_mode(self):
        """
        Very simple test case that assures that scraping all
        search engines in http mode works.
        """
        config = {
            'keyword': 'in this world',
            'search_engines': '*',
            'scrape_method': 'http',
            'do_caching': False,
            'num_results_per_page': 10,
            'log_level': 'WARNING',
            'print_results': 'summarize',
        }
        search = scrape_with_config(config)
        # Sanity checks on the search run as a whole.
        self.assertLess(search.started_searching, search.stopped_searching)
        self.assertEqual(search.number_proxies_used, 1)
        self.assertEqual(search.number_search_engines_used, len(all_search_engines))
        self.assertEqual(search.number_search_queries, 1)
        self.assertEqual(len(search.serps), len(all_search_engines))
        for i, serp in enumerate(search.serps):
            self.assertEqual(search.serps[i].page_number, 1)
            self.assertEqual(serp.status, 'successful')
            self.assertIn(serp.search_engine_name.lower(), all_search_engines)
            self.assertEqual(serp.scrape_method, 'http')
            self.assertTrue(serp.num_results_for_query)
            self.assertTrue(serp.num_results >= 7)
            # An empty effective_query means the engine did not rewrite the query.
            self.assertFalse(is_string_and_longer_than(serp.effective_query, 1), msg=serp.effective_query)
            self.assertEqual(serp.num_results, len(serp.links))
            for j, link in enumerate(serp.links):
                if link.link_type == 'results':
                    self.assertTrue(is_string_and_longer_than(link.title, 3))
                    # no snippet needed actually
                    # self.assertTrue(is_string_and_longer_than(link.snippet, 3))
                    self.assertTrue(is_string_and_longer_than(link.link, 10))
                    self.assertTrue(link.domain in link.link)
                    self.assertTrue(isinstance(link.rank, int))

    def test_all_search_engines_in_selenium_mode(self):
        """
        Very simple test case that assures that scraping all
        search engines in selenium mode works.

        Basically copy paste from `test_all_search_engines_in_http_mode`.
        """
        config = {
            'keyword': 'dont look back in anger',
            'search_engines': '*',
            'scrape_method': 'selenium',
            'sel_browser': 'chrome',
            'browser_mode': 'headless',
            'chromedriver_path': '/home/nikolai/projects/private/Drivers/chromedriver',
            'do_caching': False,
            'num_results_per_page': 10,
        }
        search = scrape_with_config(config)
        self.assertLess(search.started_searching, search.stopped_searching)
        self.assertEqual(search.number_proxies_used, 1)
        self.assertEqual(search.number_search_engines_used, len(all_search_engines))
        self.assertEqual(search.number_search_queries, 1)
        self.assertEqual(len(search.serps), len(all_search_engines))
        for i, serp in enumerate(search.serps):
            self.assertEqual(search.serps[i].page_number, 1)
            self.assertEqual(serp.status, 'successful')
            self.assertIn(serp.search_engine_name.lower(), all_search_engines)
            self.assertEqual(serp.scrape_method, 'selenium')
            self.assertTrue(serp.num_results_for_query)
            self.assertAlmostEqual(serp.num_results, 10, delta=2)
            self.assertFalse(is_string_and_longer_than(serp.effective_query, 1), msg=serp.effective_query)
            self.assertEqual(serp.no_results, False)
            self.assertEqual(serp.num_results, len(serp.links))
            for j, link in enumerate(serp.links):
                if link.link_type == 'results':
                    self.assertTrue(is_string_and_longer_than(link.title, 3))
                    self.assertTrue(is_string_and_longer_than(link.snippet, 3))
                    self.assertTrue(is_string_and_longer_than(link.link, 10))
                    self.assertTrue(link.domain in link.link)
                    self.assertTrue(isinstance(link.rank, int))

    def test_google_with_chrome_and_json_output(self):
        """
        Very common use case:

        Ensures that we can scrape three continuous sites with Google using
        chrome in headless mode and save the results to a JSON file.
        """
        # Remove stale output so the JSON checks run against fresh data.
        results_file = os.path.join(tempfile.gettempdir(), 'results.json')
        if os.path.exists(results_file):
            os.remove(results_file)
        config = {
            'keyword': 'apple tree',
            'search_engines': ['Google'],
            'num_results_per_page': 10,
            'num_pages_for_keyword': 3,
            'scrape_method': 'selenium',
            'sel_browser': 'chrome',
            'do_sleep': False,
            'browser_mode': 'headless',
            'chromedriver_path': '/home/nikolai/projects/private/Drivers/chromedriver',
            'output_filename': results_file,
            'do_caching': False,
        }
        search = scrape_with_config(config)
        self.assertLess(search.started_searching, search.stopped_searching)
        self.assertEqual(search.number_proxies_used, 1)
        self.assertEqual(search.number_search_engines_used, 1)
        self.assertEqual(search.number_search_queries, 1)
        # One SERP object per scraped page, in page order.
        self.assertEqual(len(search.serps), 3)
        self.assertEqual(search.serps[0].page_number, 1)
        self.assertEqual(search.serps[1].page_number, 2)
        self.assertEqual(search.serps[2].page_number, 3)
        for serp in search.serps:
            self.assertEqual(serp.status, 'successful')
            self.assertEqual(serp.search_engine_name.lower(), 'google')
            self.assertEqual(serp.scrape_method, 'selenium')
            self.assertTrue(serp.num_results_for_query)
            self.assertAlmostEqual(serp.num_results, 10, delta=2)
            self.assertFalse(is_string_and_longer_than(serp.effective_query, 1), msg=serp.effective_query)
            self.assertEqual(serp.no_results, False)
            self.assertEqual(serp.num_results, len(serp.links))
            for j, link in enumerate(serp.links):
                if link.link_type == 'results':
                    self.assertTrue(is_string_and_longer_than(link.title, 3))
                    self.assertTrue(is_string_and_longer_than(link.snippet, 3))
                    self.assertTrue(is_string_and_longer_than(link.link, 10))
                    self.assertTrue(isinstance(link.rank, int))
        # test that the json output is correct
        self.assertTrue(os.path.isfile(results_file))
        with open(results_file, 'rt') as file:
            obj = json.load(file)
        # check the same stuff again for the json file
        # (numeric fields are serialized as strings, hence str()/int())
        for i, page in enumerate(obj):
            self.assertEqual(page['effective_query'], '')
            self.assertEqual(page['no_results'], 'False')
            self.assertEqual(page['num_results'], str(len(page['results'])))
            self.assertTrue(is_string_and_longer_than(page['num_results_for_query'], 5))
            self.assertEqual(page['page_number'], str(i+1))
            self.assertEqual(page['query'], 'apple tree')
            # todo: Test requested_at
            self.assertEqual(page['requested_by'], 'localhost')
            for j, result in enumerate(page['results']):
                if result['link_type'] == 'results':
                    self.assertTrue(is_string_and_longer_than(result['title'], 3))
                    self.assertTrue(is_string_and_longer_than(result['snippet'], 3))
                    self.assertTrue(is_string_and_longer_than(result['link'], 10))
                    self.assertTrue(isinstance(int(result['rank']), int))

    def test_google_with_firefox_and_json_output(self):
        """
        Very common use case:

        Ensures that we can scrape three continuous sites with Google using
        firefox in headless mode and save the results to a JSON file.
        """
        results_file = os.path.join(tempfile.gettempdir(), 'results.json')
        if os.path.exists(results_file):
            os.remove(results_file)
        config = {
            'keyword': 'how to find large prime numbers',
            'search_engines': ['Google'],
            'num_results_per_page': 10,
            'num_pages_for_keyword': 3,
            'scrape_method': 'selenium',
            'sel_browser': 'firefox',
            'browser_mode': 'headless',
            'do_sleep': False,
            'firefox_binary_path': '/home/nikolai/firefox/firefox',
            'geckodriver_path': '/home/nikolai/projects/private/Drivers/geckodriver',
            'output_filename': results_file,
            'do_caching': False,
        }
        search = scrape_with_config(config)
        self.assertLess(search.started_searching, search.stopped_searching)
        self.assertEqual(search.number_proxies_used, 1)
        self.assertEqual(search.number_search_engines_used, 1)
        self.assertEqual(search.number_search_queries, 1)
        self.assertEqual(len(search.serps), 3)
        self.assertEqual(search.serps[0].page_number, 1)
        self.assertEqual(search.serps[1].page_number, 2)
        self.assertEqual(search.serps[2].page_number, 3)
        for serp in search.serps:
            self.assertEqual(serp.status, 'successful')
            self.assertEqual(serp.search_engine_name.lower(), 'google')
            self.assertEqual(serp.scrape_method, 'selenium')
            self.assertTrue(serp.num_results_for_query)
            self.assertAlmostEqual(serp.num_results, 10, delta=2)
            self.assertFalse(is_string_and_longer_than(serp.effective_query, 1), msg=serp.effective_query)
            self.assertEqual(serp.no_results, False)
            self.assertEqual(serp.num_results, len(serp.links))
            # Firefox parsing misses some snippets: only require 80% coverage.
            self.assertTrue(testing_utils.assert_atleast_delta_percent_of_items(serp.links, 'snippet', delta=0.8))
            for j, link in enumerate(serp.links):
                if link.link_type == 'results':
                    self.assertTrue(is_string_and_longer_than(link.title, 3))
                    self.assertTrue(is_string_and_longer_than(link.link, 10))
                    self.assertTrue(isinstance(link.rank, int))
        # test that the json output is correct
        self.assertTrue(os.path.isfile(results_file))
        with open(results_file, 'rt') as file:
            obj = json.load(file)
        # check the same stuff again for the json file
        for i, page in enumerate(obj):
            self.assertEqual(page['effective_query'], '')
            self.assertEqual(page['no_results'], 'False')
            self.assertEqual(page['num_results'], str(len(page['results'])))
            self.assertTrue(is_string_and_longer_than(page['num_results_for_query'], 5))
            self.assertEqual(page['page_number'], str(i+1))
            self.assertEqual(page['query'], 'how to find large prime numbers')
            # todo: Test requested_at
            self.assertEqual(page['requested_by'], 'localhost')
            self.assertTrue(testing_utils.assert_atleast_delta_percent_of_items_dict(page['results'], 'snippet', delta=0.8))
            for j, result in enumerate(page['results']):
                if result['link_type'] == 'results':
                    self.assertTrue(is_string_and_longer_than(result['title'], 3))
                    self.assertTrue(is_string_and_longer_than(result['link'], 10))
                    self.assertTrue(isinstance(int(result['rank']), int))

    def test_http_mode_google_csv_output(self):
        """Scrape two Google pages in http mode and verify the CSV output file."""
        results_file = os.path.join(tempfile.gettempdir(), 'results.csv')
        if os.path.exists(results_file):
            os.remove(results_file)
        config = {
            'keyword': 'banana',
            'search_engines': ['Google'],
            'num_results_per_page': 10,
            'num_pages_for_keyword': 2,
            'scrape_method': 'http',
            'output_filename': results_file,
            'do_caching': False,
        }
        search = scrape_with_config(config)
        self.assertLess(search.started_searching, search.stopped_searching)
        self.assertEqual(search.number_proxies_used, 1)
        self.assertEqual(search.number_search_engines_used, 1)
        self.assertEqual(search.number_search_queries, 1)
        self.assertEqual(len(search.serps), 2)
        self.assertEqual(search.serps[0].page_number, 1)
        self.assertEqual(search.serps[1].page_number, 2)
        for serp in search.serps:
            self.assertEqual(serp.query, 'banana')
            self.assertEqual(serp.status, 'successful')
            self.assertEqual(serp.search_engine_name.lower(), 'google')
            self.assertEqual(serp.scrape_method, 'http')
            self.assertTrue(serp.num_results_for_query)
            self.assertAlmostEqual(serp.num_results, 10, delta=2)
            self.assertFalse(is_string_and_longer_than(serp.effective_query, 1), msg=serp.effective_query)
            self.assertEqual(serp.no_results, False)
            self.assertEqual(serp.num_results, len(serp.links))
            # At least 7 of the links must carry a non-trivial snippet.
            predicate_true_at_least_n_times(lambda v: is_string_and_longer_than(v, 3),
                                            serp.links, 7, 'snippet')
            for link in serp.links:
                if link.link_type == 'results':
                    self.assertTrue(is_string_and_longer_than(link.title, 3))
                    self.assertTrue(is_string_and_longer_than(link.link, 10))
                    self.assertTrue(isinstance(link.rank, int))
        # test that the csv output is correct
        self.assertTrue(os.path.isfile(results_file))
        with open(results_file, 'rt') as file:
            reader = csv.DictReader(file, delimiter=',')
            rows = [row for row in reader]
        self.assertAlmostEqual(20, len(rows), delta=3)
        for row in rows:
            self.assertEqual(row['query'], 'banana')
            self.assertTrue(is_string_and_longer_than(row['requested_at'], 5))
            self.assertTrue(int(row['num_results']))
            self.assertEqual(row['scrape_method'], 'http')
            self.assertEqual(row['requested_by'], 'localhost')
            self.assertEqual(row['search_engine_name'], 'google')
            self.assertIn(int(row['page_number']), [1,2])
            self.assertEqual(row['status'], 'successful')
            self.assertTrue(row['no_results'] == 'False')
            self.assertTrue(row['effective_query'] == '')
            if row['link_type'] == 'results':
                self.assertTrue(is_string_and_longer_than(row['title'], 3))
                self.assertTrue(is_string_and_longer_than(row['snippet'], 3))
                self.assertTrue(is_string_and_longer_than(row['domain'], 5))
                self.assertTrue(is_string_and_longer_than(row['visible_link'], 5))
                self.assertTrue(is_string_and_longer_than(row['num_results_for_query'], 3))
                self.assertTrue(is_string_and_longer_than(row['link'], 10))
                self.assertTrue(row['rank'].isdigit())
        # ensure that at least 90% of all entries have a string as snippet
        # NOTE(review): the comment above says 90% but the code checks 80%
        # (0.8 * len(rows)) -- confirm which threshold is intended.
        predicate_true_at_least_n_times(lambda v: is_string_and_longer_than(v, 3), rows, int(0.8*len(rows)), 'snippet')

    def test_asynchronous_mode_bing_and_yandex(self):
        """
        Expected results:

        - around 60 results
        - 30 results for bing and 30 results for yandex
        - valid json file with the contents
        """
        results_file = os.path.join(tempfile.gettempdir(), 'async_results.json')
        if os.path.exists(results_file):
            os.remove(results_file)
        config = {
            'keyword': 'where is my mind',
            'search_engines': ['bing', 'yandex'],
            'num_results_per_page': 10,
            'num_pages_for_keyword': 3,
            'scrape_method': 'http-async',
            'output_filename': results_file,
            'do_caching': False,
        }
        search = scrape_with_config(config)
        self.assertEqual(search.keyword_file, '')
        self.assertLess(search.started_searching, search.stopped_searching)
        self.assertEqual(search.number_proxies_used, 1)
        self.assertEqual(search.number_search_engines_used, 2)
        self.assertEqual(search.number_search_queries, 1)
        # 2 engines * 3 pages = 6 SERPs.
        self.assertEqual(len(search.serps), 6)
        # test that we have twice [1,2,3] as page numbers
        self.assertSetEqual(set([serp.page_number for serp in search.serps]), {1,2,3})
        self.assertAlmostEqual(sum([len(serp.links) for serp in search.serps]), 60, delta=10)
        self.assertAlmostEqual(sum([len(serp.links) for serp in search.serps if serp.search_engine_name == 'yandex']), 30, delta=5)
        self.assertAlmostEqual(sum([len(serp.links) for serp in search.serps if serp.search_engine_name == 'bing']), 30, delta=5)
        for serp in search.serps:
            self.assertEqual(serp.query, 'where is my mind')
            self.assertEqual(serp.status, 'successful')
            self.assertIn(serp.search_engine_name.lower(), ('bing', 'yandex'))
            self.assertEqual(serp.scrape_method, 'http-async')
            # yandex pages apparently lack num_results_for_query -- hence the exception.
            if serp.search_engine_name != 'yandex':
                self.assertTrue(is_string_and_longer_than(serp.num_results_for_query, 5))
            self.assertAlmostEqual(serp.num_results, 10, delta=2)
            self.assertFalse(is_string_and_longer_than(serp.effective_query, 1), msg=serp.effective_query)
            self.assertEqual(serp.num_results, len(serp.links))
            predicate_true_at_least_n_times(lambda v: is_string_and_longer_than(v, 3),
                                            serp.links, 7, 'snippet')
            for link in serp.links:
                if link.link_type == 'results':
                    self.assertTrue(is_string_and_longer_than(link.title, 3))
                    self.assertTrue(is_string_and_longer_than(link.link, 10))
                    self.assertTrue(isinstance(link.rank, int))
        # test that the json output is correct
        self.assertTrue(os.path.isfile(results_file))
        with open(results_file, 'rt') as file:
            obj = json.load(file)
        # check the same stuff again for the json file
        for i, page in enumerate(obj):
            self.assertEqual(page['effective_query'], '')
            self.assertEqual(page['num_results'], str(len(page['results'])))
            if page['search_engine_name'].lower() != 'yandex':
                self.assertTrue(is_string_and_longer_than(page['num_results_for_query'], 5))
            self.assertEqual(page['query'], 'where is my mind')
            self.assertEqual(page['requested_by'], 'localhost')
            for j, result in enumerate(page['results']):
                if result['link_type'] == 'results':
                    self.assertTrue(is_string_and_longer_than(result['title'], 3))
                    self.assertTrue(is_string_and_longer_than(result['snippet'], 3))
                    self.assertTrue(is_string_and_longer_than(result['link'], 10))
                    self.assertTrue(isinstance(int(result['rank']), int))
if __name__ == '__main__':
    # warnings='ignore' silences all Python warnings during the test run
    # (presumably the unclosed-socket/driver ResourceWarnings emitted by
    # live scraping -- confirm).
    unittest.main(warnings='ignore')
| 29,467 | 44.196319 | 131 | py |
GoogleScraper | GoogleScraper-master/docs/source/conf.py | # -*- coding: utf-8 -*-
# Sphinx configuration for the GoogleScraper documentation
# (standard sphinx-quickstart skeleton; only project metadata was filled in).
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))

# -- Project information -----------------------------------------------------

project = 'GoogleScraper'
copyright = '2018, Nikolai Tschacher'
author = 'Nikolai Tschacher'

# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = 'October 2018'

# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}

# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'GoogleScraperdoc'

# -- Options for LaTeX output ------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'GoogleScraper.tex', 'GoogleScraper Documentation',
     'Nikolai Tschacher', 'manual'),
]

# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'googlescraper', 'GoogleScraper Documentation',
     [author], 1)
]

# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'GoogleScraper', 'GoogleScraper Documentation',
     author, 'GoogleScraper', 'One line description of project.',
     'Miscellaneous'),
]

# -- Options for Epub output -------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''

# A unique identification for the text.
#
# epub_uid = ''

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']

# -- Extension configuration -------------------------------------------------

# -- Options for intersphinx extension ---------------------------------------

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}

# -- Options for todo extension ----------------------------------------------

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True | 5,975 | 29.181818 | 79 | py |
GoogleScraper | GoogleScraper-master/Examples/headlesschrome.py | from selenium import webdriver
# Minimal standalone demo: drive headless Chrome and take a screenshot.
chromedriver = '/usr/bin/chromedriver'
options = webdriver.ChromeOptions()
options.add_argument('headless')
options.add_argument('window-size=1200x600')  # optional
# NOTE(review): `chrome_options=` is the older Selenium keyword; newer
# releases expect `options=` instead -- confirm against the pinned version.
browser = webdriver.Chrome(chrome_options=options, executable_path=chromedriver)
browser.get('https://www.google.com')
browser.save_screenshot('headless_chrome_test.png')
browser.quit()
| 469 | 26.647059 | 81 | py |
GoogleScraper | GoogleScraper-master/Examples/image_search.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-

from GoogleScraper import scrape_with_config, GoogleSearchError

# simulating a image search for all search engines that support image search.
# Then download all found images :)

target_directory = 'images/'

# See in the config.cfg file for possible values
config = {
    'keyword': 'beautiful landscape',  # :D hehe have fun my dear friends
    'search_engines': ['yandex', 'google', 'bing', 'yahoo'],  # duckduckgo not supported
    'search_type': 'image',
    'scrape_method': 'selenium',
    'do_caching': True,
}

try:
    search = scrape_with_config(config)
except GoogleSearchError as e:
    print(e)
    # NOTE(review): `search` stays unbound when scraping fails here, so the
    # loop below would raise NameError -- presumably acceptable for a demo.

# Collect every scraped image URL from all result pages.
image_urls = []
for serp in search.serps:
    image_urls.extend(
        [link.link for link in serp.links]
    )

print('[i] Going to scrape {num} images and saving them in "{dir}"'.format(
    num=len(image_urls),
    dir=target_directory
))
import os
import threading
import urllib.parse

import requests
# In our case we want to download the
# images as fast as possible, so we use threads.
class FetchResource(threading.Thread):
    """Grabs a bunch of web resources and stores them in the target directory.

    Args:
        target: A directory where to save the resources.
        urls: A bunch of urls to grab.
    """
    def __init__(self, target, urls):
        super().__init__()
        self.target = target
        self.urls = urls

    def run(self):
        """Download every url in ``self.urls`` into ``self.target``.

        Downloads are best-effort: unreachable resources are skipped
        instead of aborting the thread.
        """
        for url in self.urls:
            url = urllib.parse.unquote(url)
            # Derive the local file name from the last path segment.
            filename = url.split('/')[-1]
            if not filename:
                # url ends with '/': no sensible file name to store it under.
                continue
            try:
                content = requests.get(url).content
            except Exception:
                # Best-effort download: skip resources that cannot be fetched.
                # (The original printed the success message and left an empty
                # file behind even on failure -- both fixed here.)
                continue
            with open(os.path.join(self.target, filename), 'wb') as f:
                f.write(content)
            print('[+] Fetched {}'.format(url))
# make a directory for the results
try:
    os.mkdir(target_directory)
except FileExistsError:
    pass

# fire up 100 threads to get the images
num_threads = 100
# NOTE(review): the workers are created with the hard-coded 'images/' target
# instead of `target_directory` -- the two values happen to match here.
threads = [FetchResource('images/', []) for i in range(num_threads)]

# Distribute the urls round-robin over the workers: each pass of the inner
# loop hands one url to every thread until the pool is exhausted
# (IndexError from pop()).
while image_urls:
    for t in threads:
        try:
            t.urls.append(image_urls.pop())
        except IndexError as e:
            break

# Drop workers that received no urls, then run the rest to completion.
threads = [t for t in threads if t.urls]
for t in threads:
    t.start()
for t in threads:
    t.join()
# that's it :)
| 2,278 | 23.771739 | 87 | py |
GoogleScraper | GoogleScraper-master/Examples/keywords.py | """
GoogleScraper will scrape the jobs according to the dictionaries below.
Passing the scrape_jobs to GoogleScraper is simple:
GoogleScraper --keyword-file keywords.py
"""
# Each entry describes one scrape job: the keyword itself plus optional
# per-keyword overrides (engine, proxy, page count, scrape method, ...).
scrape_jobs = [
    {
        'query': 'hello world',
        'search_engine': 'google',  # on which search engines this keyword should be searched.
        'proxy': 'socks5 localhost 9050',  # which proxy to use for this keyword
        'num_pages': 10,  # how many pages to scrape this keyword
        'scrape_method': 'http'
    },
    {
        'query': 'blubb',
        'search_engine': 'yandex',
        'another option': 'some fancy value',  # you can specify other (even senseless) options
        'scrape_method': 'selenium'
    },
    {
        'query': 'mountain',
        'search_engine': 'baidu',
        'scrape_method': 'http'
    },
    # ...
]
GoogleScraper | GoogleScraper-master/Examples/http_mode_example.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from GoogleScraper import scrape_with_config, GoogleSearchError
# Three sample keywords (kept byte-identical to the original, including
# the trailing space in the first entry).
keywords = ['one ', 'two', 'three']

# Scraper configuration -- see config.cfg for every available option.
# Note: this example passes the booleans as the strings 'True'.
config = {
    'use_own_ip': 'True',
    'keywords': keywords,
    'search_engines': ['bing'],
    'num_pages_for_keyword': 1,
    'scrape_method': 'http',
    'do_caching': 'True'
}

try:
    search = scrape_with_config(config)
except GoogleSearchError as e:
    print(e)

# Dump every scraped result page and all links it contains.
for serp in search.serps:
    print(serp)
    for link in serp.links:
        print(link)
GoogleScraper | GoogleScraper-master/Examples/finding_plagiarized_content.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from GoogleScraper import scrape_with_config, GoogleSearchError
text = """
Python is a multi-paradigm programming language: object-oriented programming and structured programming are fully supported, and there are a number of language features which support functional programming and aspect-oriented programming (including by metaprogramming[32] and by magic methods).[33] Many other paradigms are supported using extensions, including design by contract[34][35] and logic programming.[36]
Python uses dynamic typing and a combination of reference counting and a cycle-detecting garbage collector for memory management. An important feature of Python is dynamic name resolution (late binding), which binds method and variable names during program execution.
The design of Python offers only limited support for functional programming in the Lisp tradition. The language has map(), reduce() and filter() functions; comprehensions for lists, dictionaries, and sets; as well as generator expressions.[37] The standard library has two modules (itertools and functools) that implement functional tools borrowed from Haskell and Standard ML.[38]
The core philosophy of the language is summarized by the document "PEP 20 (The Zen of Python)", which includes aphorisms such as:[39]
Beautiful is better than ugly
Explicit is better than implicit
Simple is better than complex
Complex is better than complicated
Readability counts
Rather than requiring all desired functionality to be built into the language's core, Python was designed to be highly extensible. Python can also be embedded in existing applications that need a programmable interface. This design of a small core language with a large standard library and an easily extensible interpreter was intended by Van Rossum from the very start because of his frustrations with ABC (which espoused the opposite mindset).[25]
While offering choice in coding methodology, the Python philosophy rejects exuberant syntax, such as in Perl, in favor of a sparser, less-cluttered grammar. As Alex Martelli put it: "To describe something as clever is not considered a compliment in the Python culture."[40] Python's philosophy rejects the Perl "there is more than one way to do it" approach to language design in favor of "there should be one—and preferably only one—obvious way to do it".[39]
Python's developers strive to avoid premature optimization, and moreover, reject patches to non-critical parts of CPython which would offer a marginal increase in speed at the cost of clarity.[41] When speed is important, Python programmers use PyPy, a just-in-time compiler, or move time-critical functions to extension modules written in languages such as C. Cython is also available which translates a Python script into C and makes direct C level API calls into the Python interpreter.
An important goal of the Python developers is making Python fun to use. This is reflected in the origin of the name which comes from Monty Python,[42] and in an occasionally playful approach to tutorials and reference materials, for example using spam and eggs instead of the standard foo and bar.[43][44]
"""
def make_chunks(text):
    """Split *text* into quoted chunks suitable for a single search query.

    The heuristic works sentence-wise (sentences are separated by '.'):

    * sentences between 25 and 125 characters long become one chunk each,
    * shorter sentences are dropped (too unspecific to search for),
    * longer sentences are split at commas and every comma part longer
      than 25 characters becomes its own chunk.

    Args:
        text: The text to check.

    Returns:
        A list of double-quoted chunks to use in google.
    """
    # Normalize: the chunker works on one logical line.
    text = text.replace('\n', '').replace('\t', '')

    chunks = []
    for sentence in text.split('.'):
        length = len(sentence)
        if 25 <= length <= 125:
            # Fix over the original: `len(sentence) in range(25, 125)` plus
            # `elif len(sentence) > 125` silently dropped sentences of
            # exactly 125 characters. (The dead `consume_next` flag from the
            # short-sentence branch is removed as well.)
            chunks.append('"{}"'.format(sentence))
        elif length > 125:
            # Sentence too long for one query: split at commas and keep
            # every sufficiently long part as its own quoted chunk.
            chunks.extend(
                '"{}"'.format(part) for part in sentence.split(',') if len(part) > 25
            )
        # Sentences shorter than 25 chars are not usable as queries: skip.
    return chunks
# Write one quoted chunk per line; GoogleScraper consumes this file via
# the `keyword_file` config option below.
with open('chunks.txt', 'wt') as f:
    for chunk in make_chunks(text):
        f.write(chunk + '\n')

# See in the config.cfg file for possible values
config = {
    'use_own_ip': True,
    'keyword_file': 'chunks.txt',
    'search_engines': ['google'],
    'num_pages_for_keyword': 1,
    'scrape_method': 'selenium',
    'sel_browser': 'chrome',
}

try:
    search = scrape_with_config(config)
except GoogleSearchError as e:
    print(e)
    # NOTE(review): `search` stays unbound when scraping fails here, so the
    # loop below would raise NameError.

for serp in search.serps:
    # if the original query yielded some results and thus was found by google.
    if not serp.effective_query:
        print('Found plagiarized content: "{}"'.format(serp.query))
GoogleScraper | GoogleScraper-master/Examples/async_mode_example.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from GoogleScraper import scrape_with_config, GoogleSearchError
from GoogleScraper.utils import get_some_words
# Fetch ten sample keywords and persist them, one per line, so the scraper
# can consume them through the `keyword_file` option.
keywords = get_some_words(10)

with open('keywords.txt', 'wt') as keyword_file:
    keyword_file.writelines(word + '\n' for word in keywords)

# Scraper configuration -- see config.cfg for every available option.
config = {
    'use_own_ip': True,
    'keyword_file': 'keywords.txt',
    'search_engines': ['bing', 'duckduckgo'],
    'num_pages_for_keyword': 2,
    'scrape_method': 'http-async',
    'do_caching': True,
    'output_filename': 'out.csv',
}

try:
    search = scrape_with_config(config)
except GoogleSearchError as e:
    print(e)

# Inspect what we got: dump every scraped result page and its links.
for serp in search.serps:
    print(serp)
    for link in serp.links:
        print(link)
| 808 | 23.515152 | 63 | py |
GoogleScraper | GoogleScraper-master/Examples/headless-firefox.py | from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
# Start a headless Firefox via geckodriver; binary/driver paths are
# machine-specific and must be adapted before running.
binary = FirefoxBinary('/home/nikolai/ff/firefox/firefox')
options = Options()
# NOTE(review): Options.set_headless() is deprecated in newer selenium
# releases (use `options.headless = True` there) — confirm target version.
options.set_headless(headless=True)
driver = webdriver.Firefox(firefox_binary=binary,
firefox_options=options, executable_path='../Drivers/geckodriver')
driver.get("http://google.com/")
print ("Headless Firefox Initialized")
driver.quit()
GoogleScraper | GoogleScraper-master/Examples/basic.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from GoogleScraper import scrape_with_config, GoogleSearchError
# Minimal GoogleScraper example: a single keyword scraped from two engines
# with a real Chrome instance (selenium mode), caching disabled.
# See in the config.cfg file for possible values
config = {
    'use_own_ip': True,
    'keyword': 'Let\'s go bubbles!',
    'search_engines': ['yandex', 'bing'],
    'num_pages_for_keyword': 1,
    'scrape_method': 'selenium',
    'sel_browser': 'chrome',
    'do_caching': False
}
try:
    search = scrape_with_config(config)
except GoogleSearchError as e:
    print(e)
# NOTE(review): if scrape_with_config() raised above, `search` is unbound below.
# let's inspect what we got
for serp in search.serps:
    print(serp)
    print(serp.search_engine_name)
    print(serp.scrape_method)
    print(serp.page_number)
    print(serp.requested_at)
    print(serp.num_results)
    # ... more attributes ...
    for link in serp.links:
        print(link)
GoogleScraper | GoogleScraper-master/Examples/basic_2_pages.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from GoogleScraper import scrape_with_config, GoogleSearchError
# Like basic.py, but requests two result pages for the keyword from bing.
# See in the config.cfg file for possible values
config = {
    'use_own_ip': True,
    'keyword': 'reddit',
    'search_engines': ['bing',],
    'num_pages_for_keyword': 2,
    'scrape_method': 'selenium',
    'sel_browser': 'chrome',
}
try:
    search = scrape_with_config(config)
except GoogleSearchError as e:
    print(e)
# NOTE(review): if scrape_with_config() raised above, `search` is unbound below.
# let's inspect what we got
for serp in search.serps:
    print(serp)
    print(serp.search_engine_name)
    print(serp.scrape_method)
    print(serp.page_number)
    print(serp.requested_at)
    print(serp.num_results)
    # ... more attributes ...
    for link in serp.links:
        print(link)
GoogleScraper | GoogleScraper-master/Examples/selenium_mode_example.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from GoogleScraper import scrape_with_config, GoogleSearchError
# Scrape a fixed keyword list from two engines, two result pages each,
# driving a real Chrome browser through selenium.
keywords = [
    'alpha ',
    'beta',
    'yankee'
]
# See in the config.cfg file for possible values
config = {
    'use_own_ip': True,
    'keywords': keywords,
    'search_engines': ['baidu', 'duckduckgo'],
    'num_pages_for_keyword': 2,
    'scrape_method': 'selenium',
    'sel_browser': 'chrome',
}
try:
    search = scrape_with_config(config)
except GoogleSearchError as e:
    print(e)
# NOTE(review): if scrape_with_config() raised above, `search` is unbound below.
for serp in search.serps:
    print(serp)
    for link in serp.links:
        print(link)
GoogleScraper | GoogleScraper-master/Examples/normal-chrome.py | import time
from selenium import webdriver
from selenium.common.exceptions import TimeoutException, WebDriverException, ElementNotVisibleException, NoSuchElementException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# Drive a headless Chromium build (flag set typical of AWS-Lambda packaging)
# through a Google search and count the organic result blocks.
chromedriver = '/home/nikolai/projects/work/aws-lambda-scraping/bin/chromedriver'
binary = '/home/nikolai/projects/work/aws-lambda-scraping/bin/headless-chromium'
options = webdriver.ChromeOptions()
options.add_argument('headless')
options.add_argument('window-size=1200x600') # optional
options.add_argument('--no-sandbox')
options.add_argument('--disable-gpu')
options.add_argument('--user-data-dir=/tmp')
options.add_argument('--hide-scrollbars')
options.add_argument('--enable-logging')
options.add_argument('--log-level=0')
options.add_argument('--v=99')
options.add_argument('--single-process')
options.add_argument('--data-path=/tmp')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--homedir=/tmp')
options.add_argument('--disk-cache-dir=/tmp')
#options.add_argument('user-agent=bot')
options.binary_location = binary
browser = webdriver.Chrome(executable_path=chromedriver, options=options)
browser.get('https://www.google.com/search?num=100')
search_input = None
try:
    # Wait up to 5 seconds for the query box ("q") to become visible.
    search_input = WebDriverWait(browser, 5).until(
        EC.visibility_of_element_located((By.NAME, 'q')))
    print('Got a search input field.')
except TimeoutException:
    print('No search input field located after 5 seconds.')
keyword = 'startup berlin'
search_input = browser.find_element_by_xpath('//*[@name="q"]')
try:
    search_input.send_keys(keyword + Keys.ENTER)
    print('Google search for "{}" successful! '.format(keyword))
except WebDriverException as e:
    print('Cannot make a google search: {}'.format(e))
time.sleep(1)
try:
    all_links = browser.find_elements_by_css_selector('#center_col .g')
except NoSuchElementException as e:
    print('Cannot find serp page: {}'.format(e))
print('Found {} results'.format(len(all_links)))
# browser.save_screenshot('headless_chrome_test.png')
# browser.quit()
crnn-pytorch | crnn-pytorch-master/alphabets.py | alphabet = """的
是
不
我
一
有
大
在
人
了
中
到
資
要
可
以
這
個
你
會
好
為
上
來
就
學
交
也
用
能
如
文
時
沒
說
他
看
提
那
問
生
過
下
請
天
們
所
多
麼
小
想
得
之
還
電
出
工
對
都
機
自
後
子
而
訊
站
去
心
只
家
知
國
台
很
信
成
章
何
同
道
地
發
法
無
然
但
嗎
當
於
本
現
年
前
真
最
和
新
因
果
定
意
情
點
題
其
事
方
清
科
樣
些
吧
三
此
位
理
行
作
經
者
什
謝
名
日
正
華
話
開
實
再
城
愛
與
二
動
比
高
面
又
車
力
或
種
像
應
女
教
分
手
打
已
次
長
太
明
己
路
起
相
主
關
鳳
間
呢
覺
該
十
外
凰
友
才
民
系
進
使
她
著
各
少
全
兩
回
加
將
感
第
性
球
式
把
被
老
公
龍
程
論
及
別
給
聽
水
重
體
做
校
裡
常
東
風
您
灣
啦
見
解
等
部
原
月
美
先
管
區
錯
音
否
啊
找
網
樂
讓
通
入
期
選
較
四
場
由
書
它
快
從
歡
數
表
怎
至
立
內
合
目
望
認
幾
社
告
更
版
度
考
喜
頭
難
光
買
今
身
許
弟
若
算
記
代
統
處
完
號
接
言
政
玩
師
字
並
男
計
誰
山
張
黨
每
且
結
改
非
星
連
哈
建
放
直
轉
報
活
設
變
指
氣
研
陳
試
西
五
希
取
神
化
物
王
戰
近
世
受
義
反
單
死
任
跟
便
空
林
士
臺
卻
北
隊
功
必
聲
寫
平
影
業
金
檔
片
討
色
容
央
妳
向
市
則
員
興
利
強
白
價
安
呵
特
思
叫
總
辦
保
花
議
傳
元
求
份
件
持
萬
未
究
決
投
哪
喔
笑
貓
組
獨
級
走
支
曾
標
流
竹
兄
阿
室
卡
馬
共
需
海
口
門
般
線
語
命
觀
視
朋
聯
參
格
黃
錢
修
失
兒
住
八
腦
板
吃
另
換
即
象
料
錄
拿
專
遠
速
基
幫
形
確
候
裝
孩
備
歌
界
除
南
器
畫
訴
差
講
類
英
案
帶
久
乎
掉
迷
量
引
整
似
耶
奇
制
邊
型
超
識
雖
怪
飛
始
品
運
賽
費
夢
故
班
權
破
驗
眼
滿
念
造
軍
精
務
留
服
六
圖
收
舍
半
讀
願
李
底
約
雄
課
答
令
深
票
達
演
早
賣
棒
夠
黑
院
假
曲
火
準
百
談
勝
碟
術
推
存
治
離
易
往
況
晚
示
證
段
導
傷
調
團
七
永
剛
哥
甚
德
殺
怕
包
列
概
照
夜
排
客
絕
軟
商
根
九
切
條
集
千
落
竟
越
待
忘
盡
據
雙
供
稱
座
值
消
產
紅
跑
嘛
園
附
硬
雲
遊
展
執
聞
唱
育
斯
某
技
唉
息
苦
質
油
救
效
須
介
首
助
職
例
熱
畢
節
害
擊
亂
態
嗯
寶
倒
注
停
古
輸
規
福
親
查
復
步
舉
魚
斷
終
輕
環
練
印
隨
依
趣
限
響
省
局
續
司
角
簡
極
幹
篇
羅
佛
克
陽
武
疑
送
拉
習
源
免
志
鳥
煩
足
館
仍
低
廣
土
呀
樓
壞
兵
顯
率
聖
碼
眾
爭
初
誤
楚
責
境
野
預
具
智
壓
係
青
貴
順
負
魔
適
哇
測
慢
懷
懂
史
配
嗚
味
亦
醫
迎
舞
戀
細
灌
甲
帝
句
屬
靈
評
騎
宜
敗
左
追
狂
敢
春
狗
際
遇
族
群
痛
右
康
佳
楊
木
病
戲
項
抓
徵
善
官
護
博
補
石
爾
營
歷
隻
按
妹
里
編
歲
擇
溫
守
血
領
尋
田
養
謂
居
異
雨
止
跳
君
爛
優
封
拜
惡
啥
浪
核
聊
急
狀
陸
激
模
攻
忙
良
劇
牛
壘
增
維
靜
陣
抱
勢
嚴
詞
亞
夫
簽
悲
密
幕
毒
廠
爽
緣
店
吳
蘭
睡
致
江
宿
翻
香
蠻
警
控
趙
冷
威
微
坐
週
宗
普
登
母
絡
午
恐
套
巴
雜
創
舊
輯
幸
劍
亮
述
堂
酒
麗
牌
仔
腳
突
搞
父
俊
暴
防
吉
禮
素
招
草
周
房
餐
慮
充
府
背
典
仁
漫
景
紹
諸
琴
憶
援
尤
缺
扁
罵
純
惜
授
皮
松
委
湖
誠
麻
置
靠
繼
判
益
波
姐
既
射
欲
刻
堆
釋
含
承
退
莫
劉
昨
旁
紀
趕
製
尚
藝
肉
律
鐵
奏
樹
毛
罪
筆
彩
註
歸
彈
虎
衛
刀
皆
鍵
售
塊
險
榮
播
施
銘
囉
漢
賞
欣
升
葉
螢
載
嘿
弄
鐘
付
寄
鬼
哦
燈
呆
洋
嘻
布
磁
薦
檢
派
構
媽
藍
貼
豬
策
紙
暗
巧
努
雷
架
享
宣
逢
均
擔
啟
濟
罷
呼
劃
偉
島
歉
郭
訓
穿
詳
沙
督
梅
顧
敵"""
| 2,017 | 1.015984 | 15 | py |
crnn-pytorch | crnn-pytorch-master/utils.py | #!/usr/bin/python
# encoding: utf-8
import torch
import torch.nn as nn
from torch.autograd import Variable
import collections
class strLabelConverter(object):
    """Convert between text strings and integer label sequences for CTC.

    NOTE:
        Index 0 is reserved for the CTC `blank` symbol, so every alphabet
        character maps to its position + 1.  A '-' is appended to the stored
        alphabet so that index -1 decodes to a printable placeholder.

    Args:
        alphabet (str): set of the possible characters.
        ignore_case (bool, default=False): whether to lowercase the alphabet.
    """
    def __init__(self, alphabet, ignore_case=False):
        self._ignore_case = ignore_case
        if self._ignore_case:
            alphabet = alphabet.lower()
        self.alphabet = alphabet + '-'  # for `-1` index
        self.dict = {}
        for i, char in enumerate(alphabet):
            # NOTE: 0 is reserved for 'blank' required by wrap_ctc
            self.dict[char] = i + 1
    def encode(self, text):
        """Encode a batch of UTF-8 byte strings into padded label tensors.

        Args:
            text (iterable of bytes): texts to convert; each item is decoded
                as strict UTF-8 before the character lookup.

        Returns:
            torch.LongTensor [batch, max_len]: labels, right-padded with 0
                (blank) to the longest item in the batch.
            torch.LongTensor [batch]: true (unpadded) length of each text.
        """
        length = []
        result = []
        for item in text:
            item = item.decode('utf-8','strict')
            length.append(len(item))
            r = []
            for char in item:
                index = self.dict[char]
                # result.append(index)
                r.append(index)
            result.append(r)
        # Right-pad every sequence with 0 so the batch forms a rectangle.
        max_len = 0
        for r in result:
            if len(r) > max_len:
                max_len = len(r)
        result_temp = []
        for r in result:
            for i in range(max_len - len(r)):
                r.append(0)
            result_temp.append(r)
        text = result_temp
        return (torch.LongTensor(text), torch.LongTensor(length))
    def decode(self, t, length, raw=False):
        """Decode encoded label sequences back into strings.

        Args:
            t (torch.LongTensor): encoded labels (flat for a single item, or
                the concatenation of all items in batch mode).
            length (torch.LongTensor): length of each encoded text.
            raw (bool): if True keep blanks and repeats verbatim; if False
                apply CTC collapsing (drop blanks and repeated symbols).

        Raises:
            AssertionError: when the texts and its length does not match.

        Returns:
            str or list of str: the decoded text(s).
        """
        if length.numel() == 1:
            length = length[0]
            assert t.numel() == length, "text with length: {} does not match declared length: {}".format(t.numel(), length)
            if raw:
                return ''.join([self.alphabet[i - 1] for i in t])
            else:
                char_list = []
                for i in range(length):
                    # CTC rule: skip blanks (0) and repeated labels.
                    if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])):
                        char_list.append(self.alphabet[t[i] - 1])
                return ''.join(char_list)
        else:
            # batch mode: slice the flat tensor item by item and recurse.
            assert t.numel() == length.sum(), "texts with length: {} does not match declared length: {}".format(t.numel(), length.sum())
            texts = []
            index = 0
            for i in range(length.numel()):
                l = length[i]
                texts.append(
                    self.decode(
                        t[index:index + l], torch.LongTensor([l]), raw=raw))
                index += l
            return texts
class averager(object):
    """Running-mean accumulator for `torch.Variable` / `torch.Tensor` values."""

    def __init__(self):
        self.reset()

    def add(self, v):
        # Reduce the incoming value to (element count, scalar sum), then
        # fold both into the running totals.
        if isinstance(v, Variable):
            n = v.data.numel()
            total = v.data.sum()
        elif isinstance(v, torch.Tensor):
            n = v.numel()
            total = v.sum()
        self.n_count = self.n_count + n
        self.sum = self.sum + total

    def reset(self):
        # Drop all accumulated state.
        self.n_count = 0
        self.sum = 0

    def val(self):
        # Mean of everything added since the last reset; 0 when empty.
        if self.n_count == 0:
            return 0
        return self.sum / float(self.n_count)
def oneHot(v, v_length, nc):
    """Expand a packed label vector into a zero-padded one-hot tensor.

    `v` holds the concatenated labels of a whole batch and `v_length[i]`
    says how many of them belong to sample i.

    Args:
        v (torch.LongTensor): flat label indices in [0, nc).
        v_length (torch.LongTensor): per-sample label counts, shape [batch].
        nc (int): number of classes (one-hot width).

    Returns:
        torch.FloatTensor [batch, max_len, nc]: one-hot rows, all-zero
        beyond each sample's true length.
    """
    batchSize = v_length.size(0)
    # int(...) casts: on modern PyTorch reductions/indexing return 0-dim
    # tensors, which the legacy FloatTensor(size...) constructor rejects.
    maxLength = int(v_length.max())
    v_onehot = torch.FloatTensor(batchSize, maxLength, nc).fill_(0)
    acc = 0
    for i in range(batchSize):
        length = int(v_length[i])
        label = v[acc:acc + length].view(-1, 1).long()
        v_onehot[i, :length].scatter_(1, label, 1.0)
        acc += length
    return v_onehot
def loadData(v, data):
    """Copy `data` into the preallocated tensor `v` in place, resizing it first."""
    with torch.no_grad():
        v.resize_(data.size())
        v.copy_(data)
def prettyPrint(v):
    """Print a tensor's size and dtype plus its max/min/mean values."""
    print('Size {0}, Type: {1}'.format(str(v.size()), v.data.type()))
    # .item() extracts the Python scalar; the old `.data[0]` idiom indexes a
    # 0-dim tensor and raises IndexError on modern PyTorch.
    print('| Max: %f | Min: %f | Mean: %f' % (v.max().item(), v.min().item(),
                                              v.mean().item()))
def assureRatio(img):
    """Ensure imgH <= imgW by upsampling height-dominant images to a square."""
    batch, channels, height, width = img.size()
    if height <= width:
        return img
    # Stretch the narrow axis so the image becomes height x height.
    upsampler = nn.UpsamplingBilinear2d(size=(height, height), scale_factor=None)
    return upsampler(img)
| 4,860 | 28.107784 | 136 | py |
crnn-pytorch | crnn-pytorch-master/dataset.py | #!/usr/bin/python
# encoding: utf-8
import random
import torch
from torch.utils.data import Dataset
from torch.utils.data import sampler
import torchvision.transforms as transforms
import lmdb
import six
import sys
from PIL import Image
import numpy as np
class lmdbDataset(Dataset):
    """PyTorch Dataset backed by an LMDB database of (image, label) pairs.

    Expects the layout written by tool/create_dataset.py: a 'num-samples'
    key plus 'image-%09d' / 'label-%09d' entries indexed from 1.
    """
    def __init__(self, root=None, transform=None, target_transform=None):
        # root: path of the LMDB directory.
        # transform / target_transform: optional callables applied to the
        # decoded image / raw label bytes respectively.
        self.env = lmdb.open(
            root,
            max_readers=1,
            readonly=True,
            lock=False,
            readahead=False,
            meminit=False)
        if not self.env:
            print('cannot creat lmdb from %s' % (root))
            sys.exit(0)
        with self.env.begin(write=False) as txn:
            nSamples = int(txn.get('num-samples'.encode('utf-8')))
            self.nSamples = nSamples
        self.transform = transform
        self.target_transform = target_transform
    def __len__(self):
        return self.nSamples
    def __getitem__(self, index):
        assert index <= len(self), 'index range error'
        # LMDB keys are 1-based while Dataset indices are 0-based.
        index += 1
        with self.env.begin(write=False) as txn:
            img_key = 'image-%09d' % index
            imgbuf = txn.get(img_key.encode('utf-8'))
            buf = six.BytesIO()
            buf.write(imgbuf)
            buf.seek(0)
            try:
                # Decode the stored bytes as an 8-bit grayscale PIL image.
                img = Image.open(buf).convert('L')
            except IOError:
                print('Corrupted image for %d' % index)
                # Fall back to the next sample when decoding fails.
                return self[index + 1]
            if self.transform is not None:
                img = self.transform(img)
            label_key = 'label-%09d' % index
            label = txn.get(label_key.encode('utf-8'))
            if self.target_transform is not None:
                label = self.target_transform(label)
        return (img, label)
class resizeNormalize(object):
    """Resize a PIL image to `size` and normalize its pixels into [-1, 1]."""

    def __init__(self, size, interpolation=Image.BILINEAR):
        # size: (width, height) target passed straight to PIL's resize.
        self.size = size
        self.interpolation = interpolation
        self.toTensor = transforms.ToTensor()

    def __call__(self, img):
        # PIL resize -> [0, 1] float tensor -> shift/scale into [-1, 1].
        resized = img.resize(self.size, self.interpolation)
        tensor = self.toTensor(resized)
        tensor.sub_(0.5).div_(0.5)
        return tensor
class randomSequentialSampler(sampler.Sampler):
    """Sampler yielding random *contiguous* runs of indices, batch by batch.

    Each batch is a block of `batch_size` consecutive dataset indices
    starting at a random offset; a final partial block covers the tail.
    """

    def __init__(self, data_source, batch_size):
        self.num_samples = len(data_source)
        self.batch_size = batch_size

    def __iter__(self):
        n_batch = len(self) // self.batch_size
        tail = len(self) % self.batch_size
        index = torch.LongTensor(len(self)).fill_(0)
        # Highest valid start offset so a full block still fits; clamped to 0
        # so datasets smaller than one batch no longer crash randint().
        max_start = max(0, len(self) - self.batch_size)
        for i in range(n_batch):
            random_start = random.randint(0, max_start)
            # torch.arange replaces the deprecated torch.range and produces
            # integer indices directly.
            batch_index = random_start + torch.arange(0, self.batch_size)
            index[i * self.batch_size:(i + 1) * self.batch_size] = batch_index
        # deal with tail; slicing from n_batch avoids the NameError the old
        # code hit when n_batch == 0 (loop variable `i` never bound).
        if tail:
            random_start = random.randint(0, max_start)
            tail_index = random_start + torch.arange(0, tail)
            index[n_batch * self.batch_size:] = tail_index
        return iter(index)

    def __len__(self):
        return self.num_samples
class alignCollate(object):
    """Batch collate_fn: resize PIL images to a common size and stack them.

    With keep_ratio=True the target width is derived from the widest aspect
    ratio found in the batch instead of the fixed imgW.
    """
    def __init__(self, imgH=32, imgW=100, keep_ratio=False, min_ratio=1):
        self.imgH = imgH
        self.imgW = imgW
        self.keep_ratio = keep_ratio
        self.min_ratio = min_ratio
    def __call__(self, batch):
        images, labels = zip(*batch)
        imgH = self.imgH
        imgW = self.imgW
        if self.keep_ratio:
            ratios = []
            for image in images:
                w, h = image.size
                ratios.append(w / float(h))
            ratios.sort()
            max_ratio = ratios[-1]
            imgW = int(np.floor(max_ratio * imgH))
            imgW = max(imgH * self.min_ratio, imgW)  # never narrower than imgH * min_ratio
        transform = resizeNormalize((imgW, imgH))
        images = [transform(image) for image in images]
        images = torch.cat([t.unsqueeze(0) for t in images], 0)
        return images, labels
| 4,008 | 28.262774 | 78 | py |
crnn-pytorch | crnn-pytorch-master/demo.py | import torch
from torch.autograd import Variable
import utils
import dataset
from PIL import Image
import models.crnn as crnn
import params
import argparse
# Single-image CRNN inference demo: load a checkpoint, preprocess one image
# and print both the raw and the CTC-collapsed predictions.
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model_path', type = str, required = True, help = 'crnn model path')
parser.add_argument('-i', '--image_path', type = str, required = True, help = 'demo image path')
args = parser.parse_args()
model_path = args.model_path
image_path = args.image_path
# net init
nclass = len(params.alphabet) + 1  # +1 for the CTC blank symbol
model = crnn.CRNN(params.imgH, params.nc, nclass, params.nh)
if torch.cuda.is_available():
    model = model.cuda()
# load model
print('loading pretrained model from %s' % model_path)
if params.multi_gpu:
    model = torch.nn.DataParallel(model)
model.load_state_dict(torch.load(model_path))
converter = utils.strLabelConverter(params.alphabet)
# Resize to the 100x32 input the network was trained on and normalize.
transformer = dataset.resizeNormalize((100, 32))
image = Image.open(image_path).convert('L')
image = transformer(image)
if torch.cuda.is_available():
    image = image.cuda()
image = image.view(1, *image.size())
image = Variable(image)
model.eval()
preds = model(image)
# Greedy decoding: take the argmax class at every time step.
_, preds = preds.max(2)
preds = preds.transpose(1, 0).contiguous().view(-1)
preds_size = Variable(torch.LongTensor([preds.size(0)]))
raw_pred = converter.decode(preds.data, preds_size.data, raw=True)
sim_pred = converter.decode(preds.data, preds_size.data, raw=False)
print('%-20s => %-20s' % (raw_pred, sim_pred))
crnn-pytorch | crnn-pytorch-master/params.py | import alphabets
# about data and net
alphabet = alphabets.alphabet
keep_ratio = False # whether to keep ratio for image resize
manualSeed = 1234 # reproduce experiemnt
random_sample = True # whether to sample the dataset with random sampler
imgH = 32 # the height of the input image to network
imgW = 100 # the width of the input image to network
nh = 256 # size of the lstm hidden state
nc = 1
pretrained = '' # path to pretrained model (to continue training)
expr_dir = 'expr' # where to store samples and models
dealwith_lossnan = False # whether to replace all nan/inf in gradients to zero
# hardware
cuda = True # enables cuda
multi_gpu = False # whether to use multi gpu
ngpu = 1 # number of GPUs to use. Do remember to set multi_gpu to True!
workers = 0 # number of data loading workers
# training process
displayInterval = 100 # interval to be print the train loss
valInterval = 1000 # interval to val the model loss and accuray
saveInterval = 1000 # interval to save model
n_val_disp = 10 # number of samples to display when val the model
# finetune
nepoch = 1000 # number of epochs to train for
batchSize = 64 # input batch size
lr = 0.0001 # learning rate for Critic, not used by adadealta
beta1 = 0.5 # beta1 for adam. default=0.5
adam = False # whether to use adam (default is rmsprop)
adadelta = False # whether to use adadelta (default is rmsprop)
| 1,368 | 38.114286 | 78 | py |
crnn-pytorch | crnn-pytorch-master/train.py | from __future__ import print_function
from __future__ import division
import argparse
import random
import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
from torch.autograd import Variable
import numpy as np
# from warpctc_pytorch import CTCLoss
from torch.nn import CTCLoss
import os
import utils
import dataset
import models.crnn as net
import params
parser = argparse.ArgumentParser()
parser.add_argument('-train', '--trainroot', required=True, help='path to train dataset')
parser.add_argument('-val', '--valroot', required=True, help='path to val dataset')
args = parser.parse_args()
if not os.path.exists(params.expr_dir):
os.makedirs(params.expr_dir)
# ensure everytime the random is the same
random.seed(params.manualSeed)
np.random.seed(params.manualSeed)
torch.manual_seed(params.manualSeed)
cudnn.benchmark = True
if torch.cuda.is_available() and not params.cuda:
print("WARNING: You have a CUDA device, so you should probably set cuda in params.py to True")
# -----------------------------------------------
"""
In this block
Get train and val data_loader
"""
def data_loader():
    """Build and return (train_loader, val_loader) from the CLI dataset roots.

    Train batches are collated with alignCollate (resize to params.imgH/imgW);
    val images are resized up-front via resizeNormalize.
    """
    # train
    train_dataset = dataset.lmdbDataset(root=args.trainroot)
    assert train_dataset
    if not params.random_sample:
        sampler = dataset.randomSequentialSampler(train_dataset, params.batchSize)
    else:
        sampler = None
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=params.batchSize, \
        shuffle=True, sampler=sampler, num_workers=int(params.workers), \
        collate_fn=dataset.alignCollate(imgH=params.imgH, imgW=params.imgW, keep_ratio=params.keep_ratio))
    # val
    val_dataset = dataset.lmdbDataset(root=args.valroot, transform=dataset.resizeNormalize((params.imgW, params.imgH)))
    assert val_dataset
    val_loader = torch.utils.data.DataLoader(val_dataset, shuffle=True, batch_size=params.batchSize, num_workers=int(params.workers))
    return train_loader, val_loader
train_loader, val_loader = data_loader()
# -----------------------------------------------
"""
In this block
Net init
Weight init
Load pretrained model
"""
def weights_init(m):
    """DCGAN-style initializer for use with Module.apply().

    Conv layers get N(0, 0.02) weights; BatchNorm layers get N(1, 0.02)
    weights and zero bias.  All other module types are left untouched.
    """
    name = type(m).__name__
    if 'Conv' in name:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in name:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
def net_init():
    """Create the CRNN, apply DCGAN-style weight init and load pretrained weights if configured."""
    nclass = len(params.alphabet) + 1  # +1 for the CTC blank symbol
    crnn = net.CRNN(params.imgH, params.nc, nclass, params.nh)
    crnn.apply(weights_init)
    if params.pretrained != '':
        print('loading pretrained model from %s' % params.pretrained)
        # Checkpoints saved from a DataParallel model carry a 'module.'
        # prefix, so wrap before loading when multi_gpu is set.
        if params.multi_gpu:
            crnn = torch.nn.DataParallel(crnn)
        crnn.load_state_dict(torch.load(params.pretrained))
    return crnn
crnn = net_init()
print(crnn)
# -----------------------------------------------
"""
In this block
Init some utils defined in utils.py
"""
# Compute average for `torch.Variable` and `torch.Tensor`.
loss_avg = utils.averager()
# Convert between str and label.
converter = utils.strLabelConverter(params.alphabet)
# -----------------------------------------------
"""
In this block
criterion define
"""
criterion = CTCLoss()
# -----------------------------------------------
"""
In this block
Init some tensor
Put tensor and net on cuda
NOTE:
image, text, length is used by both val and train
becaues train and val will never use it at the same time.
"""
image = torch.FloatTensor(params.batchSize, 3, params.imgH, params.imgH)
text = torch.LongTensor(params.batchSize * 5)
length = torch.LongTensor(params.batchSize)
if params.cuda and torch.cuda.is_available():
criterion = criterion.cuda()
image = image.cuda()
text = text.cuda()
crnn = crnn.cuda()
if params.multi_gpu:
crnn = torch.nn.DataParallel(crnn, device_ids=range(params.ngpu))
image = Variable(image)
text = Variable(text)
length = Variable(length)
# -----------------------------------------------
"""
In this block
Setup optimizer
"""
if params.adam:
optimizer = optim.Adam(crnn.parameters(), lr=params.lr, betas=(params.beta1, 0.999))
elif params.adadelta:
optimizer = optim.Adadelta(crnn.parameters())
else:
optimizer = optim.RMSprop(crnn.parameters(), lr=params.lr)
# -----------------------------------------------
"""
In this block
Dealwith lossnan
NOTE:
I use different way to dealwith loss nan according to the torch version.
"""
if params.dealwith_lossnan:
if torch.__version__ >= '1.1.0':
"""
zero_infinity (bool, optional):
Whether to zero infinite losses and the associated gradients.
Default: ``False``
Infinite losses mainly occur when the inputs are too short
to be aligned to the targets.
Pytorch add this param after v1.1.0
"""
criterion = CTCLoss(zero_infinity = True)
else:
"""
only when
torch.__version__ < '1.1.0'
we use this way to change the inf to zero
"""
crnn.register_backward_hook(crnn.backward_hook)
# -----------------------------------------------
def val(net, criterion):
    """Run one evaluation pass over `val_loader` and print loss/accuracy.

    Uses the module-level `image`/`text`/`length` buffers, `converter` and
    `crnn`; gradients are disabled by freezing the global model's parameters.
    """
    print('Start val')
    for p in crnn.parameters():
        p.requires_grad = False
    net.eval()
    val_iter = iter(val_loader)
    n_correct = 0
    loss_avg = utils.averager()  # local: keeps the global train averager clean
    max_iter = len(val_loader)
    for i in range(max_iter):
        # next(iterator) replaces the Python-2-only `iterator.next()`,
        # which raises AttributeError on Python 3.
        data = next(val_iter)
        cpu_images, cpu_texts = data
        batch_size = cpu_images.size(0)
        utils.loadData(image, cpu_images)
        t, l = converter.encode(cpu_texts)
        utils.loadData(text, t)
        utils.loadData(length, l)
        preds = crnn(image)
        preds_size = Variable(torch.LongTensor([preds.size(0)] * batch_size))
        cost = criterion(preds, text, preds_size, length) / batch_size
        loss_avg.add(cost)
        _, preds = preds.max(2)
        preds = preds.transpose(1, 0).contiguous().view(-1)
        sim_preds = converter.decode(preds.data, preds_size.data, raw=False)
        # Decode ground-truth byte strings; a dedicated name avoids the old
        # code's shadowing of the outer loop variable `i`.
        cpu_texts_decode = []
        for raw_text in cpu_texts:
            cpu_texts_decode.append(raw_text.decode('utf-8', 'strict'))
        for pred, target in zip(sim_preds, cpu_texts_decode):
            if pred == target:
                n_correct += 1
    # Show a few raw-vs-collapsed predictions from the last batch.
    raw_preds = converter.decode(preds.data, preds_size.data, raw=True)[:params.n_val_disp]
    for raw_pred, pred, gt in zip(raw_preds, sim_preds, cpu_texts_decode):
        print('%-20s => %-20s, gt: %-20s' % (raw_pred, pred, gt))
    accuracy = n_correct / float(max_iter * params.batchSize)
    print('Val loss: %f, accuray: %f' % (loss_avg.val(), accuracy))
def train(net, criterion, optimizer, train_iter):
    """Run a single optimization step on the next training batch.

    Returns the per-sample CTC cost tensor for this batch.
    """
    for p in crnn.parameters():
        p.requires_grad = True
    crnn.train()
    # next(iterator) replaces the Python-2-only `iterator.next()`,
    # which raises AttributeError on Python 3.
    data = next(train_iter)
    cpu_images, cpu_texts = data
    batch_size = cpu_images.size(0)
    utils.loadData(image, cpu_images)
    t, l = converter.encode(cpu_texts)
    utils.loadData(text, t)
    utils.loadData(length, l)
    optimizer.zero_grad()
    preds = crnn(image)
    preds_size = Variable(torch.LongTensor([preds.size(0)] * batch_size))
    cost = criterion(preds, text, preds_size, length) / batch_size
    # crnn.zero_grad()
    cost.backward()
    optimizer.step()
    return cost
if __name__ == "__main__":
    # Main loop: one pass over train_loader per epoch, with periodic
    # logging, validation and checkpointing (intervals come from params).
    for epoch in range(params.nepoch):
        train_iter = iter(train_loader)
        i = 0
        while i < len(train_loader):
            cost = train(crnn, criterion, optimizer, train_iter)
            loss_avg.add(cost)
            i += 1
            if i % params.displayInterval == 0:
                print('[%d/%d][%d/%d] Loss: %f' %
                      (epoch, params.nepoch, i, len(train_loader), loss_avg.val()))
                loss_avg.reset()
            if i % params.valInterval == 0:
                val(crnn, criterion)
            # do checkpointing
            if i % params.saveInterval == 0:
                torch.save(crnn.state_dict(), '{0}/netCRNN_{1}_{2}.pth'.format(params.expr_dir, epoch, i))
| 8,227 | 29.587361 | 133 | py |
crnn-pytorch | crnn-pytorch-master/models/crnn.py | import torch.nn as nn
import params
import torch.nn.functional as F
class BidirectionalLSTM(nn.Module):
    """A bidirectional LSTM followed by a per-time-step linear projection."""

    def __init__(self, nIn, nHidden, nOut):
        super(BidirectionalLSTM, self).__init__()
        self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True)
        # Both directions are concatenated, hence the 2x input width.
        self.embedding = nn.Linear(nHidden * 2, nOut)

    def forward(self, input):
        features, _ = self.rnn(input)
        seq_len, batch, hidden = features.size()
        # Flatten time and batch so the projection runs as one matmul,
        # then restore the [T, b, nOut] layout.
        projected = self.embedding(features.view(seq_len * batch, hidden))
        return projected.view(seq_len, batch, -1)
class CRNN(nn.Module):
    """Convolutional Recurrent Network for text-line recognition.

    A 7-conv VGG-style backbone collapses a [b, nc, imgH, W] image to a
    height-1 feature map, which is then read column by column by two stacked
    bidirectional LSTMs and projected to per-time-step class log-probs.
    """
    def __init__(self, imgH, nc, nclass, nh, n_rnn=2, leakyRelu=False):
        # imgH: input image height (must be divisible by 16, see pooling).
        # nc: input channels; nclass: output classes (incl. CTC blank);
        # nh: LSTM hidden size.  n_rnn is accepted but currently unused.
        super(CRNN, self).__init__()
        assert imgH % 16 == 0, 'imgH has to be a multiple of 16'
        # Per-conv-layer hyperparameters: kernel, padding, stride, channels.
        ks = [3, 3, 3, 3, 3, 3, 2]
        ps = [1, 1, 1, 1, 1, 1, 0]
        ss = [1, 1, 1, 1, 1, 1, 1]
        nm = [64, 128, 256, 256, 512, 512, 512]
        cnn = nn.Sequential()
        def convRelu(i, batchNormalization=False):
            # Append conv layer i (+ optional BatchNorm) followed by a ReLU.
            # Module names ('conv0', ...) are state_dict keys — do not change.
            nIn = nc if i == 0 else nm[i - 1]
            nOut = nm[i]
            cnn.add_module('conv{0}'.format(i),
                           nn.Conv2d(nIn, nOut, ks[i], ss[i], ps[i]))
            if batchNormalization:
                cnn.add_module('batchnorm{0}'.format(i), nn.BatchNorm2d(nOut))
            if leakyRelu:
                cnn.add_module('relu{0}'.format(i),
                               nn.LeakyReLU(0.2, inplace=True))
            else:
                cnn.add_module('relu{0}'.format(i), nn.ReLU(True))
        convRelu(0)
        cnn.add_module('pooling{0}'.format(0), nn.MaxPool2d(2, 2))  # 64x16x64
        convRelu(1)
        cnn.add_module('pooling{0}'.format(1), nn.MaxPool2d(2, 2))  # 128x8x32
        convRelu(2, True)
        convRelu(3)
        # Asymmetric pooling: halve the height but (almost) keep the width,
        # preserving horizontal resolution for narrow characters.
        cnn.add_module('pooling{0}'.format(2),
                       nn.MaxPool2d((2, 2), (2, 1), (0, 1)))  # 256x4x16
        convRelu(4, True)
        convRelu(5)
        cnn.add_module('pooling{0}'.format(3),
                       nn.MaxPool2d((2, 2), (2, 1), (0, 1)))  # 512x2x16
        convRelu(6, True)  # 512x1x16
        self.cnn = cnn
        self.rnn = nn.Sequential(
            BidirectionalLSTM(512, nh, nh),
            BidirectionalLSTM(nh, nh, nclass))
    def forward(self, input):
        # conv features
        conv = self.cnn(input)
        b, c, h, w = conv.size()
        assert h == 1, "the height of conv must be 1"
        # Drop the height dim and make width the time axis: [w, b, c].
        conv = conv.squeeze(2)
        conv = conv.permute(2, 0, 1)  # [w, b, c]
        # rnn features
        output = self.rnn(conv)
        # add log_softmax to converge output
        output = F.log_softmax(output, dim=2)
        return output
    def backward_hook(self, module, grad_input, grad_output):
        # Registered when params.dealwith_lossnan is set on torch < 1.1.
        for g in grad_input:
            # NaN != NaN, so this zeroes NaN gradient entries in place
            # (note: infinities are NOT caught by this test).
            g[g != g] = 0  # replace all nan/inf in gradients to zero
| 2,865 | 30.494505 | 78 | py |
crnn-pytorch | crnn-pytorch-master/models/__init__.py | 0 | 0 | 0 | py | |
crnn-pytorch | crnn-pytorch-master/tool/convert_t7.py | import torchfile
import argparse
import torch
from torch.nn.parameter import Parameter
import numpy as np
import models.crnn as crnn
layer_map = {
'SpatialConvolution': 'Conv2d',
'SpatialBatchNormalization': 'BatchNorm2d',
'ReLU': 'ReLU',
'SpatialMaxPooling': 'MaxPool2d',
'SpatialAveragePooling': 'AvgPool2d',
'SpatialUpSamplingNearest': 'UpsamplingNearest2d',
'View': None,
'Linear': 'linear',
'Dropout': 'Dropout',
'SoftMax': 'Softmax',
'Identity': None,
'SpatialFullConvolution': 'ConvTranspose2d',
'SpatialReplicationPadding': None,
'SpatialReflectionPadding': None,
'Copy': None,
'Narrow': None,
'SpatialCrossMapLRN': None,
'Sequential': None,
'ConcatTable': None, # output is list
'CAddTable': None, # input is list
'Concat': None,
'TorchObject': None,
'LstmLayer': 'LSTM',
'BiRnnJoin': 'Linear'
}
def torch_layer_serial(layer, layers):
    """Recursively flatten a serialized torch7 layer tree into `layers` (in place).

    Container records ('nn.Sequential' / 'nn.ConcatTable') are expanded;
    every other record is appended as a leaf.
    """
    kind = layer[0]
    if kind not in ('nn.Sequential', 'nn.ConcatTable'):
        layers.append(layer)
        return
    collected = []
    for child in layer[1]:
        torch_layer_serial(child, collected)
    layers.extend(collected)
def py_layer_serial(layer, layers):
    """Recursively flatten a pytorch Module tree into `layers` (in place).

    Assume modules are defined as executive sequence.
    """
    if not layer._modules:
        # Leaf module: no registered children.
        layers.append(layer)
        return
    collected = []
    for child in layer.children():
        py_layer_serial(child, collected)
    layers.extend(collected)
def trans_pos(param, part_indexes, dim=0):
    """Split `param` into len(part_indexes) equal chunks along `dim` and
    reassemble them in the order given by `part_indexes`."""
    pieces = np.split(param, len(part_indexes), dim)
    reordered = [pieces[idx] for idx in part_indexes]
    return np.concatenate(reordered, dim)
def load_params(py_layer, t7_layer):
    """Copy the weights of one serialized t7 layer into the matching pytorch layer.

    For LSTMs `t7_layer` is a list of per-layer records; for the other
    supported kinds it is a single (name, params) record.  Size mismatches
    are reported but not fatal.
    """
    if type(py_layer).__name__ == 'LSTM':
        # LSTM
        all_weights = []
        num_directions = 2 if py_layer.bidirectional else 1
        for i in range(py_layer.num_layers):
            for j in range(num_directions):
                suffix = '_reverse' if j == 1 else ''
                weights = ['weight_ih_l{}{}', 'bias_ih_l{}{}',
                           'weight_hh_l{}{}', 'bias_hh_l{}{}']
                weights = [x.format(i, suffix) for x in weights]
                all_weights += weights
        params = []
        for i in range(len(t7_layer)):
            params.extend(t7_layer[i][1])
        # Swap the 3rd and 4th quarter of every LSTM parameter — presumably
        # reordering torch7's gate layout to pytorch's; confirm against the
        # original torch7 model definition.
        params = [trans_pos(p, [0, 1, 3, 2], dim=0) for p in params]
    else:
        all_weights = []
        name = t7_layer[0].split('.')[-1]
        if name == 'BiRnnJoin':
            # Merge the two direction-specific projections into one Linear.
            weight_0, bias_0, weight_1, bias_1 = t7_layer[1]
            weight = np.concatenate((weight_0, weight_1), axis=1)
            bias = bias_0 + bias_1
            t7_layer[1] = [weight, bias]
            all_weights += ['weight', 'bias']
        elif name == 'SpatialConvolution' or name == 'Linear':
            all_weights += ['weight', 'bias']
        elif name == 'SpatialBatchNormalization':
            all_weights += ['weight', 'bias', 'running_mean', 'running_var']
        params = t7_layer[1]
    params = [torch.from_numpy(item) for item in params]
    assert len(all_weights) == len(params), "params' number not match"
    for py_param_name, t7_param in zip(all_weights, params):
        item = getattr(py_layer, py_param_name)
        if isinstance(item, Parameter):
            item = item.data
        try:
            item.copy_(t7_param)
        except RuntimeError:
            print('Size not match between %s and %s' %
                  (item.size(), t7_param.size()))
def torch_to_pytorch(model, t7_file, output):
    """Transfer weights from a torch7 `.t7` dump into `model` and save a state_dict.

    Both layer trees are flattened to leaves first; `layer_map` validates
    that the two sequences line up one-to-one (an LSTM consumes several
    consecutive t7 records).
    """
    py_layers = []
    for layer in list(model.children()):
        py_layer_serial(layer, py_layers)
    t7_data = torchfile.load(t7_file)
    t7_layers = []
    for layer in t7_data:
        torch_layer_serial(layer, t7_layers)
    j = 0
    for i, py_layer in enumerate(py_layers):
        py_name = type(py_layer).__name__
        t7_layer = t7_layers[j]
        t7_name = t7_layer[0].split('.')[-1]
        if layer_map[t7_name] != py_name:
            raise RuntimeError('%s does not match %s' % (py_name, t7_name))
        if py_name == 'LSTM':
            # One pytorch LSTM corresponds to num_layers * num_directions
            # consecutive t7 records.
            n_layer = 2 if py_layer.bidirectional else 1
            n_layer *= py_layer.num_layers
            t7_layer = t7_layers[j:j + n_layer]
            j += n_layer
        else:
            j += 1
        load_params(py_layer, t7_layer)
    torch.save(model.state_dict(), output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Convert torch t7 model to pytorch'
    )
    parser.add_argument(
        '--model_file',
        '-m',
        type=str,
        required=True,
        help='torch model file in t7 format'
    )
    parser.add_argument(
        '--output',
        '-o',
        type=str,
        default=None,
        help='output file name prefix, xxx.py xxx.pth'
    )
    args = parser.parse_args()
    # Target architecture must match the t7 checkpoint: 32px-high 1-channel
    # input, 37 classes, 256 hidden units — adjust if converting other models.
    py_model = crnn.CRNN(32, 1, 37, 256, 1)
    torch_to_pytorch(py_model, args.model_file, args.output)
| 5,075 | 29.214286 | 76 | py |
crnn-pytorch | crnn-pytorch-master/tool/create_dataset.py | import os
import lmdb
import cv2
import numpy as np
import argparse
import shutil
import sys
def checkImageIsValid(imageBin):
    """Return True iff `imageBin` holds bytes that cv2 decodes to a non-empty image."""
    if imageBin is None:
        return False
    try:
        # np.frombuffer replaces the deprecated (and copying) np.fromstring.
        imageBuf = np.frombuffer(imageBin, dtype=np.uint8)
        img = cv2.imdecode(imageBuf, cv2.IMREAD_GRAYSCALE)
        imgH, imgW = img.shape[0], img.shape[1]
    except Exception:
        # Any decode failure (including img being None) marks the image invalid;
        # narrowed from a bare `except:` so KeyboardInterrupt still propagates.
        return False
    else:
        if imgH * imgW == 0:
            return False
    return True
def writeCache(env, cache):
    """Write every (key, value) pair of `cache` to the LMDB env in one transaction.

    str keys/values are UTF-8 encoded; bytes pass through untouched.
    """
    def as_bytes(x):
        return x.encode() if type(x) == str else x
    with env.begin(write=True) as txn:
        for key, value in cache.items():
            txn.put(as_bytes(key), as_bytes(value))
def createDataset(outputPath, imagePathList, labelList, lexiconList=None, checkValid=True):
    """
    Create LMDB dataset for CRNN training.

    The database is rebuilt from scratch: an existing directory at
    ``outputPath`` is deleted first.  Samples are stored under 1-based keys
    ``image-%09d`` / ``label-%09d`` (and ``lexicon-%09d`` when a lexicon is
    given), with the total count under the ``num-samples`` key.

    ARGS:
        outputPath    : LMDB output path
        imagePathList : list of image path
        labelList     : list of corresponding groundtruth texts
        lexiconList   : (optional) list of lexicon lists
        checkValid    : if true, check the validity of every image
    """
    # If lmdb file already exists, remove it. Or the new data will add to it.
    if os.path.exists(outputPath):
        shutil.rmtree(outputPath)
        os.makedirs(outputPath)
    else:
        os.makedirs(outputPath)
    assert (len(imagePathList) == len(labelList))
    nSamples = len(imagePathList)
    # map_size is the maximum database size (1 TB of virtual address space).
    env = lmdb.open(outputPath, map_size=1099511627776)
    cache = {}
    cnt = 1  # keys are 1-based
    for i in range(nSamples):
        imagePath = imagePathList[i]
        label = labelList[i]
        # Skip (with a warning) missing or, optionally, undecodable images.
        if not os.path.exists(imagePath):
            print('%s does not exist' % imagePath)
            continue
        with open(imagePath, 'rb') as f:
            imageBin = f.read()
        if checkValid:
            if not checkImageIsValid(imageBin):
                print('%s is not a valid image' % imagePath)
                continue
        imageKey = 'image-%09d' % cnt
        labelKey = 'label-%09d' % cnt
        cache[imageKey] = imageBin
        cache[labelKey] = label
        if lexiconList:
            lexiconKey = 'lexicon-%09d' % cnt
            cache[lexiconKey] = ' '.join(lexiconList[i])
        # Flush the in-memory cache to LMDB every 1000 samples.
        if cnt % 1000 == 0:
            writeCache(env, cache)
            cache = {}
            print('Written %d / %d' % (cnt, nSamples))
        cnt += 1
    # Skipped images reduce the final sample count below len(imagePathList).
    nSamples = cnt-1
    cache['num-samples'] = str(nSamples)
    writeCache(env, cache)
    env.close()
    print('Created dataset with %d samples' % nSamples)
def read_data_from_folder(folder_path):
    """Collect image paths and labels from a folder of ``<label>_*`` files.

    Files are processed in order of increasing filename length, and the
    label of each file is everything before its first underscore.
    """
    file_names = sorted(os.listdir(folder_path), key=len)
    image_path_list = [folder_path + '/' + name for name in file_names]
    label_list = [name.split('_')[0] for name in file_names]
    return image_path_list, label_list
def read_data_from_file(file_path):
    """Read (image path, label) pairs from a text file.

    The file alternates lines: an image path line followed by its label line.
    Trailing carriage-return and newline characters are stripped from both.

    Returns:
        (image_path_list, label_list): two parallel lists of strings.
    """
    image_path_list = []
    label_list = []
    # Use a context manager so the file is always closed
    # (the original implementation leaked the file handle).
    with open(file_path) as f:
        while True:
            line1 = f.readline()
            line2 = f.readline()
            # Stop at EOF or on a dangling path line without a label.
            if not line1 or not line2:
                break
            image_path_list.append(line1.replace('\r', '').replace('\n', ''))
            label_list.append(line2.replace('\r', '').replace('\n', ''))
    return image_path_list, label_list
def show_demo(demo_number, image_path_list, label_list):
    """Print the first ``demo_number`` (path, label) pairs as a sanity check."""
    print('\nShow some demo to prevent creating wrong lmdb data')
    print('The first line is the path to image and the second line is the image label')
    for idx in range(demo_number):
        print(f'image: {image_path_list[idx]}\nlabel: {label_list[idx]}\n')
if __name__ == '__main__':
    # Command-line entry point: build an LMDB dataset either from a folder of
    # images or from a file listing alternating (path, label) lines.
    parser = argparse.ArgumentParser()
    parser.add_argument('--out', type=str, required=True, help='lmdb data output path')
    parser.add_argument('--folder', type=str, help='path to folder which contains the images')
    parser.add_argument('--file', type=str, help='path to file which contains the image path and label')
    args = parser.parse_args()
    # --file takes precedence over --folder when both are given.
    if args.file is not None:
        image_path_list, label_list = read_data_from_file(args.file)
    elif args.folder is not None:
        image_path_list, label_list = read_data_from_folder(args.folder)
    else:
        # Typo fixed: the original message said "--floder".
        print('Please use --folder or --file to assign the input. Use -h to see more.')
        sys.exit()
    createDataset(args.out, image_path_list, label_list)
    # Show a couple of samples so the user can verify the data looks right.
    show_demo(2, image_path_list, label_list)
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/parser.py |
import os
import torch
import argparse
def parse_arguments():
    """Parse and validate the command-line arguments of the benchmark.

    Returns:
        argparse.Namespace with all training/model/testing options.

    Raises:
        Exception: if --datasets_folder is unset and DATASETS_FOLDER is not exported.
        ValueError / NotImplementedError: for unsupported option combinations.
    """
    parser = argparse.ArgumentParser(description="Benchmarking Visual Geolocalization",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Training parameters
    parser.add_argument("--train_batch_size", type=int, default=4,
                        help="Number of triplets (query, pos, negs) in a batch. Each triplet consists of 12 images")
    parser.add_argument("--infer_batch_size", type=int, default=16,
                        help="Batch size for inference (caching and testing)")
    parser.add_argument("--criterion", type=str, default='triplet', help='loss to be used',
                        choices=["triplet", "sare_ind", "sare_joint"])
    parser.add_argument("--margin", type=float, default=0.1,
                        help="margin for the triplet loss")
    parser.add_argument("--epochs_num", type=int, default=1000,
                        help="number of epochs to train for")
    parser.add_argument("--patience", type=int, default=3)
    parser.add_argument("--lr", type=float, default=0.00001, help="_")
    parser.add_argument("--lr_crn_layer", type=float, default=5e-3, help="Learning rate for the CRN layer")
    parser.add_argument("--lr_crn_net", type=float, default=5e-4, help="Learning rate to finetune pretrained network when using CRN")
    parser.add_argument("--optim", type=str, default="adam", help="_", choices=["adam", "sgd"])
    parser.add_argument("--cache_refresh_rate", type=int, default=1000,
                        help="How often to refresh cache, in number of queries")
    parser.add_argument("--queries_per_epoch", type=int, default=5000,
                        help="How many queries to consider for one epoch. Must be multiple of cache_refresh_rate")
    parser.add_argument("--negs_num_per_query", type=int, default=10,
                        help="How many negatives to consider per each query in the loss")
    parser.add_argument("--neg_samples_num", type=int, default=1000,
                        help="How many negatives to use to compute the hardest ones")
    parser.add_argument("--mining", type=str, default="partial", choices=["partial", "full", "random", "msls_weighted"])
    # Model parameters
    parser.add_argument("--backbone", type=str, default="resnet18conv4",
                        choices=["alexnet", "vgg16", "resnet18conv4", "resnet18conv5",
                                 "resnet50conv4", "resnet50conv5", "resnet101conv4", "resnet101conv5",
                                 "cct384", "vit"], help="_")
    parser.add_argument("--l2", type=str, default="before_pool", choices=["before_pool", "after_pool", "none"],
                        help="When (and if) to apply the l2 norm with shallow aggregation layers")
    parser.add_argument("--aggregation", type=str, default="netvlad", choices=["netvlad", "gem", "spoc", "mac", "rmac", "crn", "rrm",
                                                                               "cls", "seqpool"])
    parser.add_argument('--netvlad_clusters', type=int, default=64, help="Number of clusters for NetVLAD layer.")
    parser.add_argument('--pca_dim', type=int, default=None, help="PCA dimension (number of principal components). If None, PCA is not used.")
    parser.add_argument('--fc_output_dim', type=int, default=None,
                        help="Output dimension of fully connected layer. If None, don't use a fully connected layer.")
    parser.add_argument('--pretrain', type=str, default="imagenet", choices=['imagenet', 'gldv2', 'places'],
                        help="Select the pretrained weights for the starting network")
    parser.add_argument("--off_the_shelf", type=str, default="imagenet", choices=["imagenet", "radenovic_sfm", "radenovic_gldv1", "naver"],
                        help="Off-the-shelf networks from popular GitHub repos. Only with ResNet-50/101 + GeM + FC 2048")
    parser.add_argument("--trunc_te", type=int, default=None, choices=list(range(0, 14)))
    parser.add_argument("--freeze_te", type=int, default=None, choices=list(range(-1, 14)))
    # Initialization parameters
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--resume", type=str, default=None,
                        help="Path to load checkpoint from, for resuming training or testing.")
    # Other parameters
    parser.add_argument("--device", type=str, default="cuda", choices=["cuda", "cpu"])
    parser.add_argument("--num_workers", type=int, default=8, help="num_workers for all dataloaders")
    parser.add_argument('--resize', type=int, default=[480, 640], nargs=2, help="Resizing shape for images (HxW).")
    parser.add_argument('--test_method', type=str, default="hard_resize",
                        choices=["hard_resize", "single_query", "central_crop", "five_crops", "nearest_crop", "maj_voting"],
                        help="This includes pre/post-processing methods and prediction refinement")
    parser.add_argument("--majority_weight", type=float, default=0.01,
                        help="only for majority voting, scale factor, the higher it is the more importance is given to agreement")
    parser.add_argument("--efficient_ram_testing", action='store_true', help="_")
    parser.add_argument("--val_positive_dist_threshold", type=int, default=25, help="_")
    parser.add_argument("--train_positives_dist_threshold", type=int, default=10, help="_")
    parser.add_argument('--recall_values', type=int, default=[1, 5, 10, 20], nargs="+",
                        help="Recalls to be computed, such as R@5.")
    # Data augmentation parameters
    parser.add_argument("--brightness", type=float, default=0, help="_")
    parser.add_argument("--contrast", type=float, default=0, help="_")
    parser.add_argument("--saturation", type=float, default=0, help="_")
    parser.add_argument("--hue", type=float, default=0, help="_")
    parser.add_argument("--rand_perspective", type=float, default=0, help="_")
    parser.add_argument("--horizontal_flip", action='store_true', help="_")
    parser.add_argument("--random_resized_crop", type=float, default=0, help="_")
    parser.add_argument("--random_rotation", type=float, default=0, help="_")
    # Paths parameters
    parser.add_argument("--datasets_folder", type=str, default=None, help="Path with all datasets")
    parser.add_argument("--dataset_name", type=str, default="pitts30k", help="Relative path of the dataset")
    parser.add_argument("--pca_dataset_folder", type=str, default=None,
                        help="Path with images to be used to compute PCA (ie: pitts30k/images/train")
    parser.add_argument("--save_dir", type=str, default="default",
                        help="Folder name of the current run (saved in ./logs/)")
    args = parser.parse_args()

    # Fall back to the DATASETS_FOLDER environment variable when unset.
    if args.datasets_folder is None:
        try:
            args.datasets_folder = os.environ['DATASETS_FOLDER']
        except KeyError:
            raise Exception("You should set the parameter --datasets_folder or export " +
                            "the DATASETS_FOLDER environment variable as such \n" +
                            "export DATASETS_FOLDER=../datasets_vg/datasets")

    # Cross-option validation: fail fast on unsupported combinations.
    if args.aggregation == "crn" and args.resume is None:
        raise ValueError("CRN must be resumed from a trained NetVLAD checkpoint, but you set resume=None.")

    if args.queries_per_epoch % args.cache_refresh_rate != 0:
        raise ValueError("Ensure that queries_per_epoch is divisible by cache_refresh_rate, " +
                         f"because {args.queries_per_epoch} is not divisible by {args.cache_refresh_rate}")

    if torch.cuda.device_count() >= 2 and args.criterion in ['sare_joint', "sare_ind"]:
        raise NotImplementedError("SARE losses are not implemented for multiple GPUs, " +
                                  f"but you're using {torch.cuda.device_count()} GPUs and {args.criterion} loss.")

    # Bug fix: this message previously lacked the f-string prefix, so the
    # dataset name placeholder was printed literally.
    if args.mining == "msls_weighted" and args.dataset_name != "msls":
        raise ValueError(f"msls_weighted mining can only be applied to msls dataset, but you're using it on {args.dataset_name}")

    if args.off_the_shelf in ["radenovic_sfm", "radenovic_gldv1", "naver"]:
        if args.backbone not in ["resnet50conv5", "resnet101conv5"] or args.aggregation != "gem" or args.fc_output_dim != 2048:
            raise ValueError("Off-the-shelf models are trained only with ResNet-50/101 + GeM + FC 2048")

    if args.pca_dim is not None and args.pca_dataset_folder is None:
        raise ValueError("Please specify --pca_dataset_folder when using pca")

    if args.backbone == "vit":
        if args.resize != [224, 224] and args.resize != [384, 384]:
            # Message made consistent with the CCT384 check below.
            raise ValueError(f'Image size for ViT must be either 224 or 384, but it is {args.resize}')
    if args.backbone == "cct384":
        if args.resize != [384, 384]:
            raise ValueError(f'Image size for CCT384 must be 384, but it is {args.resize}')

    if args.backbone in ["alexnet", "vgg16", "resnet18conv4", "resnet18conv5",
                         "resnet50conv4", "resnet50conv5", "resnet101conv4", "resnet101conv5"]:
        if args.aggregation in ["cls", "seqpool"]:
            raise ValueError(f"CNNs like {args.backbone} can't work with aggregation {args.aggregation}")
    if args.backbone in ["cct384"]:
        if args.aggregation in ["spoc", "mac", "rmac", "crn", "rrm"]:
            raise ValueError(f"CCT can't work with aggregation {args.aggregation}. Please use one among [netvlad, gem, cls, seqpool]")
    if args.backbone == "vit":
        if args.aggregation not in ["cls", "gem", "netvlad"]:
            raise ValueError(f"ViT can't work with aggregation {args.aggregation}. Please use one among [netvlad, gem, cls]")

    return args
| 9,823 | 70.188406 | 142 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/test.py |
import faiss
import torch
import logging
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Subset
def test_efficient_ram_usage(args, eval_ds, model, test_method="hard_resize"):
    """This function gives the same output as test(), but uses much less RAM.

    Instead of keeping all database descriptors in memory behind a faiss
    index, it first extracts all query descriptors, then streams over the
    database and fills a (queries x database) matrix of squared L2 distances
    one database image at a time.
    This can be useful when testing with large descriptors (e.g. NetVLAD) on large datasets (e.g. San Francisco).
    Obviously it is slower than test(), and can't be used with PCA.

    Args:
        args: parsed arguments (device, batch sizes, features_dim, recall_values, ...).
        eval_ds: dataset holding database images first, then query images.
        model: feature extractor producing args.features_dim-sized descriptors.
        test_method: query pre/post-processing method (same options as test()).

    Returns:
        (recalls, recalls_str): recall percentages (one per args.recall_values)
        and their printable string form.
    """
    model = model.eval()
    # With the multi-crop methods each query contributes 5 descriptors,
    # hence 5 rows per query in the distance matrix.
    if test_method == 'nearest_crop' or test_method == "maj_voting":
        distances = np.empty([eval_ds.queries_num * 5, eval_ds.database_num], dtype=np.float32)
    else:
        distances = np.empty([eval_ds.queries_num, eval_ds.database_num], dtype=np.float32)
    with torch.no_grad():
        if test_method == 'nearest_crop' or test_method == 'maj_voting':
            queries_features = np.ones((eval_ds.queries_num * 5, args.features_dim), dtype="float32")
        else:
            queries_features = np.ones((eval_ds.queries_num, args.features_dim), dtype="float32")
        logging.debug("Extracting queries features for evaluation/testing")
        queries_infer_batch_size = 1 if test_method == "single_query" else args.infer_batch_size
        eval_ds.test_method = test_method
        # Queries occupy indexes [database_num, database_num + queries_num).
        queries_subset_ds = Subset(eval_ds, list(range(eval_ds.database_num, eval_ds.database_num+eval_ds.queries_num)))
        queries_dataloader = DataLoader(dataset=queries_subset_ds, num_workers=args.num_workers,
                                        batch_size=queries_infer_batch_size, pin_memory=(args.device == "cuda"))
        for inputs, indices in tqdm(queries_dataloader, ncols=100):
            if test_method == "five_crops" or test_method == "nearest_crop" or test_method == 'maj_voting':
                inputs = torch.cat(tuple(inputs))  # shape = 5*bs x 3 x 480 x 480
            features = model(inputs.to(args.device))
            if test_method == "five_crops":  # Compute mean along the 5 crops
                features = torch.stack(torch.split(features, 5)).mean(1)
            if test_method == "nearest_crop" or test_method == 'maj_voting':
                # Store all 5 crop descriptors per query in consecutive rows.
                start_idx = (indices[0] - eval_ds.database_num) * 5
                end_idx = start_idx + indices.shape[0] * 5
                indices = np.arange(start_idx, end_idx)
                queries_features[indices, :] = features.cpu().numpy()
            else:
                queries_features[indices.numpy()-eval_ds.database_num, :] = features.cpu().numpy()
        queries_features = torch.tensor(queries_features).type(torch.float32).cuda()
        logging.debug("Extracting database features for evaluation/testing")
        # For database use "hard_resize", although it usually has no effect because database images have same resolution
        eval_ds.test_method = "hard_resize"
        database_subset_ds = Subset(eval_ds, list(range(eval_ds.database_num)))
        database_dataloader = DataLoader(dataset=database_subset_ds, num_workers=args.num_workers,
                                         batch_size=args.infer_batch_size, pin_memory=(args.device == "cuda"))
        for inputs, indices in tqdm(database_dataloader, ncols=100):
            inputs = inputs.to(args.device)
            features = model(inputs)
            # Squared L2 distance from every query to this database image;
            # database descriptors are never all resident at once.
            for pn, (index, pred_feature) in enumerate(zip(indices, features)):
                distances[:, index] = ((queries_features-pred_feature)**2).sum(1).cpu().numpy()
        del features, queries_features, pred_feature
    predictions = distances.argsort(axis=1)[:, :max(args.recall_values)]
    if test_method == 'nearest_crop':
        distances = np.array([distances[row, index] for row, index in enumerate(predictions)])
        distances = np.reshape(distances, (eval_ds.queries_num, 20 * 5))
        predictions = np.reshape(predictions, (eval_ds.queries_num, 20 * 5))
        for q in range(eval_ds.queries_num):
            # sort predictions by distance
            sort_idx = np.argsort(distances[q])
            predictions[q] = predictions[q, sort_idx]
            # remove duplicated predictions, i.e. keep only the closest ones
            _, unique_idx = np.unique(predictions[q], return_index=True)
            # unique_idx is sorted based on the unique values, sort it again
            predictions[q, :20] = predictions[q, np.sort(unique_idx)][:20]
        predictions = predictions[:, :20]  # keep only the closer 20 predictions for each
    elif test_method == 'maj_voting':
        distances = np.array([distances[row, index] for row, index in enumerate(predictions)])
        distances = np.reshape(distances, (eval_ds.queries_num, 5, 20))
        predictions = np.reshape(predictions, (eval_ds.queries_num, 5, 20))
        for q in range(eval_ds.queries_num):
            # votings, modify distances in-place
            top_n_voting('top1', predictions[q], distances[q], args.majority_weight)
            top_n_voting('top5', predictions[q], distances[q], args.majority_weight)
            top_n_voting('top10', predictions[q], distances[q], args.majority_weight)
            # flatten dist and preds from 5, 20 -> 20*5
            # and then proceed as usual to keep only first 20
            dists = distances[q].flatten()
            preds = predictions[q].flatten()
            # sort predictions by distance
            sort_idx = np.argsort(dists)
            preds = preds[sort_idx]
            # remove duplicated predictions, i.e. keep only the closest ones
            _, unique_idx = np.unique(preds, return_index=True)
            # unique_idx is sorted based on the unique values, sort it again
            # here the row corresponding to the first crop is used as a
            # 'buffer' for each query, and in the end the dimension
            # relative to crops is eliminated
            predictions[q, 0, :20] = preds[np.sort(unique_idx)][:20]
        predictions = predictions[:, 0, :20]  # keep only the closer 20 predictions for each query
    del distances
    #### For each query, check if the predictions are correct
    positives_per_query = eval_ds.get_positives()
    # args.recall_values by default is [1, 5, 10, 20]
    recalls = np.zeros(len(args.recall_values))
    for query_index, pred in enumerate(predictions):
        for i, n in enumerate(args.recall_values):
            if np.any(np.in1d(pred[:n], positives_per_query[query_index])):
                # A hit at rank n counts for every larger recall cutoff too.
                recalls[i:] += 1
                break
    recalls = recalls / eval_ds.queries_num * 100
    recalls_str = ", ".join([f"R@{val}: {rec:.1f}" for val, rec in zip(args.recall_values, recalls)])
    return recalls, recalls_str
def test(args, eval_ds, model, test_method="hard_resize", pca=None):
    """Compute features of the given dataset and compute the recalls.

    Extracts descriptors for all database and query images, indexes the
    database with faiss (exact L2 search), retrieves the nearest database
    images for each query and scores them against the ground-truth positives.

    Args:
        args: parsed arguments (device, batch sizes, features_dim, recall_values, ...).
        eval_ds: dataset holding database images first, then query images.
        model: feature extractor producing args.features_dim-sized descriptors.
        test_method: query pre/post-processing and prediction refinement method.
        pca: optional fitted sklearn PCA applied to every descriptor.

    Returns:
        (recalls, recalls_str): recall percentages (one per args.recall_values)
        and their printable string form.
    """
    assert test_method in ["hard_resize", "single_query", "central_crop", "five_crops",
                           "nearest_crop", "maj_voting"], f"test_method can't be {test_method}"
    # Delegate to the low-RAM variant when requested (no PCA support there).
    if args.efficient_ram_testing:
        return test_efficient_ram_usage(args, eval_ds, model, test_method)
    model = model.eval()
    with torch.no_grad():
        logging.debug("Extracting database features for evaluation/testing")
        # For database use "hard_resize", although it usually has no effect because database images have same resolution
        eval_ds.test_method = "hard_resize"
        database_subset_ds = Subset(eval_ds, list(range(eval_ds.database_num)))
        database_dataloader = DataLoader(dataset=database_subset_ds, num_workers=args.num_workers,
                                         batch_size=args.infer_batch_size, pin_memory=(args.device == "cuda"))
        # With multi-crop methods each query contributes 5 descriptors.
        if test_method == "nearest_crop" or test_method == 'maj_voting':
            all_features = np.empty((5 * eval_ds.queries_num + eval_ds.database_num, args.features_dim), dtype="float32")
        else:
            all_features = np.empty((len(eval_ds), args.features_dim), dtype="float32")
        for inputs, indices in tqdm(database_dataloader, ncols=100):
            features = model(inputs.to(args.device))
            features = features.cpu().numpy()
            if pca is not None:
                features = pca.transform(features)
            all_features[indices.numpy(), :] = features
        logging.debug("Extracting queries features for evaluation/testing")
        queries_infer_batch_size = 1 if test_method == "single_query" else args.infer_batch_size
        eval_ds.test_method = test_method
        # Queries occupy indexes [database_num, database_num + queries_num).
        queries_subset_ds = Subset(eval_ds, list(range(eval_ds.database_num, eval_ds.database_num+eval_ds.queries_num)))
        queries_dataloader = DataLoader(dataset=queries_subset_ds, num_workers=args.num_workers,
                                        batch_size=queries_infer_batch_size, pin_memory=(args.device == "cuda"))
        for inputs, indices in tqdm(queries_dataloader, ncols=100):
            if test_method == "five_crops" or test_method == "nearest_crop" or test_method == 'maj_voting':
                inputs = torch.cat(tuple(inputs))  # shape = 5*bs x 3 x 480 x 480
            features = model(inputs.to(args.device))
            if test_method == "five_crops":  # Compute mean along the 5 crops
                features = torch.stack(torch.split(features, 5)).mean(1)
            features = features.cpu().numpy()
            if pca is not None:
                features = pca.transform(features)
            if test_method == "nearest_crop" or test_method == 'maj_voting':  # store the features of all 5 crops
                start_idx = eval_ds.database_num + (indices[0] - eval_ds.database_num) * 5
                end_idx = start_idx + indices.shape[0] * 5
                indices = np.arange(start_idx, end_idx)
                all_features[indices, :] = features
            else:
                all_features[indices.numpy(), :] = features
    queries_features = all_features[eval_ds.database_num:]
    database_features = all_features[:eval_ds.database_num]
    # Exact L2 nearest-neighbor search over the database descriptors.
    faiss_index = faiss.IndexFlatL2(args.features_dim)
    faiss_index.add(database_features)
    del database_features, all_features
    logging.debug("Calculating recalls")
    distances, predictions = faiss_index.search(queries_features, max(args.recall_values))
    if test_method == 'nearest_crop':
        distances = np.reshape(distances, (eval_ds.queries_num, 20 * 5))
        predictions = np.reshape(predictions, (eval_ds.queries_num, 20 * 5))
        for q in range(eval_ds.queries_num):
            # sort predictions by distance
            sort_idx = np.argsort(distances[q])
            predictions[q] = predictions[q, sort_idx]
            # remove duplicated predictions, i.e. keep only the closest ones
            _, unique_idx = np.unique(predictions[q], return_index=True)
            # unique_idx is sorted based on the unique values, sort it again
            predictions[q, :20] = predictions[q, np.sort(unique_idx)][:20]
        predictions = predictions[:, :20]  # keep only the closer 20 predictions for each query
    elif test_method == 'maj_voting':
        distances = np.reshape(distances, (eval_ds.queries_num, 5, 20))
        predictions = np.reshape(predictions, (eval_ds.queries_num, 5, 20))
        for q in range(eval_ds.queries_num):
            # votings, modify distances in-place
            top_n_voting('top1', predictions[q], distances[q], args.majority_weight)
            top_n_voting('top5', predictions[q], distances[q], args.majority_weight)
            top_n_voting('top10', predictions[q], distances[q], args.majority_weight)
            # flatten dist and preds from 5, 20 -> 20*5
            # and then proceed as usual to keep only first 20
            dists = distances[q].flatten()
            preds = predictions[q].flatten()
            # sort predictions by distance
            sort_idx = np.argsort(dists)
            preds = preds[sort_idx]
            # remove duplicated predictions, i.e. keep only the closest ones
            _, unique_idx = np.unique(preds, return_index=True)
            # unique_idx is sorted based on the unique values, sort it again
            # here the row corresponding to the first crop is used as a
            # 'buffer' for each query, and in the end the dimension
            # relative to crops is eliminated
            predictions[q, 0, :20] = preds[np.sort(unique_idx)][:20]
        predictions = predictions[:, 0, :20]  # keep only the closer 20 predictions for each query
    #### For each query, check if the predictions are correct
    positives_per_query = eval_ds.get_positives()
    # args.recall_values by default is [1, 5, 10, 20]
    recalls = np.zeros(len(args.recall_values))
    for query_index, pred in enumerate(predictions):
        for i, n in enumerate(args.recall_values):
            if np.any(np.in1d(pred[:n], positives_per_query[query_index])):
                # A hit at rank n counts for every larger recall cutoff too.
                recalls[i:] += 1
                break
    # Divide by the number of queries*100, so the recalls are in percentages
    recalls = recalls / eval_ds.queries_num * 100
    recalls_str = ", ".join([f"R@{val}: {rec:.1f}" for val, rec in zip(args.recall_values, recalls)])
    return recalls, recalls_str
def top_n_voting(topn, predictions, distances, maj_weight):
    """Lower the distance of predictions that several crops agree on.

    For each value appearing more than once in the first ``n`` columns of
    ``predictions`` (across the crops), the matching ``distances`` entries are
    decreased in place by ``maj_weight * count / n``, boosting agreed-upon
    predictions in the subsequent distance sort.

    Args:
        topn: one of 'top1', 'top5', 'top10' -- how many columns to scan.
        predictions: (num_crops, k) array of predicted database indexes for one query.
        distances: (num_crops, k) array of matching distances, modified in place.
        maj_weight: scale factor; higher values give more importance to agreement.

    Raises:
        ValueError: if topn is not one of the three supported values.
    """
    if topn == 'top1':
        n = 1
        selected = 0
    elif topn == 'top5':
        n = 5
        selected = slice(0, 5)
    elif topn == 'top10':
        n = 10
        selected = slice(0, 10)
    else:
        # Previously an unknown value crashed later with a NameError on `n`.
        raise ValueError(f"topn must be 'top1', 'top5' or 'top10', not {topn}")
    # find predictions that repeat in the first, first five,
    # or first ten columns for each crop
    vals, counts = np.unique(predictions[:, selected], return_counts=True)
    # for each prediction that repeats more than once,
    # subtract from its score
    for val, count in zip(vals[counts > 1], counts[counts > 1]):
        mask = (predictions[:, selected] == val)
        distances[:, selected][mask] -= maj_weight * count/n
| 14,018 | 53.761719 | 121 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/commons.py |
"""
This file contains some functions and classes which can be useful in very diverse projects.
"""
import os
import sys
import torch
import random
import logging
import traceback
import numpy as np
from os.path import join
def make_deterministic(seed=0):
    """Seed every RNG (python, numpy, torch) for reproducible runs.

    A seed of -1 disables determinism entirely.  Note that forcing
    deterministic cudnn behaviour might slow the script down.
    """
    if seed == -1:
        return  # non-deterministic behaviour explicitly requested
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed):
        seed_fn(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def setup_logging(save_dir, console="debug",
                  info_filename="info.log", debug_filename="debug.log"):
    """Set up logging files and console output.
    Creates one file for INFO logs and one for DEBUG logs, and installs a
    sys.excepthook that logs uncaught exceptions through the root logger.
    Args:
        save_dir (str): creates the folder where to save the files.
        console (str):
            if == "debug" prints on console debug messages and higher
            if == "info" prints on console info messages and higher
            if == None does not use console (useful when a logger has already been set)
        info_filename (str): the name of the info file. if None, don't create info file
        debug_filename (str): the name of the debug file. if None, don't create debug file
    Raises:
        FileExistsError: if save_dir already exists.
    """
    if os.path.exists(save_dir):
        raise FileExistsError(f"{save_dir} already exists!")
    os.makedirs(save_dir, exist_ok=True)
    # logging.Logger.manager.loggerDict.keys() to check which loggers are in use
    base_formatter = logging.Formatter('%(asctime)s %(message)s', "%Y-%m-%d %H:%M:%S")
    logger = logging.getLogger('')
    logger.setLevel(logging.DEBUG)
    if info_filename is not None:
        info_file_handler = logging.FileHandler(join(save_dir, info_filename))
        info_file_handler.setLevel(logging.INFO)
        info_file_handler.setFormatter(base_formatter)
        logger.addHandler(info_file_handler)
    if debug_filename is not None:
        debug_file_handler = logging.FileHandler(join(save_dir, debug_filename))
        debug_file_handler.setLevel(logging.DEBUG)
        debug_file_handler.setFormatter(base_formatter)
        logger.addHandler(debug_file_handler)
    if console is not None:
        console_handler = logging.StreamHandler()
        if console == "debug":
            console_handler.setLevel(logging.DEBUG)
        if console == "info":
            console_handler.setLevel(logging.INFO)
        console_handler.setFormatter(base_formatter)
        logger.addHandler(console_handler)
    def exception_handler(type_, value, tb):
        # Bug fix: the original passed the builtin `type` instead of the
        # exception class parameter `type_` to format_exception.
        logger.info("\n" + "".join(traceback.format_exception(type_, value, tb)))
    sys.excepthook = exception_handler
| 2,811 | 36.493333 | 91 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/util.py |
import re
import torch
import shutil
import logging
import torchscan
import numpy as np
from collections import OrderedDict
from os.path import join
from sklearn.decomposition import PCA
import datasets_ws
def get_flops(model, input_shape=(480, 640)):
    """Return the forward-pass FLOPs of ``model`` as a string, e.g. '22.33 GFLOPs'."""
    assert len(input_shape) == 2, f"input_shape should have len==2, but it's {input_shape}"
    height, width = input_shape
    # torchscan crawls the model with a dummy (3, H, W) input and reports stats.
    summary = torchscan.utils.format_info(torchscan.crawl_module(model, (3, height, width)))
    matches = re.findall("Floating Point Operations on forward: (.*)\n", summary)
    return matches[0]
def save_checkpoint(args, state, is_best, filename):
    """Save ``state`` under args.save_dir/filename; when ``is_best`` is True,
    duplicate it as best_model.pth in the same folder."""
    checkpoint_path = join(args.save_dir, filename)
    torch.save(state, checkpoint_path)
    if is_best:
        best_path = join(args.save_dir, "best_model.pth")
        shutil.copyfile(checkpoint_path, best_path)
def resume_model(args, model):
    """Load the weights at args.resume into ``model`` and return it.

    Accepts both full training checkpoints (with a 'model_state_dict' entry)
    and bare state dicts, and strips the 'module.' prefix that DataParallel
    prepends to parameter names.
    """
    checkpoint = torch.load(args.resume, map_location=args.device)
    if 'model_state_dict' in checkpoint:
        state_dict = checkpoint['model_state_dict']
    else:
        # The pre-trained models that we provide in the README do not have
        # 'state_dict' in the keys: the checkpoint is directly the state dict.
        state_dict = checkpoint
    first_key = list(state_dict.keys())[0]
    if first_key.startswith('module'):
        # Drop the DataParallel prefix so keys match the bare model.
        state_dict = OrderedDict((k.replace('module.', ''), v) for (k, v) in state_dict.items())
    model.load_state_dict(state_dict)
    return model
def resume_train(args, model, optimizer=None, strict=False):
    """Load model, optimizer, and other training parameters.

    Args:
        args: parsed arguments; args.resume is the checkpoint path and
            args.save_dir the current run's output folder.
        model: network whose weights are restored in place.
        optimizer: if given, its state is restored as well.
        strict: passed to load_state_dict; if False, allows missing/extra keys.

    Returns:
        (model, optimizer, best_r5, start_epoch_num, not_improved_num)
    """
    logging.debug(f"Loading checkpoint: {args.resume}")
    checkpoint = torch.load(args.resume)
    start_epoch_num = checkpoint["epoch_num"]
    model.load_state_dict(checkpoint["model_state_dict"], strict=strict)
    if optimizer:
        optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
    # Early-stopping bookkeeping stored alongside the weights.
    best_r5 = checkpoint["best_r5"]
    not_improved_num = checkpoint["not_improved_num"]
    logging.debug(f"Loaded checkpoint: start_epoch_num = {start_epoch_num}, "
                  f"current_best_R@5 = {best_r5:.1f}")
    if args.resume.endswith("last_model.pth"):  # Copy best model to current save_dir
        shutil.copy(args.resume.replace("last_model.pth", "best_model.pth"), args.save_dir)
    return model, optimizer, best_r5, start_epoch_num, not_improved_num
def compute_pca(args, model, pca_dataset_folder, full_features_dim):
    """Fit a PCA on descriptors extracted by ``model`` from images in
    ``pca_dataset_folder``.

    At most 2**14 images are used (fewer if the folder is smaller), each
    described by ``full_features_dim`` values.

    Returns:
        the fitted sklearn PCA object with args.pca_dim components.
    """
    model = model.eval()
    pca_ds = datasets_ws.PCADataset(args, args.datasets_folder, pca_dataset_folder)
    dl = torch.utils.data.DataLoader(pca_ds, args.infer_batch_size, shuffle=True)
    # Feature matrix capped at 2**14 rows to bound memory and fit time.
    pca_features = np.empty([min(len(pca_ds), 2**14), full_features_dim])
    with torch.no_grad():
        for i, images in enumerate(dl):
            # Stop once the pre-allocated matrix is full.
            if i*args.infer_batch_size >= len(pca_features):
                break
            features = model(images).cpu().numpy()
            pca_features[i*args.infer_batch_size : (i*args.infer_batch_size)+len(features)] = features
    pca = PCA(args.pca_dim)
    pca.fit(pca_features)
    return pca
| 3,201 | 40.584416 | 102 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/datasets_ws.py |
import os
import torch
import faiss
import logging
import numpy as np
from glob import glob
from tqdm import tqdm
from PIL import Image
from os.path import join
import torch.utils.data as data
import torchvision.transforms as T
from torch.utils.data.dataset import Subset
from sklearn.neighbors import NearestNeighbors
from torch.utils.data.dataloader import DataLoader
# Shared preprocessing for all images: convert PIL -> tensor and normalize
# with the standard ImageNet channel mean/std values.
base_transform = T.Compose([
    T.ToTensor(),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
def path_to_pil_img(path):
    """Open the image at ``path`` and return it as an RGB PIL image."""
    img = Image.open(path)
    return img.convert("RGB")
def collate_fn(batch):
    """Merge a list of (images, triplets_local_indexes, triplets_global_indexes)
    tuples into batched tensors.

    Each tuple's local indexes address positions inside its own images tensor,
    so they are shifted by the tuple's offset within the concatenated batch.
    Considering each query to have 10 negatives (negs_num_per_query=10),
    each tuple holds:
        - images: torch tensor of shape (12, 3, h, w).
        - triplets_local_indexes: torch tensor of shape (10, 3).
        - triplets_global_indexes: torch tensor of shape (12).
    Returns:
        images: torch tensor of shape (batch_size*12, 3, h, w).
        triplets_local_indexes: torch tensor of shape (batch_size*10, 3).
        triplets_global_indexes: torch tensor of shape (batch_size, 12).
    """
    images = torch.cat([sample[0] for sample in batch])
    global_indexes = torch.cat([sample[2][None] for sample in batch])
    # Offset each tuple's local indexes by its position inside the merged
    # images tensor (offset = tuple position * images per tuple).
    shifted_local = [
        sample[1] + position * len(sample[2])
        for position, sample in enumerate(batch)
    ]
    local_indexes = torch.cat(shifted_local)
    return images, local_indexes, global_indexes
class PCADataset(data.Dataset):
    """Flat dataset of every ``*.jpg`` image under a folder, used to fit PCA.

    Images are returned already preprocessed by ``base_transform``; the
    ``args`` parameter is accepted for interface uniformity but unused.
    """
    def __init__(self, args, datasets_folder="dataset", dataset_folder="pitts30k/images/train"):
        dataset_folder_full_path = join(datasets_folder, dataset_folder)
        if not os.path.exists(dataset_folder_full_path):
            raise FileNotFoundError(f"Folder {dataset_folder_full_path} does not exist")
        # Recursively collect every jpg, in a deterministic (sorted) order.
        pattern = join(dataset_folder_full_path, "**", "*.jpg")
        self.images_paths = sorted(glob(pattern, recursive=True))

    def __getitem__(self, index):
        pil_img = path_to_pil_img(self.images_paths[index])
        return base_transform(pil_img)

    def __len__(self):
        return len(self.images_paths)
class BaseDataset(data.Dataset):
    """Dataset with images from database and queries, used for inference (testing and building cache).

    Images are read from <datasets_folder>/<dataset_name>/images/<split>/ which must
    contain "database" and "queries" subfolders; each filename must embed its UTM
    position as path/to/file/@utm_easting@utm_northing@...@.jpg.
    Global indexing convention: indexes [0, database_num) address database images,
    [database_num, database_num + queries_num) address queries.
    """
    def __init__(self, args, datasets_folder="datasets", dataset_name="pitts30k", split="train"):
        super().__init__()
        self.args = args
        self.dataset_name = dataset_name
        self.dataset_folder = join(datasets_folder, dataset_name, "images", split)
        if not os.path.exists(self.dataset_folder):
            raise FileNotFoundError(f"Folder {self.dataset_folder} does not exist")
        self.resize = args.resize
        self.test_method = args.test_method
        #### Read paths and UTM coordinates for all images.
        database_folder = join(self.dataset_folder, "database")
        queries_folder = join(self.dataset_folder, "queries")
        if not os.path.exists(database_folder):
            raise FileNotFoundError(f"Folder {database_folder} does not exist")
        if not os.path.exists(queries_folder):
            raise FileNotFoundError(f"Folder {queries_folder} does not exist")
        self.database_paths = sorted(glob(join(database_folder, "**", "*.jpg"), recursive=True))
        self.queries_paths = sorted(glob(join(queries_folder, "**", "*.jpg"), recursive=True))
        # The format must be path/to/file/@utm_easting@utm_northing@...@.jpg
        self.database_utms = np.array([(path.split("@")[1], path.split("@")[2]) for path in self.database_paths]).astype(float)
        self.queries_utms = np.array([(path.split("@")[1], path.split("@")[2]) for path in self.queries_paths]).astype(float)
        # Find soft_positives_per_query, which are within val_positive_dist_threshold (default 25 meters).
        # These are the ground-truth matches used by get_positives() when computing recalls.
        knn = NearestNeighbors(n_jobs=-1)
        knn.fit(self.database_utms)
        self.soft_positives_per_query = knn.radius_neighbors(self.queries_utms,
                                                             radius=args.val_positive_dist_threshold,
                                                             return_distance=False)
        # Database images come first, then queries (see the indexing convention in the class docstring).
        self.images_paths = list(self.database_paths) + list(self.queries_paths)
        self.database_num = len(self.database_paths)
        self.queries_num = len(self.queries_paths)
    def __getitem__(self, index):
        """Return (image_tensor, index). The image is normalized by base_transform,
        then resized/cropped according to self.test_method."""
        img = path_to_pil_img(self.images_paths[index])
        img = base_transform(img)
        # With database images self.test_method should always be "hard_resize"
        if self.test_method == "hard_resize":
            # self.test_method=="hard_resize" is the default, resizes all images to the same size.
            img = T.functional.resize(img, self.resize)
        else:
            img = self._test_query_transform(img)
        return img, index
    def _test_query_transform(self, img):
        """Transform query image according to self.test_method.
        NOTE(review): if self.test_method is none of the handled values,
        `processed_img` is never assigned and this raises UnboundLocalError."""
        C, H, W = img.shape
        if self.test_method == "single_query":
            # self.test_method=="single_query" is used when queries have varying sizes, and can't be stacked in a batch.
            processed_img = T.functional.resize(img, min(self.resize))
        elif self.test_method == "central_crop":
            # Take the biggest central crop of size self.resize. Preserves ratio.
            scale = max(self.resize[0]/H, self.resize[1]/W)
            processed_img = torch.nn.functional.interpolate(img.unsqueeze(0), scale_factor=scale).squeeze(0)
            processed_img = T.functional.center_crop(processed_img, self.resize)
            assert processed_img.shape[1:] == torch.Size(self.resize), f"{processed_img.shape[1:]} {self.resize}"
        elif self.test_method == "five_crops" or self.test_method == 'nearest_crop' or self.test_method == 'maj_voting':
            # Get 5 square crops with size==shorter_side (usually 480). Preserves ratio and allows batches.
            shorter_side = min(self.resize)
            processed_img = T.functional.resize(img, shorter_side)
            processed_img = torch.stack(T.functional.five_crop(processed_img, shorter_side))
            assert processed_img.shape == torch.Size([5, 3, shorter_side, shorter_side]), \
                f"{processed_img.shape} {torch.Size([5, 3, shorter_side, shorter_side])}"
        return processed_img
    def __len__(self):
        # Total number of images: database images first, then queries.
        return len(self.images_paths)
    def __repr__(self):
        return f"< {self.__class__.__name__}, {self.dataset_name} - #database: {self.database_num}; #queries: {self.queries_num} >"
    def get_positives(self):
        """Return, for each query, the database indexes within
        val_positive_dist_threshold meters (ground truth for recall)."""
        return self.soft_positives_per_query
class TripletsDataset(BaseDataset):
    """Dataset used for training, it is used to compute the triplets
    with TripletsDataset.compute_triplets() with various mining methods.
    If is_inference == True, uses methods of the parent class BaseDataset,
    this is used for example when computing the cache, because we compute features
    of each image, not triplets.
    """
    def __init__(self, args, datasets_folder="datasets", dataset_name="pitts30k", split="train", negs_num_per_query=10):
        super().__init__(args, datasets_folder, dataset_name, split)
        self.mining = args.mining
        self.neg_samples_num = args.neg_samples_num  # Number of negatives to randomly sample
        self.negs_num_per_query = negs_num_per_query  # Number of negatives per query in each batch
        if self.mining == "full":  # "Full database mining" keeps a cache with last used negatives
            self.neg_cache = [np.empty((0,), dtype=np.int32) for _ in range(self.queries_num)]
        self.is_inference = False
        identity_transform = T.Lambda(lambda x: x)
        # Database/positive/negative images only get resized + normalized.
        self.resized_transform = T.Compose([
            T.Resize(self.resize) if self.resize is not None else identity_transform,
            base_transform
        ])
        # Query images additionally get photometric and geometric augmentation.
        self.query_transform = T.Compose([
            T.ColorJitter(args.brightness, args.contrast, args.saturation, args.hue),
            T.RandomPerspective(args.rand_perspective),
            T.RandomResizedCrop(size=self.resize, scale=(1-args.random_resized_crop, 1)),
            T.RandomRotation(degrees=args.random_rotation),
            self.resized_transform,
        ])
        # Find hard_positives_per_query, which are within train_positives_dist_threshold (10 meters)
        knn = NearestNeighbors(n_jobs=-1)
        knn.fit(self.database_utms)
        self.hard_positives_per_query = list(knn.radius_neighbors(self.queries_utms,
                                             radius=args.train_positives_dist_threshold,  # 10 meters
                                             return_distance=False))
        #### Some queries might have no positive, we should remove those queries.
        queries_without_any_hard_positive = np.where(np.array([len(p) for p in self.hard_positives_per_query], dtype=object) == 0)[0]
        if len(queries_without_any_hard_positive) != 0:
            logging.info(f"There are {len(queries_without_any_hard_positive)} queries without any positives " +
                         "within the training set. They won't be considered as they're useless for training.")
            # Remove queries without positives
            self.hard_positives_per_query = np.delete(self.hard_positives_per_query, queries_without_any_hard_positive)
            self.queries_paths = np.delete(self.queries_paths, queries_without_any_hard_positive)
            # Recompute images_paths and queries_num because some queries might have been removed
            self.images_paths = list(self.database_paths) + list(self.queries_paths)
            self.queries_num = len(self.queries_paths)
        # msls_weighted refers to the mining presented in MSLS paper's supplementary.
        # Basically, images from uncommon domains are sampled more often. Works only with MSLS dataset.
        if self.mining == "msls_weighted":
            notes = [p.split("@")[-2] for p in self.queries_paths]
            try:
                night_indexes = np.where(np.array([n.split("_")[0] == "night" for n in notes]))[0]
                sideways_indexes = np.where(np.array([n.split("_")[1] == "sideways" for n in notes]))[0]
            except IndexError:
                raise RuntimeError("You're using msls_weighted mining but this dataset " +
                                   "does not have night/sideways information. Are you using Mapillary SLS?")
            self.weights = np.ones(self.queries_num)
            assert len(night_indexes) != 0 and len(sideways_indexes) != 0, \
                "There should be night and sideways images for msls_weighted mining, but there are none. Are you using Mapillary SLS?"
            self.weights[night_indexes] += self.queries_num / len(night_indexes)
            self.weights[sideways_indexes] += self.queries_num / len(sideways_indexes)
            self.weights /= self.weights.sum()
            # BUGFIX: the second string literal was missing the f prefix, so the
            # {len(night_indexes)}/{self.queries_num} placeholders were logged
            # literally instead of being interpolated.
            logging.info(f"#sideways_indexes [{len(sideways_indexes)}/{self.queries_num}]; " +
                         f"#night_indexes; [{len(night_indexes)}/{self.queries_num}]")
    def __getitem__(self, index):
        """In training mode, return (images, triplets_local_indexes, triplets_global_indexes)
        for the index-th precomputed triplet; in inference mode behave like BaseDataset."""
        if self.is_inference:
            # At inference time return the single image. This is used for caching or computing NetVLAD's clusters
            return super().__getitem__(index)
        query_index, best_positive_index, neg_indexes = torch.split(self.triplets_global_indexes[index], (1, 1, self.negs_num_per_query))
        query = self.query_transform(path_to_pil_img(self.queries_paths[query_index]))
        positive = self.resized_transform(path_to_pil_img(self.database_paths[best_positive_index]))
        negatives = [self.resized_transform(path_to_pil_img(self.database_paths[i])) for i in neg_indexes]
        images = torch.stack((query, positive, *negatives), 0)
        # Local indexes within `images`: query is 0, positive is 1, negatives start at 2.
        triplets_local_indexes = torch.empty((0, 3), dtype=torch.int)
        for neg_num in range(len(neg_indexes)):
            triplets_local_indexes = torch.cat((triplets_local_indexes, torch.tensor([0, 1, 2 + neg_num]).reshape(1, 3)))
        return images, triplets_local_indexes, self.triplets_global_indexes[index]
    def __len__(self):
        if self.is_inference:
            # At inference time return the number of images. This is used for caching or computing NetVLAD's clusters
            return super().__len__()
        else:
            return len(self.triplets_global_indexes)
    def compute_triplets(self, args, model):
        """Recompute self.triplets_global_indexes with the configured mining method.
        Must be called before iterating the dataset in training mode."""
        self.is_inference = True
        if self.mining == "full":
            self.compute_triplets_full(args, model)
        elif self.mining == "partial" or self.mining == "msls_weighted":
            self.compute_triplets_partial(args, model)
        elif self.mining == "random":
            self.compute_triplets_random(args, model)
    @staticmethod
    def compute_cache(args, model, subset_ds, cache_shape):
        """Compute the cache containing features of images, which is used to
        find best positive and hardest negatives."""
        subset_dl = DataLoader(dataset=subset_ds, num_workers=args.num_workers,
                               batch_size=args.infer_batch_size, shuffle=False,
                               pin_memory=(args.device == "cuda"))
        model = model.eval()
        # RAMEfficient2DMatrix can be replaced by np.zeros, but using
        # RAMEfficient2DMatrix is RAM efficient for full database mining.
        cache = RAMEfficient2DMatrix(cache_shape, dtype=np.float32)
        with torch.no_grad():
            for images, indexes in tqdm(subset_dl, ncols=100):
                images = images.to(args.device)
                features = model(images)
                cache[indexes.numpy()] = features.cpu().numpy()
        return cache
    def get_query_features(self, query_index, cache):
        """Return the cached features of the query_index-th query
        (queries are stored after the database_num database rows)."""
        query_features = cache[query_index + self.database_num]
        if query_features is None:
            raise RuntimeError(f"For query {self.queries_paths[query_index]} " +
                               f"with index {query_index} features have not been computed!\n" +
                               "There might be some bug with caching")
        return query_features
    def get_best_positive_index(self, args, query_index, cache, query_features):
        """Return the hard positive (within 10 meters) nearest to the query in feature space."""
        positives_features = cache[self.hard_positives_per_query[query_index]]
        faiss_index = faiss.IndexFlatL2(args.features_dim)
        faiss_index.add(positives_features)
        # Search the best positive (within 10 meters AND nearest in features space)
        _, best_positive_num = faiss_index.search(query_features.reshape(1, -1), 1)
        best_positive_index = self.hard_positives_per_query[query_index][best_positive_num[0]].item()
        return best_positive_index
    def get_hardest_negatives_indexes(self, args, cache, query_features, neg_samples):
        """Among neg_samples (database indexes), return the negs_num_per_query
        nearest to the query in feature space."""
        neg_features = cache[neg_samples]
        faiss_index = faiss.IndexFlatL2(args.features_dim)
        faiss_index.add(neg_features)
        # Search the 10 nearest negatives (further than 25 meters and nearest in features space)
        _, neg_nums = faiss_index.search(query_features.reshape(1, -1), self.negs_num_per_query)
        neg_nums = neg_nums.reshape(-1)
        neg_indexes = neg_samples[neg_nums].astype(np.int32)
        return neg_indexes
    def compute_triplets_random(self, args, model):
        """Random mining: best positive by features, negatives sampled at random
        (excluding soft positives)."""
        self.triplets_global_indexes = []
        # Take 1000 random queries
        sampled_queries_indexes = np.random.choice(self.queries_num, args.cache_refresh_rate, replace=False)
        # Take all the positives
        positives_indexes = [self.hard_positives_per_query[i] for i in sampled_queries_indexes]
        positives_indexes = [p for pos in positives_indexes for p in pos]  # Flatten list of lists to a list
        positives_indexes = list(np.unique(positives_indexes))
        # Compute the cache only for queries and their positives, in order to find the best positive
        subset_ds = Subset(self, positives_indexes + list(sampled_queries_indexes + self.database_num))
        cache = self.compute_cache(args, model, subset_ds, (len(self), args.features_dim))
        # This loop's iterations could be done individually in the __getitem__(). This way is slower but clearer (and yields same results)
        for query_index in tqdm(sampled_queries_indexes, ncols=100):
            query_features = self.get_query_features(query_index, cache)
            best_positive_index = self.get_best_positive_index(args, query_index, cache, query_features)
            # Choose some random database images, from those remove the soft_positives, and then take the first 10 images as neg_indexes
            soft_positives = self.soft_positives_per_query[query_index]
            neg_indexes = np.random.choice(self.database_num, size=self.negs_num_per_query+len(soft_positives), replace=False)
            neg_indexes = np.setdiff1d(neg_indexes, soft_positives, assume_unique=True)[:self.negs_num_per_query]
            self.triplets_global_indexes.append((query_index, best_positive_index, *neg_indexes))
        # self.triplets_global_indexes is a tensor of shape [1000, 12]
        self.triplets_global_indexes = torch.tensor(self.triplets_global_indexes)
    def compute_triplets_full(self, args, model):
        """Full database mining: features for the whole database are cached and
        the hardest negatives are searched among 1000 random samples plus the
        previously-used negatives (self.neg_cache)."""
        self.triplets_global_indexes = []
        # Take 1000 random queries
        sampled_queries_indexes = np.random.choice(self.queries_num, args.cache_refresh_rate, replace=False)
        # Take all database indexes
        database_indexes = list(range(self.database_num))
        # Compute features for all images and store them in cache
        subset_ds = Subset(self, database_indexes + list(sampled_queries_indexes + self.database_num))
        cache = self.compute_cache(args, model, subset_ds, (len(self), args.features_dim))
        # This loop's iterations could be done individually in the __getitem__(). This way is slower but clearer (and yields same results)
        for query_index in tqdm(sampled_queries_indexes, ncols=100):
            query_features = self.get_query_features(query_index, cache)
            best_positive_index = self.get_best_positive_index(args, query_index, cache, query_features)
            # Choose 1000 random database images (neg_indexes)
            neg_indexes = np.random.choice(self.database_num, self.neg_samples_num, replace=False)
            # Remove the eventual soft_positives from neg_indexes
            soft_positives = self.soft_positives_per_query[query_index]
            neg_indexes = np.setdiff1d(neg_indexes, soft_positives, assume_unique=True)
            # Concatenate neg_indexes with the previous top 10 negatives (neg_cache)
            neg_indexes = np.unique(np.concatenate([self.neg_cache[query_index], neg_indexes]))
            # Search the hardest negatives
            neg_indexes = self.get_hardest_negatives_indexes(args, cache, query_features, neg_indexes)
            # Update nearest negatives in neg_cache
            self.neg_cache[query_index] = neg_indexes
            self.triplets_global_indexes.append((query_index, best_positive_index, *neg_indexes))
        # self.triplets_global_indexes is a tensor of shape [1000, 12]
        self.triplets_global_indexes = torch.tensor(self.triplets_global_indexes)
    def compute_triplets_partial(self, args, model):
        """Partial mining (also used by msls_weighted): hardest negatives are
        searched only among a random sample of the database."""
        self.triplets_global_indexes = []
        # Take 1000 random queries
        if self.mining == "partial":
            sampled_queries_indexes = np.random.choice(self.queries_num, args.cache_refresh_rate, replace=False)
        elif self.mining == "msls_weighted":  # Pick night and sideways queries with higher probability
            sampled_queries_indexes = np.random.choice(self.queries_num, args.cache_refresh_rate, replace=False, p=self.weights)
        # Sample 1000 random database images for the negatives
        sampled_database_indexes = np.random.choice(self.database_num, self.neg_samples_num, replace=False)
        # Take all the positives
        positives_indexes = [self.hard_positives_per_query[i] for i in sampled_queries_indexes]
        positives_indexes = [p for pos in positives_indexes for p in pos]
        # Merge them into database_indexes and remove duplicates
        database_indexes = list(sampled_database_indexes) + positives_indexes
        database_indexes = list(np.unique(database_indexes))
        subset_ds = Subset(self, database_indexes + list(sampled_queries_indexes + self.database_num))
        cache = self.compute_cache(args, model, subset_ds, cache_shape=(len(self), args.features_dim))
        # This loop's iterations could be done individually in the __getitem__(). This way is slower but clearer (and yields same results)
        for query_index in tqdm(sampled_queries_indexes, ncols=100):
            query_features = self.get_query_features(query_index, cache)
            best_positive_index = self.get_best_positive_index(args, query_index, cache, query_features)
            # Choose the hardest negatives within sampled_database_indexes, ensuring that there are no positives
            soft_positives = self.soft_positives_per_query[query_index]
            neg_indexes = np.setdiff1d(sampled_database_indexes, soft_positives, assume_unique=True)
            # Take all database images that are negatives and are within the sampled database images (aka database_indexes)
            neg_indexes = self.get_hardest_negatives_indexes(args, cache, query_features, neg_indexes)
            self.triplets_global_indexes.append((query_index, best_positive_index, *neg_indexes))
        # self.triplets_global_indexes is a tensor of shape [1000, 12]
        self.triplets_global_indexes = torch.tensor(self.triplets_global_indexes)
class RAMEfficient2DMatrix:
    """Sparse-row stand-in for a 2D np.zeros() array.

    Rows are stored individually and start out as None, so memory is only
    spent on the rows that are actually written. This matters because
    features are not computed for every image, just for a subset.
    """
    def __init__(self, shape, dtype=np.float32):
        self.shape = shape
        self.dtype = dtype
        self.matrix = [None for _ in range(shape[0])]
    def __setitem__(self, indexes, vals):
        # vals is a 2D array whose rows are scattered into the given row indexes.
        assert vals.shape[1] == self.shape[1], f"{vals.shape[1]} {self.shape[1]}"
        for row, vec in zip(indexes, vals):
            self.matrix[row] = vec.astype(self.dtype, copy=False)
    def __getitem__(self, index):
        # Scalar index -> single row (None if never written);
        # sequence of indexes -> stacked 2D array of those rows.
        if not hasattr(index, "__len__"):
            return self.matrix[index]
        return np.array([self.matrix[i] for i in index])
| 23,388 | 56.750617 | 138 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/eval.py |
"""
With this script you can evaluate checkpoints or test models from two popular
landmark retrieval github repos.
The first is https://github.com/naver/deep-image-retrieval from Naver labs,
provides ResNet-50 and ResNet-101 trained with AP on Google Landmarks 18 clean.
$ python eval.py --off_the_shelf=naver --l2=none --backbone=resnet101conv5 --aggregation=gem --fc_output_dim=2048
The second is https://github.com/filipradenovic/cnnimageretrieval-pytorch from
Radenovic, provides ResNet-50 and ResNet-101 trained with a triplet loss
on Google Landmarks 18 and sfm120k.
$ python eval.py --off_the_shelf=radenovic_gldv1 --l2=after_pool --backbone=resnet101conv5 --aggregation=gem --fc_output_dim=2048
$ python eval.py --off_the_shelf=radenovic_sfm --l2=after_pool --backbone=resnet101conv5 --aggregation=gem --fc_output_dim=2048
Note that although the architectures are almost the same, Naver's
implementation does not use an L2 normalization before/after the GeM aggregation,
while Radenovic's uses it after (and we use it before, which shows better
results in VG).
"""
import os
import sys
import torch
import parser
import logging
import sklearn
from os.path import join
from datetime import datetime
from torch.utils.model_zoo import load_url
from google_drive_downloader import GoogleDriveDownloader as gdd
import test
import util
import commons
import datasets_ws
from model import network
# URLs of off-the-shelf GeM checkpoints from Radenovic's cnnimageretrieval-pytorch,
# keyed by "<backbone>_<pretrain_dataset>" (sfm = retrieval-SfM-120k, gldv1 = Google Landmarks 18).
OFF_THE_SHELF_RADENOVIC = {
    'resnet50conv5_sfm' : 'http://cmp.felk.cvut.cz/cnnimageretrieval/data/networks/retrieval-SfM-120k/rSfM120k-tl-resnet50-gem-w-97bf910.pth',
    'resnet101conv5_sfm' : 'http://cmp.felk.cvut.cz/cnnimageretrieval/data/networks/retrieval-SfM-120k/rSfM120k-tl-resnet101-gem-w-a155e54.pth',
    'resnet50conv5_gldv1' : 'http://cmp.felk.cvut.cz/cnnimageretrieval/data/networks/gl18/gl18-tl-resnet50-gem-w-83fdc30.pth',
    'resnet101conv5_gldv1' : 'http://cmp.felk.cvut.cz/cnnimageretrieval/data/networks/gl18/gl18-tl-resnet101-gem-w-a4d43db.pth',
}
# Google Drive file ids of Naver's AP-GeM checkpoints, keyed by backbone.
OFF_THE_SHELF_NAVER = {
    "resnet50conv5" : "1oPtE_go9tnsiDLkWjN4NMpKjh-_md1G5",
    'resnet101conv5' : "1UWJGDuHtzaQdFhSMojoYVQjmCXhIwVvy"
}
######################################### SETUP #########################################
args = parser.parse_arguments()
start_time = datetime.now()
# Each run gets its own timestamped output folder under test/<save_dir>/.
args.save_dir = join("test", args.save_dir, start_time.strftime('%Y-%m-%d_%H-%M-%S'))
commons.setup_logging(args.save_dir)
commons.make_deterministic(args.seed)
logging.info(f"Arguments: {args}")
logging.info(f"The outputs are being saved in {args.save_dir}")
######################################### MODEL #########################################
model = network.GeoLocalizationNet(args)
model = model.to(args.device)
if args.aggregation in ["netvlad", "crn"]:
    # NetVLAD/CRN output dimension is (backbone dim) * (number of clusters).
    args.features_dim *= args.netvlad_clusters
if args.off_the_shelf.startswith("radenovic") or args.off_the_shelf.startswith("naver"):
    # Load a third-party pretrained checkpoint instead of one of our own.
    if args.off_the_shelf.startswith("radenovic"):
        pretrain_dataset_name = args.off_the_shelf.split("_")[1]  # sfm or gldv1 datasets
        url = OFF_THE_SHELF_RADENOVIC[f"{args.backbone}_{pretrain_dataset_name}"]
        state_dict = load_url(url, model_dir=join("data", "off_the_shelf_nets"))
    else:
        # This is a hacky workaround to maintain compatibility
        sys.modules['sklearn.decomposition.pca'] = sklearn.decomposition._pca
        zip_file_path = join("data", "off_the_shelf_nets", args.backbone + "_naver.zip")
        if not os.path.exists(zip_file_path):
            gdd.download_file_from_google_drive(file_id=OFF_THE_SHELF_NAVER[args.backbone],
                                                dest_path=zip_file_path, unzip=True)
        if args.backbone == "resnet50conv5":
            state_dict_filename = "Resnet50-AP-GeM.pt"
        elif args.backbone == "resnet101conv5":
            state_dict_filename = "Resnet-101-AP-GeM.pt"
        state_dict = torch.load(join("data", "off_the_shelf_nets", state_dict_filename))
        state_dict = state_dict["state_dict"]
    # NOTE(review): this positional renaming assumes our model and the checkpoint
    # enumerate their parameters in the same order — confirm when adding new backbones.
    model_keys = model.state_dict().keys()
    renamed_state_dict = {k: v for k, v in zip(model_keys, state_dict.values())}
    model.load_state_dict(renamed_state_dict)
elif args.resume is not None:
    logging.info(f"Resuming model from {args.resume}")
    model = util.resume_model(args, model)
# Enable DataParallel after loading checkpoint, otherwise doing it before
# would append "module." in front of the keys of the state dict triggering errors
model = torch.nn.DataParallel(model)
if args.pca_dim is None:
    pca = None
else:
    # Fit PCA whitening that reduces descriptors from full_features_dim to pca_dim.
    full_features_dim = args.features_dim
    args.features_dim = args.pca_dim
    pca = util.compute_pca(args, model, args.pca_dataset_folder, full_features_dim)
######################################### DATASETS #########################################
test_ds = datasets_ws.BaseDataset(args, args.datasets_folder, args.dataset_name, "test")
logging.info(f"Test set: {test_ds}")
######################################### TEST on TEST SET #########################################
recalls, recalls_str = test.test(args, test_ds, model, args.test_method, pca)
logging.info(f"Recalls on {test_ds}: {recalls_str}")
logging.info(f"Finished in {str(datetime.now() - start_time)[:-7]}")
| 5,209 | 46.363636 | 146 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/train.py |
import math
import torch
import logging
import numpy as np
from tqdm import tqdm
import torch.nn as nn
import multiprocessing
from os.path import join
from datetime import datetime
import torchvision.transforms as transforms
from torch.utils.data.dataloader import DataLoader
import util
import test
import parser
import commons
import datasets_ws
from model import network
from model.sync_batchnorm import convert_model
from model.functional import sare_ind, sare_joint
torch.backends.cudnn.benchmark = True  # Provides a speedup
#### Initial setup: parser, logging...
args = parser.parse_arguments()
start_time = datetime.now()
# Each run gets its own timestamped output folder under logs/<save_dir>/.
args.save_dir = join("logs", args.save_dir, start_time.strftime('%Y-%m-%d_%H-%M-%S'))
commons.setup_logging(args.save_dir)
commons.make_deterministic(args.seed)
logging.info(f"Arguments: {args}")
logging.info(f"The outputs are being saved in {args.save_dir}")
logging.info(f"Using {torch.cuda.device_count()} GPUs and {multiprocessing.cpu_count()} CPUs")
#### Creation of Datasets
logging.debug(f"Loading dataset {args.dataset_name} from folder {args.datasets_folder}")
# Train split provides mined triplets; val/test splits are plain database+queries sets.
triplets_ds = datasets_ws.TripletsDataset(args, args.datasets_folder, args.dataset_name, "train", args.negs_num_per_query)
logging.info(f"Train query set: {triplets_ds}")
val_ds = datasets_ws.BaseDataset(args, args.datasets_folder, args.dataset_name, "val")
logging.info(f"Val set: {val_ds}")
test_ds = datasets_ws.BaseDataset(args, args.datasets_folder, args.dataset_name, "test")
logging.info(f"Test set: {test_ds}")
#### Initialize model
model = network.GeoLocalizationNet(args)
model = model.to(args.device)
if args.aggregation in ["netvlad", "crn"]:  # If using NetVLAD layer, initialize it
    if not args.resume:
        # Cluster centroids are computed from training images (inference mode).
        triplets_ds.is_inference = True
        model.aggregation.initialize_netvlad_layer(args, triplets_ds, model.backbone)
    # NetVLAD/CRN output dimension is (backbone dim) * (number of clusters).
    args.features_dim *= args.netvlad_clusters
model = torch.nn.DataParallel(model)
#### Setup Optimizer and Loss
if args.aggregation == "crn":
    # CRN trains its context-reweighting layers with a separate learning rate
    # from the rest of the network, hence two parameter groups.
    crn_params = list(model.module.aggregation.crn.parameters())
    net_params = list(model.module.backbone.parameters()) + \
        list([m[1] for m in model.module.aggregation.named_parameters() if not m[0].startswith('crn')])
    if args.optim == "adam":
        optimizer = torch.optim.Adam([{'params': crn_params, 'lr': args.lr_crn_layer},
                                      {'params': net_params, 'lr': args.lr_crn_net}])
        logging.info("You're using CRN with Adam, it is advised to use SGD")
    elif args.optim == "sgd":
        optimizer = torch.optim.SGD([{'params': crn_params, 'lr': args.lr_crn_layer, 'momentum': 0.9, 'weight_decay': 0.001},
                                     {'params': net_params, 'lr': args.lr_crn_net, 'momentum': 0.9, 'weight_decay': 0.001}])
else:
    if args.optim == "adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    elif args.optim == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=0.001)
# Loss: standard triplet margin loss, or one of the SARE losses (see model.functional).
if args.criterion == "triplet":
    criterion_triplet = nn.TripletMarginLoss(margin=args.margin, p=2, reduction="sum")
elif args.criterion == "sare_ind":
    criterion_triplet = sare_ind
elif args.criterion == "sare_joint":
    criterion_triplet = sare_joint
#### Resume model, optimizer, and other training parameters
if args.resume:
    if args.aggregation != 'crn':
        model, optimizer, best_r5, start_epoch_num, not_improved_num = util.resume_train(args, model, optimizer)
    else:
        # CRN uses pretrained NetVLAD, then requires loading with strict=False and
        # does not load the optimizer from the checkpoint file.
        model, _, best_r5, start_epoch_num, not_improved_num = util.resume_train(args, model, strict=False)
    logging.info(f"Resuming from epoch {start_epoch_num} with best recall@5 {best_r5:.1f}")
else:
    best_r5 = start_epoch_num = not_improved_num = 0
if args.backbone.startswith('vit'):
    logging.info(f"Output dimension of the model is {args.features_dim}")
else:
    logging.info(f"Output dimension of the model is {args.features_dim}, with {util.get_flops(model, args.resize)}")
if torch.cuda.device_count() >= 2:
    # When using more than 1GPU, use sync_batchnorm for torch.nn.DataParallel
    model = convert_model(model)
    model = model.cuda()
#### Training loop
for epoch_num in range(start_epoch_num, args.epochs_num):
    logging.info(f"Start training epoch: {epoch_num:02d}")
    epoch_start_time = datetime.now()
    epoch_losses = np.zeros((0, 1), dtype=np.float32)
    # How many loops should an epoch last (default is 5000/1000=5)
    loops_num = math.ceil(args.queries_per_epoch / args.cache_refresh_rate)
    for loop_num in range(loops_num):
        logging.debug(f"Cache: {loop_num} / {loops_num}")
        # Compute triplets to use in the triplet loss
        triplets_ds.is_inference = True
        triplets_ds.compute_triplets(args, model)
        triplets_ds.is_inference = False
        triplets_dl = DataLoader(dataset=triplets_ds, num_workers=args.num_workers,
                                 batch_size=args.train_batch_size,
                                 collate_fn=datasets_ws.collate_fn,
                                 pin_memory=(args.device == "cuda"),
                                 drop_last=True)
        model = model.train()
        # images shape: (train_batch_size*12)*3*H*W ; by default train_batch_size=4, H=480, W=640
        # triplets_local_indexes shape: (train_batch_size*negs_num_per_query)*3 ; one triplet per negative
        for images, triplets_local_indexes, _ in tqdm(triplets_dl, ncols=100):
            # Flip all triplets or none
            if args.horizontal_flip:
                images = transforms.RandomHorizontalFlip()(images)
            # Compute features of all images (images contains queries, positives and negatives)
            features = model(images.to(args.device))
            loss_triplet = 0
            if args.criterion == "triplet":
                triplets_local_indexes = torch.transpose(
                    triplets_local_indexes.view(args.train_batch_size, args.negs_num_per_query, 3), 1, 0)
                for triplets in triplets_local_indexes:
                    queries_indexes, positives_indexes, negatives_indexes = triplets.T
                    loss_triplet += criterion_triplet(features[queries_indexes],
                                                      features[positives_indexes],
                                                      features[negatives_indexes])
            elif args.criterion == 'sare_joint':
                # sare_joint needs to receive all the negatives at once
                # BUGFIX: use args.negs_num_per_query instead of the hard-coded 10,
                # consistently with the "triplet" branch above; the hard-coded value
                # broke the reshape whenever --negs_num_per_query != 10.
                triplet_index_batch = triplets_local_indexes.view(args.train_batch_size, args.negs_num_per_query, 3)
                for batch_triplet_index in triplet_index_batch:
                    q = features[batch_triplet_index[0, 0]].unsqueeze(0)  # obtain query as tensor of shape 1xn_features
                    p = features[batch_triplet_index[0, 1]].unsqueeze(0)  # obtain positive as tensor of shape 1xn_features
                    n = features[batch_triplet_index[:, 2]]  # obtain negatives as tensor of shape negs_num_per_query x n_features
                    loss_triplet += criterion_triplet(q, p, n)
            elif args.criterion == "sare_ind":
                for triplet in triplets_local_indexes:
                    # triplet is a 1-D tensor with the 3 scalars indexes of the triplet
                    q_i, p_i, n_i = triplet
                    loss_triplet += criterion_triplet(features[q_i:q_i+1], features[p_i:p_i+1], features[n_i:n_i+1])
            del features
            # Average over the number of triplets in the batch.
            loss_triplet /= (args.train_batch_size * args.negs_num_per_query)
            optimizer.zero_grad()
            loss_triplet.backward()
            optimizer.step()
            # Keep track of all losses by appending them to epoch_losses
            batch_loss = loss_triplet.item()
            epoch_losses = np.append(epoch_losses, batch_loss)
            del loss_triplet
        logging.debug(f"Epoch[{epoch_num:02d}]({loop_num}/{loops_num}): " +
                      f"current batch triplet loss = {batch_loss:.4f}, " +
                      f"average epoch triplet loss = {epoch_losses.mean():.4f}")
    logging.info(f"Finished epoch {epoch_num:02d} in {str(datetime.now() - epoch_start_time)[:-7]}, "
                 f"average epoch triplet loss = {epoch_losses.mean():.4f}")
    # Compute recalls on validation set
    recalls, recalls_str = test.test(args, val_ds, model)
    logging.info(f"Recalls on val set {val_ds}: {recalls_str}")
    is_best = recalls[1] > best_r5
    # Save checkpoint, which contains all training parameters
    util.save_checkpoint(args, {
        "epoch_num": epoch_num, "model_state_dict": model.state_dict(),
        "optimizer_state_dict": optimizer.state_dict(), "recalls": recalls, "best_r5": best_r5,
        "not_improved_num": not_improved_num
    }, is_best, filename="last_model.pth")
    # If recall@5 did not improve for "many" epochs, stop training
    if is_best:
        logging.info(f"Improved: previous best R@5 = {best_r5:.1f}, current R@5 = {recalls[1]:.1f}")
        best_r5 = recalls[1]
        not_improved_num = 0
    else:
        not_improved_num += 1
        logging.info(f"Not improved: {not_improved_num} / {args.patience}: best R@5 = {best_r5:.1f}, current R@5 = {recalls[1]:.1f}")
        if not_improved_num >= args.patience:
            logging.info(f"Performance did not improve for {not_improved_num} epochs. Stop training.")
            break
logging.info(f"Best R@5: {best_r5:.1f}")
logging.info(f"Trained for {epoch_num+1:02d} epochs, in total in {str(datetime.now() - start_time)[:-7]}")
#### Test best model on test set
best_model_state_dict = torch.load(join(args.save_dir, "best_model.pth"))["model_state_dict"]
model.load_state_dict(best_model_state_dict)
recalls, recalls_str = test.test(args, test_ds, model, test_method=args.test_method)
logging.info(f"Recalls on {test_ds}: {recalls_str}")
logging.info(f"Recalls on {test_ds}: {recalls_str}")
| 10,186 | 45.729358 | 133 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/aggregation.py |
import math
import torch
import faiss
import logging
import numpy as np
from tqdm import tqdm
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.utils.data import DataLoader, SubsetRandomSampler
import model.functional as LF
import model.normalization as normalization
class MAC(nn.Module):
    """MAC pooling (Maximum Activations of Convolutions):
    thin module wrapper around LF.mac."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return LF.mac(x)

    def __repr__(self):
        return f"{self.__class__.__name__}()"
class SPoC(nn.Module):
    """SPoC pooling (Sum-Pooling of Convolutions):
    thin module wrapper around LF.spoc."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return LF.spoc(x)

    def __repr__(self):
        return f"{self.__class__.__name__}()"
class GeM(nn.Module):
    """Generalized-Mean (GeM) pooling with a learnable exponent ``p``.

    p = 1 corresponds to average pooling; p -> inf approaches max pooling.
    """

    def __init__(self, p=3, eps=1e-6, work_with_tokens=False):
        super().__init__()
        # The exponent is a learnable parameter (a 1-element tensor).
        self.p = Parameter(torch.ones(1) * p)
        self.eps = eps
        self.work_with_tokens = work_with_tokens

    def forward(self, x):
        return LF.gem(x, p=self.p, eps=self.eps, work_with_tokens=self.work_with_tokens)

    def __repr__(self):
        p_value = self.p.data.tolist()[0]
        return "{}(p={:.4f}, eps={})".format(self.__class__.__name__, p_value, self.eps)
class RMAC(nn.Module):
    """Regional Maximum Activations of Convolutions (R-MAC) pooling.

    Thin ``nn.Module`` wrapper around ``model.functional.rmac``.
    """

    def __init__(self, L=3, eps=1e-6):
        super().__init__()
        self.L = L      # number of region scales
        self.eps = eps  # numerical-stability constant

    def forward(self, x):
        return LF.rmac(x, L=self.L, eps=self.eps)

    def __repr__(self):
        return f"{self.__class__.__name__}(L={self.L})"
class Flatten(torch.nn.Module):
    """Squeeze trailing 1x1 spatial dims: (N, C, 1, 1) -> (N, C)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Only valid after global pooling, i.e. when H == W == 1.
        assert x.shape[2] == x.shape[3] == 1
        return x[:, :, 0, 0]
class RRM(nn.Module):
    """Residual Retrieval Module as described in the paper
    `Leveraging EfficientNet and Contrastive Learning for AccurateGlobal-scale
    Location Estimation <https://arxiv.org/pdf/2105.07645.pdf>`

    Global average pooling followed by a layer-normalized residual MLP,
    with a final L2 normalization of the descriptor.
    """

    def __init__(self, dim):
        super().__init__()
        self.avgpool = nn.AdaptiveAvgPool2d(output_size=1)
        self.flatten = Flatten()
        self.ln1 = nn.LayerNorm(normalized_shape=dim)
        self.fc1 = nn.Linear(in_features=dim, out_features=dim)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(in_features=dim, out_features=dim)
        self.ln2 = nn.LayerNorm(normalized_shape=dim)
        self.l2 = normalization.L2Norm()

    def forward(self, x):
        # Collapse the spatial dimensions to a single dim-sized vector.
        pooled = self.flatten(self.avgpool(x))
        normed = self.ln1(pooled)
        # Residual MLP: normed + FC2(ReLU(FC1(normed))).
        out = self.fc2(self.relu(self.fc1(normed))) + normed
        # Normalize, then project onto the unit sphere.
        return self.l2(self.ln2(out))
# based on https://github.com/lyakaap/NetVLAD-pytorch/blob/master/netvlad.py
class NetVLAD(nn.Module):
    """NetVLAD layer implementation

    Soft-assigns local descriptors to learned cluster centroids and
    aggregates the residuals (descriptor - centroid) into a single
    L2-normalized vector of size clusters_num * dim.
    """
    def __init__(self, clusters_num=64, dim=128, normalize_input=True, work_with_tokens=False):
        """
        Args:
            clusters_num : int
                The number of clusters
            dim : int
                Dimension of descriptors
            alpha : float
                Parameter of initialization. Larger value is harder assignment.
            normalize_input : bool
                If true, descriptor-wise L2 normalization is applied to input.
        """
        super().__init__()
        self.clusters_num = clusters_num
        self.dim = dim
        # Placeholder; the real value is computed in init_params().
        self.alpha = 0
        self.normalize_input = normalize_input
        self.work_with_tokens = work_with_tokens
        if work_with_tokens:
            # Token input (CCT/ViT): 1D conv over the token axis.
            self.conv = nn.Conv1d(dim, clusters_num, kernel_size=1, bias=False)
        else:
            # Feature-map input: 1x1 conv computes per-location cluster scores.
            self.conv = nn.Conv2d(dim, clusters_num, kernel_size=(1, 1), bias=False)
        self.centroids = nn.Parameter(torch.rand(clusters_num, dim))
    def init_params(self, centroids, descriptors):
        # Initialize centroids and the assignment conv from k-means results
        # (called by initialize_netvlad_layer).
        centroids_assign = centroids / np.linalg.norm(centroids, axis=1, keepdims=True)
        dots = np.dot(centroids_assign, descriptors.T)
        dots.sort(0)
        dots = dots[::-1, :]  # sort, descending
        # alpha set so the mean best/second-best score gap maps to a 0.01
        # assignment ratio (larger alpha -> harder assignment).
        self.alpha = (-np.log(0.01) / np.mean(dots[0, :] - dots[1, :])).item()
        self.centroids = nn.Parameter(torch.from_numpy(centroids))
        if self.work_with_tokens:
            self.conv.weight = nn.Parameter(torch.from_numpy(self.alpha * centroids_assign).unsqueeze(2))
        else:
            self.conv.weight = nn.Parameter(torch.from_numpy(self.alpha * centroids_assign).unsqueeze(2).unsqueeze(3))
        self.conv.bias = None
    def forward(self, x):
        if self.work_with_tokens:
            # (N, tokens, dim) -> (N, dim, tokens) so dim acts as channels.
            x = x.permute(0, 2, 1)
            N, D, _ = x.shape[:]
        else:
            N, D, H, W = x.shape[:]
        if self.normalize_input:
            x = F.normalize(x, p=2, dim=1)  # Across descriptor dim
        x_flatten = x.view(N, D, -1)
        soft_assign = self.conv(x).view(N, self.clusters_num, -1)
        soft_assign = F.softmax(soft_assign, dim=1)
        vlad = torch.zeros([N, self.clusters_num, D], dtype=x_flatten.dtype, device=x_flatten.device)
        # NOTE(review): the loop variable D shadows the descriptor-dim D bound
        # above; inside the loop it indexes clusters. Behavior is correct, but
        # the shadowing is easy to misread.
        for D in range(self.clusters_num):  # Slower than non-looped, but lower memory usage
            # Residual of every descriptor w.r.t. centroid D, weighted by its
            # soft assignment to cluster D.
            residual = x_flatten.unsqueeze(0).permute(1, 0, 2, 3) - \
                    self.centroids[D:D+1, :].expand(x_flatten.size(-1), -1, -1).permute(1, 2, 0).unsqueeze(0)
            residual = residual * soft_assign[:, D:D+1, :].unsqueeze(2)
            vlad[:, D:D+1, :] = residual.sum(dim=-1)
        vlad = F.normalize(vlad, p=2, dim=2)  # intra-normalization
        vlad = vlad.view(N, -1)  # Flatten
        vlad = F.normalize(vlad, p=2, dim=1)  # L2 normalize
        return vlad
    def initialize_netvlad_layer(self, args, cluster_ds, backbone):
        # Sample ~50k local descriptors from random dataset images, run
        # k-means on them, and initialize this layer from the centroids.
        descriptors_num = 50000
        descs_num_per_image = 100
        images_num = math.ceil(descriptors_num / descs_num_per_image)
        random_sampler = SubsetRandomSampler(np.random.choice(len(cluster_ds), images_num, replace=False))
        random_dl = DataLoader(dataset=cluster_ds, num_workers=args.num_workers,
                               batch_size=args.infer_batch_size, sampler=random_sampler)
        with torch.no_grad():
            backbone = backbone.eval()
            logging.debug("Extracting features to initialize NetVLAD layer")
            descriptors = np.zeros(shape=(descriptors_num, args.features_dim), dtype=np.float32)
            for iteration, (inputs, _) in enumerate(tqdm(random_dl, ncols=100)):
                inputs = inputs.to(args.device)
                outputs = backbone(inputs)
                norm_outputs = F.normalize(outputs, p=2, dim=1)
                image_descriptors = norm_outputs.view(norm_outputs.shape[0], args.features_dim, -1).permute(0, 2, 1)
                image_descriptors = image_descriptors.cpu().numpy()
                batchix = iteration * args.infer_batch_size * descs_num_per_image
                for ix in range(image_descriptors.shape[0]):
                    # Keep a random subset of descriptors from each image.
                    sample = np.random.choice(image_descriptors.shape[1], descs_num_per_image, replace=False)
                    startix = batchix + ix * descs_num_per_image
                    descriptors[startix:startix + descs_num_per_image, :] = image_descriptors[ix, sample, :]
        kmeans = faiss.Kmeans(args.features_dim, self.clusters_num, niter=100, verbose=False)
        kmeans.train(descriptors)
        logging.debug(f"NetVLAD centroids shape: {kmeans.centroids.shape}")
        self.init_params(kmeans.centroids, descriptors)
        # Module.to() updates parameters in place, so rebinding the local
        # `self` here has no effect on the caller beyond the device move.
        self = self.to(args.device)
class CRNModule(nn.Module):
    """Contextual Reweighting Network: produces a per-location spatial mask
    used to re-weight NetVLAD soft assignments (see CRN below)."""

    def __init__(self, dim):
        super().__init__()
        # Work at half resolution for the context filters.
        self.downsample_pool = nn.AvgPool2d(kernel_size=3, stride=(2, 2),
                                            padding=0, ceil_mode=True)
        # Multiscale Context Filters (3x3 -> 32ch, 5x5 -> 32ch, 7x7 -> 20ch).
        self.filter_3_3 = nn.Conv2d(in_channels=dim, out_channels=32,
                                    kernel_size=(3, 3), padding=1)
        self.filter_5_5 = nn.Conv2d(in_channels=dim, out_channels=32,
                                    kernel_size=(5, 5), padding=2)
        self.filter_7_7 = nn.Conv2d(in_channels=dim, out_channels=20,
                                    kernel_size=(7, 7), padding=3)
        # Accumulation weight: 1x1 conv collapsing the 84 context channels.
        self.acc_w = nn.Conv2d(in_channels=84, out_channels=1, kernel_size=(1, 1))
        # Upsampling back to the input resolution.
        self.upsample = F.interpolate
        self._initialize_weights()

    def _initialize_weights(self):
        # Xavier init for the context filters, zero bias.
        # (Order matches the original so RNG consumption is identical.)
        for conv in (self.filter_3_3, self.filter_5_5, self.filter_7_7):
            torch.nn.init.xavier_normal_(conv.weight)
            torch.nn.init.constant_(conv.bias, 0.0)
        # The accumulation conv is fixed to a plain sum (w=1, b=0) and frozen.
        torch.nn.init.constant_(self.acc_w.weight, 1.0)
        torch.nn.init.constant_(self.acc_w.bias, 0.0)
        self.acc_w.weight.requires_grad = False
        self.acc_w.bias.requires_grad = False

    def forward(self, x):
        # Contextual Reweighting Network.
        ds = self.downsample_pool(x)
        # Multiscale context g_n, concatenated channel-wise and rectified.
        g = torch.cat((self.filter_3_3(ds), self.filter_5_5(ds), self.filter_7_7(ds)), dim=1)
        g = F.relu(g)
        # One non-negative accumulation weight per spatial location.
        w = F.relu(self.acc_w(g))
        # Reweighting mask at the input's spatial resolution.
        return self.upsample(w, scale_factor=2, mode='bilinear')
class CRN(NetVLAD):
    """NetVLAD variant with a Contextual Reweighting Network: the CRN mask
    re-weights each location's soft assignment before VLAD aggregation."""
    def __init__(self, clusters_num=64, dim=128, normalize_input=True):
        super().__init__(clusters_num, dim, normalize_input)
        self.crn = CRNModule(dim)
    def forward(self, x):
        N, D, H, W = x.shape[:]
        if self.normalize_input:
            x = F.normalize(x, p=2, dim=1)  # Across descriptor dim
        mask = self.crn(x)
        x_flatten = x.view(N, D, -1)
        soft_assign = self.conv(x).view(N, self.clusters_num, -1)
        soft_assign = F.softmax(soft_assign, dim=1)
        # Weight soft_assign using CRN's mask
        soft_assign = soft_assign * mask.view(N, 1, H * W)
        vlad = torch.zeros([N, self.clusters_num, D], dtype=x_flatten.dtype, device=x_flatten.device)
        # NOTE(review): the loop variable D shadows the descriptor-dim D above;
        # inside the loop it indexes clusters (same pattern as NetVLAD.forward).
        for D in range(self.clusters_num):  # Slower than non-looped, but lower memory usage
            # Residual of every descriptor w.r.t. centroid D, weighted by the
            # (mask-reweighted) soft assignment to cluster D.
            residual = x_flatten.unsqueeze(0).permute(1, 0, 2, 3) - \
                    self.centroids[D:D + 1, :].expand(x_flatten.size(-1), -1, -1).permute(1, 2, 0).unsqueeze(0)
            residual = residual * soft_assign[:, D:D + 1, :].unsqueeze(2)
            vlad[:, D:D + 1, :] = residual.sum(dim=-1)
        vlad = F.normalize(vlad, p=2, dim=2)  # intra-normalization
        vlad = vlad.view(N, -1)  # Flatten
        vlad = F.normalize(vlad, p=2, dim=1)  # L2 normalize
        return vlad
| 10,963 | 41.007663 | 132 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/network.py |
import os
import torch
import logging
import torchvision
from torch import nn
from os.path import join
from transformers import ViTModel
from google_drive_downloader import GoogleDriveDownloader as gdd
from model.cct import cct_14_7x2_384
from model.aggregation import Flatten
from model.normalization import L2Norm
import model.aggregation as aggregation
# Pretrained models on Google Landmarks v2 and Places 365
# Maps "<backbone>_<pretrain>" -> Google Drive file id of the checkpoint;
# downloaded on demand by get_pretrained_model().
PRETRAINED_MODELS = {
    'resnet18_places'  : '1DnEQXhmPxtBUrRc81nAvT8z17bk-GBj5',
    'resnet50_places'  : '1zsY4mN4jJ-AsmV3h4hjbT72CBfJsgSGC',
    'resnet101_places' : '1E1ibXQcg7qkmmmyYgmwMTh7Xf1cDNQXa',
    'vgg16_places'     : '1UWl1uz6rZ6Nqmp1K5z3GHAIZJmDh4bDu',
    'resnet18_gldv2'   : '1wkUeUXFXuPHuEvGTXVpuP5BMB-JJ1xke',
    'resnet50_gldv2'   : '1UDUv6mszlXNC1lv6McLdeBNMq9-kaA70',
    'resnet101_gldv2'  : '1apiRxMJpDlV0XmKlC5Na_Drg2jtGL-uE',
    'vgg16_gldv2'      : '10Ov9JdO7gbyz6mB5x0v_VSAUMj91Ta4o'
}
class GeoLocalizationNet(nn.Module):
    """The used networks are composed of a backbone and an aggregation layer.

    The backbone (CNN or transformer) produces a feature map / tokens; the
    aggregation layer turns that into a single global descriptor.
    """
    def __init__(self, args):
        super().__init__()
        self.backbone = get_backbone(args)
        self.arch_name = args.backbone
        self.aggregation = get_aggregation(args)
        if args.aggregation in ["gem", "spoc", "mac", "rmac"]:
            # Optionally L2-normalize before/after pooling, then flatten to (N, D).
            if args.l2 == "before_pool":
                self.aggregation = nn.Sequential(L2Norm(), self.aggregation, Flatten())
            elif args.l2 == "after_pool":
                self.aggregation = nn.Sequential(self.aggregation, L2Norm(), Flatten())
            elif args.l2 == "none":
                self.aggregation = nn.Sequential(self.aggregation, Flatten())
        # Idiom fix: `is not None` instead of `!= None` (same behavior here).
        if args.fc_output_dim is not None:
            # Concatenate fully connected layer to the aggregation layer
            self.aggregation = nn.Sequential(self.aggregation,
                                             nn.Linear(args.features_dim, args.fc_output_dim),
                                             L2Norm())
            # Mutates args so downstream code sees the final descriptor size.
            args.features_dim = args.fc_output_dim
    def forward(self, x):
        x = self.backbone(x)
        x = self.aggregation(x)
        return x
def get_aggregation(args):
    """Build the aggregation module selected by ``args.aggregation``.

    Returns None (implicitly) for an unrecognized aggregation name.
    """
    name = args.aggregation
    if name == "gem":
        return aggregation.GeM(work_with_tokens=args.work_with_tokens)
    if name == "spoc":
        return aggregation.SPoC()
    if name == "mac":
        return aggregation.MAC()
    if name == "rmac":
        return aggregation.RMAC()
    if name == "netvlad":
        return aggregation.NetVLAD(clusters_num=args.netvlad_clusters,
                                   dim=args.features_dim,
                                   work_with_tokens=args.work_with_tokens)
    if name == 'crn':
        return aggregation.CRN(clusters_num=args.netvlad_clusters, dim=args.features_dim)
    if name == "rrm":
        return aggregation.RRM(args.features_dim)
    if name in ['cls', 'seqpool']:
        # Token-based heads (CCT/ViT) aggregate inside the backbone itself.
        return nn.Identity()
def get_pretrained_model(args):
    """Return a torchvision classification model pretrained on Places 365
    (365 classes) or Google Landmarks v2 (512 classes), downloading the
    checkpoint from Google Drive on first use."""
    if args.pretrain == 'places': num_classes = 365
    elif args.pretrain == 'gldv2': num_classes = 512
    # NOTE(review): any other args.pretrain (or an unknown backbone) leaves
    # num_classes/model undefined -> NameError. get_backbone only calls this
    # with 'places'/'gldv2' and the backbones below.
    if args.backbone.startswith("resnet18"):
        model = torchvision.models.resnet18(num_classes=num_classes)
    elif args.backbone.startswith("resnet50"):
        model = torchvision.models.resnet50(num_classes=num_classes)
    elif args.backbone.startswith("resnet101"):
        model = torchvision.models.resnet101(num_classes=num_classes)
    elif args.backbone.startswith("vgg16"):
        model = torchvision.models.vgg16(num_classes=num_classes)
    if args.backbone.startswith('resnet'):
        # Strip the "convN" suffix, e.g. "resnet18conv4" -> "resnet18".
        model_name = args.backbone.split('conv')[0] + "_" + args.pretrain
    else:
        model_name = args.backbone + "_" + args.pretrain
    file_path = join("data", "pretrained_nets", model_name + ".pth")
    if not os.path.exists(file_path):
        # First use: fetch the checkpoint listed in PRETRAINED_MODELS.
        gdd.download_file_from_google_drive(file_id=PRETRAINED_MODELS[model_name],
                                            dest_path=file_path)
    state_dict = torch.load(file_path, map_location=torch.device('cpu'))
    model.load_state_dict(state_dict)
    return model
def get_backbone(args):
    """Build the (partially frozen) feature-extraction backbone selected by
    ``args.backbone``.

    Side effects on ``args``:
        - args.work_with_tokens: True for token-based backbones (CCT/ViT).
        - args.features_dim: set to the backbone's output dimensionality.
    """
    # The aggregation layer works differently based on the type of architecture
    args.work_with_tokens = args.backbone.startswith('cct') or args.backbone.startswith('vit')
    if args.backbone.startswith("resnet"):
        if args.pretrain in ['places', 'gldv2']:
            backbone = get_pretrained_model(args)
        elif args.backbone.startswith("resnet18"):
            backbone = torchvision.models.resnet18(pretrained=True)
        elif args.backbone.startswith("resnet50"):
            backbone = torchvision.models.resnet50(pretrained=True)
        elif args.backbone.startswith("resnet101"):
            backbone = torchvision.models.resnet101(pretrained=True)
        for name, child in backbone.named_children():
            # Freeze layers before conv_3
            if name == "layer3":
                break
            for params in child.parameters():
                params.requires_grad = False
        # NOTE(review): the messages below render as e.g. "resnetresnet18"
        # because split('conv')[0] already contains the "resnet" prefix.
        if args.backbone.endswith("conv4"):
            logging.debug(f"Train only conv4_x of the resnet{args.backbone.split('conv')[0]} (remove conv5_x), freeze the previous ones")
            # Drop avgpool, fc and conv5_x.
            layers = list(backbone.children())[:-3]
        elif args.backbone.endswith("conv5"):
            logging.debug(f"Train only conv4_x and conv5_x of the resnet{args.backbone.split('conv')[0]}, freeze the previous ones")
            # Drop avgpool and fc only.
            layers = list(backbone.children())[:-2]
    elif args.backbone == "vgg16":
        if args.pretrain in ['places', 'gldv2']:
            backbone = get_pretrained_model(args)
        else:
            backbone = torchvision.models.vgg16(pretrained=True)
        layers = list(backbone.features.children())[:-2]
        for l in layers[:-5]:
            for p in l.parameters(): p.requires_grad = False
        logging.debug("Train last layers of the vgg16, freeze the previous ones")
    elif args.backbone == "alexnet":
        backbone = torchvision.models.alexnet(pretrained=True)
        layers = list(backbone.features.children())[:-2]
        for l in layers[:5]:
            for p in l.parameters(): p.requires_grad = False
        logging.debug("Train last layers of the alexnet, freeze the previous ones")
    elif args.backbone.startswith("cct"):
        if args.backbone.startswith("cct384"):
            backbone = cct_14_7x2_384(pretrained=True, progress=True, aggregation=args.aggregation)
        if args.trunc_te:
            logging.debug(f"Truncate CCT at transformers encoder {args.trunc_te}")
            backbone.classifier.blocks = torch.nn.ModuleList(backbone.classifier.blocks[:args.trunc_te].children())
        if args.freeze_te:
            logging.debug(f"Freeze all the layers up to tranformer encoder {args.freeze_te}")
            # Freeze everything, then unfreeze encoders after freeze_te.
            for p in backbone.parameters():
                p.requires_grad = False
            for name, child in backbone.classifier.blocks.named_children():
                if int(name) > args.freeze_te:
                    for params in child.parameters():
                        params.requires_grad = True
        args.features_dim = 384
        return backbone
    elif args.backbone.startswith("vit"):
        assert args.resize[0] in [224, 384], f'Image size for ViT must be either 224 or 384, but it\'s {args.resize[0]}'
        if args.resize[0] == 224:
            backbone = ViTModel.from_pretrained('google/vit-base-patch16-224-in21k')
        elif args.resize[0] == 384:
            backbone = ViTModel.from_pretrained('google/vit-base-patch16-384')
        if args.trunc_te:
            logging.debug(f"Truncate ViT at transformers encoder {args.trunc_te}")
            backbone.encoder.layer = backbone.encoder.layer[:args.trunc_te]
        if args.freeze_te:
            logging.debug(f"Freeze all the layers up to tranformer encoder {args.freeze_te+1}")
            # Freeze everything, then unfreeze encoders after freeze_te.
            for p in backbone.parameters():
                p.requires_grad = False
            for name, child in backbone.encoder.layer.named_children():
                if int(name) > args.freeze_te:
                    for params in child.parameters():
                        params.requires_grad = True
        backbone = VitWrapper(backbone, args.aggregation)
        args.features_dim = 768
        return backbone
    # CNN backbones (resnet/vgg/alexnet) fall through to here.
    backbone = torch.nn.Sequential(*layers)
    args.features_dim = get_output_channels_dim(backbone)  # Dinamically obtain number of channels in output
    return backbone
class VitWrapper(nn.Module):
    """Adapt a HuggingFace ViT so its output matches the aggregation head.

    For token-based aggregations ("netvlad"/"gem") all patch tokens (without
    the leading CLS token) are returned; otherwise only the CLS token is.
    """

    def __init__(self, vit_model, aggregation):
        super().__init__()
        self.vit_model = vit_model
        self.aggregation = aggregation

    def forward(self, x):
        hidden = self.vit_model(x).last_hidden_state
        if self.aggregation in ["netvlad", "gem"]:
            # Drop the CLS token, keep every patch token.
            return hidden[:, 1:, :]
        # CLS token only.
        return hidden[:, 0, :]
def get_output_channels_dim(model):
    """Return the number of channels in the output of a model."""
    # Probe with a dummy ImageNet-sized batch; channels are dim 1 (NCHW).
    dummy = torch.ones([1, 3, 224, 224])
    return model(dummy).shape[1]
| 9,160 | 43.687805 | 137 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/functional.py |
import math
import torch
import torch.nn.functional as F
def sare_ind(query, positive, negative):
    '''all 3 inputs are supposed to be shape 1xn_features'''
    # Squared L2 distances between the query and each candidate.
    dist_pos = ((query - positive) ** 2).sum(1)
    dist_neg = ((query - negative) ** 2).sum(1)
    # Softmax over negated distances: the closer, the higher the probability.
    logits = F.log_softmax(-torch.cat((dist_pos, dist_neg)), 0)
    # loss = (- dist[:, 0]).mean() on a batch
    # Negative log-likelihood of choosing the positive.
    return -logits[0]


def sare_joint(query, positive, negatives):
    '''query and positive have to be 1xn_features; whereas negatives has to be
    shape n_negative x n_features. n_negative is usually 10'''
    # NOTE: the implementation is the same if batch_size=1 as all operations
    # are vectorial. If there were the additional n_batch dimension a different
    # handling of that situation would have to be implemented here.
    # This function is declared anyway for the sake of clarity as the 2 should
    # be called in different situations because, even though there would be
    # no Exceptions, there would actually be a conceptual error.
    return sare_ind(query, positive, negatives)
def mac(x):
    """Global max pooling of an (N, C, H, W) map to (N, C, 1, 1) (MAC)."""
    # Max-pool with a kernel covering the whole spatial extent.
    return F.max_pool2d(x, (x.size(-2), x.size(-1)))
def spoc(x):
    """Global average pooling of an (N, C, H, W) map to (N, C, 1, 1) (SPoC)."""
    # Average-pool with a kernel covering the whole spatial extent.
    return F.avg_pool2d(x, (x.size(-2), x.size(-1)))
def gem(x, p=3, eps=1e-6, work_with_tokens=False):
    """Generalized-mean pooling: (avg(x.clamp(eps)^p))^(1/p).

    With work_with_tokens the input is permuted (0, 2, 1) and pooled over the
    last axis (token axis, per the ViT wrapper usage), then unsqueezed to keep
    compatibility with Flatten.
    """
    powered = x.clamp(min=eps).pow(p)
    if work_with_tokens:
        powered = powered.permute(0, 2, 1)
        # unsqueeze to maintain compatibility with Flatten
        return F.avg_pool1d(powered, powered.size(-1)).pow(1. / p).unsqueeze(3)
    return F.avg_pool2d(powered, (powered.size(-2), powered.size(-1))).pow(1. / p)
def rmac(x, L=3, eps=1e-6):
    """R-MAC: max-pool a multi-scale grid of square regions and accumulate
    the L2-normalized regional descriptors (plus the global one) into a
    single (N, C, 1, 1) descriptor.

    Args:
        x: feature map of shape (N, C, H, W).
        L: number of region scales.
        eps: numerical-stability constant for the L2 normalizations.
    """
    ovr = 0.4  # desired overlap of neighboring regions
    steps = torch.Tensor([2, 3, 4, 5, 6, 7])  # possible regions for the long dimension
    W = x.size(3)
    H = x.size(2)
    w = min(W, H)
    # w2 = math.floor(w/2.0 - 1)
    b = (max(H, W)-w)/(steps-1)
    # Pick the step count whose region overlap is closest to `ovr`.
    (tmp, idx) = torch.min(torch.abs(((w**2 - w*b)/w**2)-ovr), 0)  # steps(idx) regions for long dimension
    # region overplus per dimension
    Wd = 0;
    Hd = 0;
    if H < W:
        Wd = idx.item() + 1
    elif H > W:
        Hd = idx.item() + 1
    # Start from the global (whole-map) max-pooled, L2-normalized descriptor.
    v = F.max_pool2d(x, (x.size(-2), x.size(-1)))
    v = v / (torch.norm(v, p=2, dim=1, keepdim=True) + eps).expand_as(v)
    for l in range(1, L+1):
        wl = math.floor(2*w/(l+1))   # region side length at scale l
        wl2 = math.floor(wl/2 - 1)   # half side, used to center the grid
        if l+Wd == 1:
            b = 0
        else:
            b = (W-wl)/(l+Wd-1)
        cenW = torch.floor(wl2 + torch.Tensor(range(l-1+Wd+1))*b) - wl2  # center coordinates
        if l+Hd == 1:
            b = 0
        else:
            b = (H-wl)/(l+Hd-1)
        cenH = torch.floor(wl2 + torch.Tensor(range(l-1+Hd+1))*b) - wl2  # center coordinates
        for i_ in cenH.tolist():
            for j_ in cenW.tolist():
                if wl == 0:
                    continue
                # Crop the wl x wl region whose top-left corner is (i_, j_)...
                R = x[:,:,(int(i_)+torch.Tensor(range(wl)).long()).tolist(),:]
                R = R[:,:,:,(int(j_)+torch.Tensor(range(wl)).long()).tolist()]
                vt = F.max_pool2d(R, (R.size(-2), R.size(-1)))
                # ...and accumulate its L2-normalized max-pooled descriptor.
                vt = vt / (torch.norm(vt, p=2, dim=1, keepdim=True) + eps).expand_as(vt)
                v += vt
    return v
| 3,170 | 36.305882 | 105 | py |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/__init__.py | 0 | 0 | 0 | py | |
deep-visual-geo-localization-benchmark | deep-visual-geo-localization-benchmark-main/model/normalization.py |
import torch.nn as nn
import torch.nn.functional as F
class L2Norm(nn.Module):
    """L2-normalize the input along ``dim`` (default: the channel axis)."""

    def __init__(self, dim=1):
        super().__init__()
        self.dim = dim

    def forward(self, x):
        # Project vectors along the configured dimension onto the unit sphere.
        return F.normalize(x, p=2, dim=self.dim)
| 238 | 18.916667 | 48 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.